Merge remote-tracking branch 'origin/master' into minus1
Commit: e1b5e2e6f8
.github/actions/debug/action.yml (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
name: DebugInfo
description: Prints workflow debug info

runs:
  using: "composite"
  steps:
    - name: Print envs
      shell: bash
      run: |
        echo "::group::Envs"
        env
        echo "::endgroup::"
    - name: Print Event.json
      shell: bash
      run: |
        echo "::group::Event.json"
        python3 -m json.tool "$GITHUB_EVENT_PATH"
        echo "::endgroup::"
.github/workflows/auto_releases.yml (vendored, new file, 109 lines)
@@ -0,0 +1,109 @@
name: AutoReleases

env:
  PYTHONUNBUFFERED: 1

concurrency:
  group: autoreleases

on:
  # schedule:
  #   - cron: '0 9 * * *'
  workflow_dispatch:
    inputs:
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean

jobs:
  AutoReleaseInfo:
    runs-on: [self-hosted, style-checker-aarch64]
    outputs:
      data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }}
      dry_run: ${{ steps.info.outputs.DRY_RUN }}
    steps:
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          EOF
          echo "DRY_RUN=true" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Prepare Info
        id: info
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --prepare
          echo "::group::Auto Release Info"
          python3 -m json.tool /tmp/autorelease_info.json
          echo "::endgroup::"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_ENV"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
          echo "DRY_RUN=true" >> "$GITHUB_OUTPUT"
      - name: Post Release Branch statuses
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-status
      - name: Clean up
        uses: ./.github/actions/clean

  Release_0:
    needs: AutoReleaseInfo
    name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].release_branch }}
    if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].ready }}
    uses: ./.github/workflows/create_release.yml
    with:
      ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
      type: patch
      dry-run: ${{ needs.AutoReleaseInfo.outputs.dry_run }}
#
# Release_1:
#   needs: [AutoReleaseInfo, Release_0]
#   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].release_branch }}
#   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].ready }}
#   uses: ./.github/workflows/create_release.yml
#   with:
#     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].commit_sha }}
#     type: patch
#     dry-run: ${{ env.DRY_RUN }}
#
# Release_2:
#   needs: [AutoReleaseInfo, Release_1]
#   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].release_branch }}
#   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].ready }}
#   uses: ./.github/workflow/create_release.yml
#   with:
#     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
#     type: patch
#     dry-run: ${{ env.DRY_RUN }}
#
# Release_3:
#   needs: [AutoReleaseInfo, Release_2]
#   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].release_branch }}
#   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].ready }}
#   uses: ./.github/workflow/create_release.yml
#   with:
#     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].commit_sha }}
#     type: patch
#     dry-run: ${{ env.DRY_RUN }}

# - name: Post Slack Message
#   if: ${{ !cancelled() }}
#   run: |
#     cd "$GITHUB_WORKSPACE/tests/ci"
#     python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
.github/workflows/create_release.yml (vendored, 22 lines changed)
@@ -2,6 +2,7 @@ name: CreateRelease
concurrency:
  group: release

'on':
  workflow_dispatch:
    inputs:
@@ -26,6 +27,26 @@ concurrency:
        required: false
        default: false
        type: boolean
  workflow_call:
    inputs:
      ref:
        description: 'Git reference (branch or commit sha) from which to create the release'
        required: true
        type: string
      type:
        description: 'The type of release: "new" for a new release or "patch" for a patch release'
        required: true
        type: string
      only-repo:
        description: 'Run only repos updates including docker (repo-recovery, tests)'
        required: false
        default: false
        type: boolean
      dry-run:
        description: 'Dry run'
        required: false
        default: false
        type: boolean

jobs:
  CreateRelease:
@@ -101,6 +122,7 @@ jobs:
            --volume=".:/wd" --workdir="/wd" \
            clickhouse/style-test \
            ./tests/ci/changelog.py -v --debug-helpers \
            --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \
            --jobs=5 \
            --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
.gitmodules (vendored, 3 lines changed)
@@ -345,9 +345,6 @@
[submodule "contrib/FP16"]
    path = contrib/FP16
    url = https://github.com/Maratyszcza/FP16.git
[submodule "contrib/robin-map"]
    path = contrib/robin-map
    url = https://github.com/Tessil/robin-map.git
[submodule "contrib/aklomp-base64"]
    path = contrib/aklomp-base64
    url = https://github.com/aklomp/base64.git
@@ -322,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER)
    set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
endif()

# Before you start hating your debugger because it refuses to show variables ('<optimized out>'), try building with -DDEBUG_O_LEVEL="0"
# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263
set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds")

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")

set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")

set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")

if (OS_DARWIN)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
contrib/CMakeLists.txt (vendored, 3 lines changed)
@@ -209,9 +209,8 @@ endif()
option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES})
if (ENABLE_USEARCH)
    add_contrib (FP16-cmake FP16)
    add_contrib (robin-map-cmake robin-map)
    add_contrib (SimSIMD-cmake SimSIMD)
    add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD
    add_contrib (usearch-cmake usearch) # requires: FP16, SimdSIMD
else ()
    message(STATUS "Not using USearch")
endif ()
contrib/SimSIMD (vendored, 2 lines changed)
@@ -1 +1 @@
Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf
Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26
contrib/robin-map (vendored, 1 line changed)
@@ -1 +0,0 @@
Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d

@@ -1 +0,0 @@
# See contrib/usearch-cmake/CMakeLists.txt
contrib/usearch (vendored, 2 lines changed)
@@ -1 +1 @@
Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356
Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4
@@ -1,5 +1,4 @@
set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map")
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")

@@ -7,7 +6,6 @@ add_library(_usearch INTERFACE)

target_include_directories(_usearch SYSTEM INTERFACE
    ${FP16_PROJECT_DIR}/include
    ${ROBIN_MAP_PROJECT_DIR}/include
    ${SIMSIMD_PROJECT_DIR}/include
    ${USEARCH_PROJECT_DIR}/include)
@@ -59,6 +59,8 @@ Parameters:
- `ef_construction`: (optional, default: 128)
- `ef_search`: (optional, default: 64)

Value 0 for parameters `m`, `ef_construction`, and `ef_search` refers to the default value.

Example:

```sql
@@ -359,13 +359,14 @@ DESC format(JSONEachRow, '{"int" : 42, "float" : 42.42, "string" : "Hello, World
Dates, DateTimes:

```sql
DESC format(JSONEachRow, '{"date" : "2022-01-01", "datetime" : "2022-01-01 00:00:00"}')
DESC format(JSONEachRow, '{"date" : "2022-01-01", "datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}')
```
```response
┌─name─────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ date │ Nullable(Date) │ │ │ │ │ │
│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │
└──────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ date │ Nullable(Date) │ │ │ │ │ │
│ datetime │ Nullable(DateTime) │ │ │ │ │ │
│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │
└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Arrays:
@@ -759,12 +760,13 @@ DESC format(CSV, 'Hello world!,World hello!')
Dates, DateTimes:

```sql
DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00"')
DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00","2022-01-01 00:00:00.000"')
```
```response
┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ c1 │ Nullable(Date) │ │ │ │ │ │
│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │
│ c2 │ Nullable(DateTime) │ │ │ │ │ │
│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │
└──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
||||
@ -956,12 +958,13 @@ DESC format(TSKV, 'int=42 float=42.42 bool=true string=Hello,World!\n')
|
||||
Dates, DateTimes:
|
||||
|
||||
```sql
|
||||
DESC format(TSV, '2020-01-01 2020-01-01 00:00:00')
|
||||
DESC format(TSV, '2020-01-01 2020-01-01 00:00:00 2022-01-01 00:00:00.000')
|
||||
```
|
||||
```response
|
||||
┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
||||
│ c1 │ Nullable(Date) │ │ │ │ │ │
|
||||
│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │
|
||||
│ c2 │ Nullable(DateTime) │ │ │ │ │ │
|
||||
│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │
|
||||
└──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
@ -1126,12 +1129,13 @@ DESC format(Values, $$(42, 42.42, true, 'Hello,World!')$$)
|
||||
Dates, DateTimes:
|
||||
|
||||
```sql
|
||||
DESC format(Values, $$('2020-01-01', '2020-01-01 00:00:00')$$)
|
||||
```
|
||||
DESC format(Values, $$('2020-01-01', '2020-01-01 00:00:00', '2022-01-01 00:00:00.000')$$)
|
||||
```
|
||||
```response
|
||||
┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
||||
│ c1 │ Nullable(Date) │ │ │ │ │ │
|
||||
│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │
|
||||
│ c2 │ Nullable(DateTime) │ │ │ │ │ │
|
||||
│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │
|
||||
└──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
@ -1504,8 +1508,8 @@ DESC format(JSONEachRow, $$
|
||||
|
||||
#### input_format_try_infer_datetimes
|
||||
|
||||
If enabled, ClickHouse will try to infer type `DateTime64` from string fields in schema inference for text formats.
|
||||
If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime64(9)`,
|
||||
If enabled, ClickHouse will try to infer type `DateTime` or `DateTime64` from string fields in schema inference for text formats.
|
||||
If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime` or `DateTime64(9)` (if any datetime had fractional part),
|
||||
if at least one field was not parsed as datetime, the result type will be `String`.
|
||||
|
||||
Enabled by default.
|
||||
@@ -1513,39 +1517,66 @@ Enabled by default.
**Examples**

```sql
SET input_format_try_infer_datetimes = 0
SET input_format_try_infer_datetimes = 0;
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00.000"}
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}
$$)
```
```response
┌─name─────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(String) │ │ │ │ │ │
└──────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(String) │ │ │ │ │ │
│ datetime64 │ Nullable(String) │ │ │ │ │ │
└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
```sql
SET input_format_try_infer_datetimes = 1
SET input_format_try_infer_datetimes = 1;
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00.000"}
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}
$$)
```
```response
┌─name─────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │
└──────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(DateTime) │ │ │ │ │ │
│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │
└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
```sql
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00.000"}
{"datetime" : "unknown"}
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "unknown", "datetime64" : "unknown"}
$$)
```
```response
┌─name─────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(String) │ │ │ │ │ │
└──────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(String) │ │ │ │ │ │
│ datetime64 │ Nullable(String) │ │ │ │ │ │
└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

#### input_format_try_infer_datetimes_only_datetime64

If enabled, ClickHouse will always infer `DateTime64(9)` when `input_format_try_infer_datetimes` is enabled, even if the datetime values do not contain a fractional part.

Disabled by default.

**Examples**

```sql
SET input_format_try_infer_datetimes = 1;
SET input_format_try_infer_datetimes_only_datetime64 = 1;
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}
$$)
```

```text
┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │
│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │
└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Note: Parsing datetimes during schema inference respects the setting [date_time_input_format](/docs/en/operations/settings/settings-formats.md#date_time_input_format).
@@ -1042,10 +1042,23 @@ Compression rates of LZ4 or ZSTD improve on average by 20-40%.
This setting works best for tables with no primary key or a low-cardinality primary key, i.e. a table with only few distinct primary key values.
High-cardinality primary keys, e.g. involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting.

### deduplicate_merge_projection_mode
## lightweight_mutation_projection_mode

By default, lightweight `DELETE` does not work for tables with projections, because rows in a projection may be affected by a `DELETE` operation; hence the default value is `throw`.
However, this setting can change the behavior. With the value `drop` or `rebuild`, deletes work with projections. `drop` deletes the projection, so the current query may be fast because the projection is simply dropped, but future queries may be slow because no projection is attached anymore.
`rebuild` rebuilds the projection, which may affect the performance of the current query but may speed up future queries. Both options apply only at the part level:
projections in parts that are not touched by the delete stay intact and are neither dropped nor rebuilt.

Possible values:

- throw, drop, rebuild

Default value: throw
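A minimal sketch of how the modes play out, assuming a hypothetical table `orders` with one projection; since the setting is documented here among the MergeTree settings, it is applied in the table's `SETTINGS` clause:

```sql
-- Hypothetical table with a projection (names are illustrative).
CREATE TABLE orders
(
    id UInt64,
    user_id UInt64,
    amount Float64,
    PROJECTION by_user
    (
        SELECT user_id, sum(amount)
        GROUP BY user_id
    )
)
ENGINE = MergeTree
ORDER BY id
SETTINGS lightweight_mutation_projection_mode = 'rebuild';

-- With the default 'throw', this lightweight delete would fail because the
-- table has a projection. With 'rebuild', the delete proceeds and the
-- projection is rebuilt only in the parts the delete actually touches.
DELETE FROM orders WHERE user_id = 42;
```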

## deduplicate_merge_projection_mode

Whether to allow creating projections for tables with a non-classic MergeTree engine, that is, not (Replicated, Shared) MergeTree, and, if allowed, what action to take when merging projections: either drop or rebuild. Classic MergeTree ignores this setting.
It also controls `OPTIMIZE DEDUPLICATE` as well, but has effect on all MergeTree family members.
It also controls `OPTIMIZE DEDUPLICATE`, but has an effect on all MergeTree family members. Like `lightweight_mutation_projection_mode`, it applies at the part level.

Possible values:

@@ -5654,3 +5654,9 @@ Possible values:
- 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled.

Default value: `0`.

## create_if_not_exists

Enable `IF NOT EXISTS` for `CREATE` statements by default. If either this setting is enabled or `IF NOT EXISTS` is specified, and a table with the provided name already exists, no exception is thrown.

Default value: `false`.
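A short sketch of the effect (the table name is illustrative):

```sql
SET create_if_not_exists = 1;

CREATE TABLE t (x UInt32) ENGINE = MergeTree ORDER BY x;
-- With the setting enabled, repeating the statement behaves like
-- CREATE TABLE IF NOT EXISTS: no exception is thrown although t already exists.
CREATE TABLE t (x UInt32) ENGINE = MergeTree ORDER BY x;
```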
@@ -38,8 +38,7 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/

### Lightweight `DELETE`s with projections

By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance.
However, there is an option to change this behavior. By changing setting `lightweight_mutation_projection_mode = 'drop'`, deletes will work with projections.
By default, `DELETE` does not work for tables with projections, because rows in a projection may be affected by a `DELETE` operation. However, the [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` can change this behavior.

## Performance considerations when using lightweight `DELETE`

@@ -490,8 +490,6 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context

    /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously.
    auto process_list_element = context_in_use->getProcessListElement();
    /// Update context to preserve query information in processlist (settings, current_database)
    process_list_element->updateContext(context_in_use);

    thread_pool.scheduleOrThrowOnError(
        [this,
@@ -855,8 +853,6 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt

    /// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously.
    auto process_list_element = context_in_use->getProcessListElement();
    /// Update context to preserve query information in processlist (settings, current_database)
    process_list_element->updateContext(context_in_use);

    thread_pool.scheduleOrThrowOnError(
        [this,
@@ -307,7 +307,7 @@
    M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \
    M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \
    \
    M(S3DiskNoKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \
    M(DiskS3NoSuchKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \

#ifdef APPLY_FOR_EXTERNAL_METRICS
#define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M)
@@ -244,33 +244,43 @@ const char * analyzeImpl(
            is_trivial = false;
            if (!in_square_braces)
            {
                /// Check for case-insensitive flag.
                if (pos + 1 < end && pos[1] == '?')
                /// it means flag negation
                /// there are various possible flags
                /// actually only imsU are supported by re2
                auto is_flag_char = [](char x)
                {
                    for (size_t offset = 2; pos + offset < end; ++offset)
                    return x == '-' || x == 'i' || x == 'm' || x == 's' || x == 'U' || x == 'u';
                };
                /// Check for case-insensitive flag.
                if (pos + 2 < end && pos[1] == '?' && is_flag_char(pos[2]))
                {
                    size_t offset = 2;
                    for (; pos + offset < end; ++offset)
                    {
                        if (pos[offset] == '-' /// it means flag negation
                            /// various possible flags, actually only imsU are supported by re2
                            || (pos[offset] >= 'a' && pos[offset] <= 'z')
                            || (pos[offset] >= 'A' && pos[offset] <= 'Z'))
                        if (pos[offset] == 'i')
                        {
                            if (pos[offset] == 'i')
                            {
                                /// Actually it can be negated case-insensitive flag. But we don't care.
                                has_case_insensitive_flag = true;
                                break;
                            }
                            /// Actually it can be negated case-insensitive flag. But we don't care.
                            has_case_insensitive_flag = true;
                        }
                        else
                        else if (!is_flag_char(pos[offset]))
                            break;
                    }
                    pos += offset;
                    if (pos == end)
                        return pos;
                    /// if this group only contains flags, we have nothing to do.
                    if (*pos == ')')
                    {
                        ++pos;
                        break;
                    }
                }
                /// (?:regex) means non-capturing parentheses group
                if (pos + 2 < end && pos[1] == '?' && pos[2] == ':')
                else if (pos + 2 < end && pos[1] == '?' && pos[2] == ':')
                {
                    pos += 2;
                }
                if (pos + 3 < end && pos[1] == '?' && (pos[2] == '<' || pos[2] == '\'' || (pos[2] == 'P' && pos[3] == '<')))
                else if (pos + 3 < end && pos[1] == '?' && (pos[2] == '<' || pos[2] == '\'' || (pos[2] == 'P' && pos[3] == '<')))
                {
                    pos = skipNameCapturingGroup(pos, pos[2] == 'P' ? 3: 2, end);
                }
@@ -209,8 +209,35 @@
    \
    M(Merge, "Number of launched background merges.") \
    M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \
    M(MergedColumns, "Number of columns merged during the horizontal stage of merges.") \
    M(GatheredColumns, "Number of columns gathered during the vertical stage of merges.") \
    M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge.") \
    M(MergesTimeMilliseconds, "Total time spent for background merges.")\
    M(MergeTotalMilliseconds, "Total time spent for background merges") \
    M(MergeExecuteMilliseconds, "Total busy time spent for execution of background merges") \
    M(MergeHorizontalStageTotalMilliseconds, "Total time spent for horizontal stage of background merges") \
    M(MergeHorizontalStageExecuteMilliseconds, "Total busy time spent for execution of horizontal stage of background merges") \
    M(MergeVerticalStageTotalMilliseconds, "Total time spent for vertical stage of background merges") \
    M(MergeVerticalStageExecuteMilliseconds, "Total busy time spent for execution of vertical stage of background merges") \
    M(MergeProjectionStageTotalMilliseconds, "Total time spent for projection stage of background merges") \
    M(MergeProjectionStageExecuteMilliseconds, "Total busy time spent for execution of projection stage of background merges") \
    \
    M(MergingSortedMilliseconds, "Total time spent while merging sorted columns") \
    M(AggregatingSortedMilliseconds, "Total time spent while aggregating sorted columns") \
    M(CollapsingSortedMilliseconds, "Total time spent while collapsing sorted columns") \
    M(ReplacingSortedMilliseconds, "Total time spent while replacing sorted columns") \
    M(SummingSortedMilliseconds, "Total time spent while summing sorted columns") \
    M(VersionedCollapsingSortedMilliseconds, "Total time spent while version collapsing sorted columns") \
    M(GatheringColumnMilliseconds, "Total time spent while gathering columns for vertical merge") \
    \
    M(MutationTotalParts, "Number of total parts for which mutations tried to be applied") \
    M(MutationUntouchedParts, "Number of total parts for which mutations tried to be applied but which was completely skipped according to predicate") \
    M(MutatedRows, "Rows read for mutations. This is the number of rows before mutation") \
    M(MutatedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for mutations. This is the number before mutation.") \
    M(MutationTotalMilliseconds, "Total time spent for mutations.") \
    M(MutationExecuteMilliseconds, "Total busy time spent for execution of mutations.") \
    M(MutationAllPartColumns, "Number of times when task to mutate all columns in part was created") \
    M(MutationSomePartColumns, "Number of times when task to mutate some columns in part was created") \
    M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections in mutations.") \
    \
    M(MergeTreeDataWriterRows, "Number of rows INSERTed to MergeTree tables.") \
    M(MergeTreeDataWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables.") \
@@ -225,7 +252,6 @@
    M(MergeTreeDataWriterProjectionsCalculationMicroseconds, "Time spent calculating projections") \
    M(MergeTreeDataProjectionWriterSortingBlocksMicroseconds, "Time spent sorting blocks (for projection it might be a key different from table's sorting key)") \
    M(MergeTreeDataProjectionWriterMergingBlocksMicroseconds, "Time spent merging blocks") \
    M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections") \
    \
    M(InsertedWideParts, "Number of parts inserted in Wide format.") \
    M(InsertedCompactParts, "Number of parts inserted in Compact format.") \
@@ -184,14 +184,20 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi

    // Resource update leads to loss of runtime data of nodes and may lead to temporary violation of constraints (e.g. limits)
    // Try to minimise this by reusing "equal" resources (initialized with the same configuration).
    std::vector<State::ResourcePtr> resources_to_attach;
    for (auto & [name, new_resource] : new_state->resources)
    {
        if (auto iter = state->resources.find(name); iter != state->resources.end()) // Resource update
        {
            State::ResourcePtr old_resource = iter->second;
            if (old_resource->equals(*new_resource))
            {
                new_resource = old_resource; // Rewrite with older version to avoid loss of runtime data
                continue;
            }
        }
        // It is new or updated resource
        resources_to_attach.emplace_back(new_resource);
    }

    // Commit new state
@@ -199,17 +205,14 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi
    state = new_state;

    // Attach new and updated resources to the scheduler
    for (auto & [name, resource] : new_state->resources)
    for (auto & resource : resources_to_attach)
    {
        const SchedulerNodePtr & root = resource->nodes.find("/")->second.ptr;
        if (root->parent == nullptr)
        resource->attached_to = &scheduler;
        scheduler.event_queue->enqueue([this, root]
        {
            resource->attached_to = &scheduler;
            scheduler.event_queue->enqueue([this, root]
            {
                scheduler.attachChild(root);
            });
        }
            scheduler.attachChild(root);
        });
    }

    // NOTE: after mutex unlock `state` became available for Classifier(s) and must be immutable
@@ -19,6 +19,9 @@ TEST(OptimizeRE, analyze)
    };
    test_f("abc", "abc", {}, true, true);
    test_f("c([^k]*)de", "");
    test_f("(?-s)bob", "bob", {}, false, true);
    test_f("(?s)bob", "bob", {}, false, true);
    test_f("(?ssss", "");
    test_f("abc(de)fg", "abcdefg", {}, false, true);
    test_f("abc(de|xyz)fg", "abc", {"abcdefg", "abcxyzfg"}, false, true);
    test_f("abc(de?f|xyz)fg", "abc", {"abcd", "abcxyzfg"}, false, true);
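For context, a hedged SQL-level illustration of the flag groups these tests exercise; `match` uses RE2 syntax, so `(?i)` and `(?-s)` are inline flag groups rather than literal text:

```sql
-- Both comparisons are expected to return 1: '(?i)' makes the match
-- case-insensitive, and '(?-s)' only clears the 's' flag without changing
-- what is matched.
SELECT match('Hello', '(?i)hello'), match('bob', '(?-s)bob');
```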
@@ -325,6 +325,7 @@ class IColumn;
    \
    M(Bool, join_use_nulls, false, "Use NULLs for non-joined rows of outer JOINs for types that can be inside Nullable. If false, use default value of corresponding columns data type.", IMPORTANT) \
    \
    M(Int32, join_output_by_rowlist_perkey_rows_threshold, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join.", 0) \
    M(JoinStrictness, join_default_strictness, JoinStrictness::All, "Set default strictness in JOIN query. Possible values: empty string, 'ANY', 'ALL'. If empty, query without strictness will throw exception.", 0) \
    M(Bool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. ANY RIGHT JOIN needs one-to-many keys mapping to be consistent with LEFT one.", IMPORTANT) \
    M(Bool, single_join_prefer_left_table, true, "For single JOIN in case of identifier ambiguity prefer left table", IMPORTANT) \
@@ -896,6 +897,7 @@ class IColumn;
    M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \
    M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \
    M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \
    M(Bool, create_if_not_exists, false, "Enable IF NOT EXISTS for CREATE statements by default", 0) \
    \
    \
    /* ###################################### */ \
@@ -1135,6 +1137,7 @@ class IColumn;
    M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
    M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
    M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \
    M(Bool, input_format_try_infer_datetimes_only_datetime64, false, "When input_format_try_infer_datetimes is enabled, infer only DateTime64 but not DateTime types", 0) \
    M(Bool, input_format_try_infer_exponent_floats, false, "Try to infer floats in exponential notation while schema inference in text formats (except JSON, where exponent numbers are always inferred)", 0) \
    M(Bool, output_format_markdown_escape_special_characters, false, "Escape special characters in Markdown", 0) \
    M(Bool, input_format_protobuf_flatten_google_wrappers, false, "Enable Google wrappers for regular non-nested columns, e.g. google.protobuf.StringValue 'str' for String column 'str'. For Nullable columns empty wrappers are recognized as defaults, and missing as nulls", 0) \
@@ -75,6 +75,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
    },
    {"24.8",
        {
            {"create_if_not_exists", false, false, "New setting."},
            {"rows_before_aggregation", true, true, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"},
            {"restore_replace_external_table_functions_to_null", false, false, "New setting."},
            {"restore_replace_external_engines_to_null", false, false, "New setting."},
@@ -87,7 +88,9 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
            {"allow_experimental_time_series_table", false, false, "Added new setting to allow the TimeSeries table engine"},
            {"enable_analyzer", 1, 1, "Added an alias to a setting `allow_experimental_analyzer`."},
            {"optimize_functions_to_subcolumns", false, true, "Enabled settings by default"},
            {"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
            {"allow_experimental_vector_similarity_index", false, false, "Added new setting to allow experimental vector similarity indexes"},
            {"input_format_try_infer_datetimes_only_datetime64", true, false, "Allow to infer DateTime instead of DateTime64 in data formats"}
        }
    },
    {"24.7",
@@ -645,8 +645,9 @@ void CachedOnDiskReadBufferFromFile::predownload(FileSegment & file_segment)

    ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, current_impl_buffer_size);

    std::string failure_reason;
    bool continue_predownload = file_segment.reserve(
        current_predownload_size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds);
        current_predownload_size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, failure_reason);
    if (continue_predownload)
    {
        LOG_TEST(log, "Left to predownload: {}, buffer size: {}", bytes_to_predownload, current_impl_buffer_size);
@@ -1002,7 +1003,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()
    {
        chassert(file_offset_of_buffer_end + size - 1 <= file_segment.range().right);

        bool success = file_segment.reserve(size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds);
        std::string failure_reason;
        bool success = file_segment.reserve(size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, failure_reason);
        if (success)
        {
            chassert(file_segment.getCurrentWriteOffset() == static_cast<size_t>(implementation_buffer->getPosition()));
@@ -1028,7 +1030,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()
            LOG_TRACE(log, "Bypassing cache because writeCache method failed");
        }
        else
            LOG_TRACE(log, "No space left in cache to reserve {} bytes, will continue without cache download", size);
            LOG_TRACE(log, "No space left in cache to reserve {} bytes, reason: {}, "
                "will continue without cache download", failure_reason, size);

        if (!success)
        {
@@ -91,7 +91,8 @@ bool FileSegmentRangeWriter::write(char * data, size_t size, size_t offset, File

    size_t size_to_write = std::min(available_size, size);

    bool reserved = file_segment->reserve(size_to_write, reserve_space_lock_wait_timeout_milliseconds);
    std::string failure_reason;
    bool reserved = file_segment->reserve(size_to_write, reserve_space_lock_wait_timeout_milliseconds, failure_reason);
    if (!reserved)
    {
        appendFilesystemCacheLog(*file_segment);
@@ -63,7 +63,7 @@ void throwIfError(const Aws::Utils::Outcome<Result, Error> & response)
    {
        const auto & err = response.GetError();
        throw S3Exception(
            fmt::format("{} (Code: {}, s3 exception: {})",
            fmt::format("{} (Code: {}, S3 exception: '{}')",
                err.GetMessage(), static_cast<size_t>(err.GetErrorType()), err.GetExceptionName()),
            err.GetErrorType());
    }
@@ -419,10 +419,11 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo
    String result = getAdditionalFormatInfoForAllRowBasedFormats(settings);
    /// First, settings that are common for all text formats:
    result += fmt::format(
        ", try_infer_integers={}, try_infer_dates={}, try_infer_datetimes={}",
        ", try_infer_integers={}, try_infer_dates={}, try_infer_datetimes={}, try_infer_datetimes_only_datetime64={}",
        settings.try_infer_integers,
        settings.try_infer_dates,
        settings.try_infer_datetimes);
        settings.try_infer_datetimes,
        settings.try_infer_datetimes_only_datetime64);

    /// Second, format-specific settings:
    switch (escaping_rule)
@@ -266,6 +266,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
    format_settings.try_infer_integers = settings.input_format_try_infer_integers;
    format_settings.try_infer_dates = settings.input_format_try_infer_dates;
    format_settings.try_infer_datetimes = settings.input_format_try_infer_datetimes;
    format_settings.try_infer_datetimes_only_datetime64 = settings.input_format_try_infer_datetimes_only_datetime64;
    format_settings.try_infer_exponent_floats = settings.input_format_try_infer_exponent_floats;
    format_settings.markdown.escape_special_characters = settings.output_format_markdown_escape_special_characters;
    format_settings.bson.output_string_as_string = settings.output_format_bson_string_as_string;
@@ -46,6 +46,7 @@ struct FormatSettings
    bool try_infer_integers = true;
    bool try_infer_dates = true;
    bool try_infer_datetimes = true;
    bool try_infer_datetimes_only_datetime64 = false;
    bool try_infer_exponent_floats = false;

    enum class DateTimeInputFormat : uint8_t
@@ -306,37 +306,45 @@ namespace
        type_indexes.erase(TypeIndex::UInt64);
    }

    /// If we have only Date and DateTime types, convert Date to DateTime,
    /// otherwise, convert all Date and DateTime to String.
    /// If we have only date/datetimes types (Date/DateTime/DateTime64), convert all of them to the common type,
    /// otherwise, convert all Date, DateTime and DateTime64 to String.
    void transformDatesAndDateTimes(DataTypes & data_types, TypeIndexesSet & type_indexes)
    {
        bool have_dates = type_indexes.contains(TypeIndex::Date);
        bool have_datetimes = type_indexes.contains(TypeIndex::DateTime64);
        bool all_dates_or_datetimes = (type_indexes.size() == (static_cast<size_t>(have_dates) + static_cast<size_t>(have_datetimes)));
        bool have_datetimes = type_indexes.contains(TypeIndex::DateTime);
        bool have_datetimes64 = type_indexes.contains(TypeIndex::DateTime64);
        bool all_dates_or_datetimes = (type_indexes.size() == (static_cast<size_t>(have_dates) + static_cast<size_t>(have_datetimes) + static_cast<size_t>(have_datetimes64)));

        if (!all_dates_or_datetimes && (have_dates || have_datetimes))
        if (!all_dates_or_datetimes && (have_dates || have_datetimes || have_datetimes64))
        {
            for (auto & type : data_types)
            {
                if (isDate(type) || isDateTime64(type))
                if (isDate(type) || isDateTime(type) || isDateTime64(type))
                    type = std::make_shared<DataTypeString>();
            }

            type_indexes.erase(TypeIndex::Date);
            type_indexes.erase(TypeIndex::DateTime);
            type_indexes.erase(TypeIndex::DateTime64);
            type_indexes.insert(TypeIndex::String);
            return;
        }

        if (have_dates && have_datetimes)
        for (auto & type : data_types)
        {
            for (auto & type : data_types)
            if (isDate(type) && (have_datetimes || have_datetimes64))
            {
                if (isDate(type))
                if (have_datetimes64)
                    type = std::make_shared<DataTypeDateTime64>(9);
                else
                    type = std::make_shared<DataTypeDateTime>();
                type_indexes.erase(TypeIndex::Date);
            }
            else if (isDateTime(type) && have_datetimes64)
            {
                type = std::make_shared<DataTypeDateTime64>(9);
                type_indexes.erase(TypeIndex::DateTime);
            }

            type_indexes.erase(TypeIndex::Date);
        }
    }
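A sketch of the behavior this transform aims at, assuming datetime inference is enabled; the exact result can depend on the date/datetime inference settings:

```sql
SET input_format_try_infer_datetimes = 1;
-- One value looks like a Date, the other like a DateTime without a fractional
-- part, so the common inferred type for the column is expected to be DateTime.
DESC format(JSONEachRow, $$
{"ts" : "2022-01-01"}
{"ts" : "2022-01-01 00:00:00"}
$$)
```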
@@ -697,55 +705,87 @@ namespace

    bool tryInferDate(std::string_view field)
    {
        if (field.empty())
        /// Minimum length of Date text representation is 8 (YYYY-M-D) and maximum is 10 (YYYY-MM-DD)
        if (field.size() < 8 || field.size() > 10)
            return false;

        ReadBufferFromString buf(field);
        Float64 tmp_float;
        /// Check if it's just a number, and if so, don't try to infer Date from it,
        /// because we can interpret this number as a Date (for example 20000101 will be 2000-01-01)
        /// and it will lead to inferring Date instead of simple Int64/UInt64 in some cases.
        if (tryReadFloatText(tmp_float, buf) && buf.eof())
            return false;

        buf.seek(0, SEEK_SET); /// Return position to the beginning

        DayNum tmp;
        return tryReadDateText(tmp, buf) && buf.eof();
    }

    bool tryInferDateTime(std::string_view field, const FormatSettings & settings)
    {
        if (field.empty())
        if (std::all_of(field.begin(), field.end(), isNumericASCII))
            return false;

        ReadBufferFromString buf(field);
        Float64 tmp_float;
        DayNum tmp;
        return tryReadDateText(tmp, buf, DateLUT::instance(), /*allowed_delimiters=*/"-/:") && buf.eof();
    }

    DataTypePtr tryInferDateTimeOrDateTime64(std::string_view field, const FormatSettings & settings)
    {
        /// Don't try to infer DateTime if string is too long.
        /// It's difficult to say what is the real maximum length of
        /// DateTime we can parse using BestEffort approach.
        /// 50 symbols is more or less valid limit for date times that makes sense.
        if (field.empty() || field.size() > 50)
            return nullptr;

        /// Check that we have at least one digit, don't infer datetime form strings like "Apr"/"May"/etc.
        if (!std::any_of(field.begin(), field.end(), isNumericASCII))
            return nullptr;

        /// Check if it's just a number, and if so, don't try to infer DateTime from it,
        /// because we can interpret this number as a timestamp and it will lead to
        /// inferring DateTime instead of simple Int64/Float64 in some cases.
        /// inferring DateTime instead of simple Int64 in some cases.
        if (std::all_of(field.begin(), field.end(), isNumericASCII))
            return nullptr;

        ReadBufferFromString buf(field);
        Float64 tmp_float;
        /// Check if it's a float value, and if so, don't try to infer DateTime from it,
        /// because it will lead to inferring DateTime instead of simple Float64 in some cases.
        if (tryReadFloatText(tmp_float, buf) && buf.eof())
            return false;
            return nullptr;

        buf.seek(0, SEEK_SET); /// Return position to the beginning
        if (!settings.try_infer_datetimes_only_datetime64)
        {
            time_t tmp;
            switch (settings.date_time_input_format)
            {
                case FormatSettings::DateTimeInputFormat::Basic:
                    if (tryReadDateTimeText(tmp, buf, DateLUT::instance(), /*allowed_date_delimiters=*/"-/:", /*allowed_time_delimiters=*/":") && buf.eof())
                        return std::make_shared<DataTypeDateTime>();
                    break;
                case FormatSettings::DateTimeInputFormat::BestEffort:
                    if (tryParseDateTimeBestEffortStrict(tmp, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof())
                        return std::make_shared<DataTypeDateTime>();
                    break;
                case FormatSettings::DateTimeInputFormat::BestEffortUS:
                    if (tryParseDateTimeBestEffortUSStrict(tmp, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof())
                        return std::make_shared<DataTypeDateTime>();
                    break;
            }
        }

        buf.seek(0, SEEK_SET); /// Return position to the beginning
        DateTime64 tmp;
        switch (settings.date_time_input_format)
        {
            case FormatSettings::DateTimeInputFormat::Basic:
                if (tryReadDateTime64Text(tmp, 9, buf) && buf.eof())
                    return true;
                if (tryReadDateTime64Text(tmp, 9, buf, DateLUT::instance(), /*allowed_date_delimiters=*/"-/:", /*allowed_time_delimiters=*/":") && buf.eof())
                    return std::make_shared<DataTypeDateTime64>(9);
                break;
            case FormatSettings::DateTimeInputFormat::BestEffort:
                if (tryParseDateTime64BestEffort(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof())
                    return true;
                if (tryParseDateTime64BestEffortStrict(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof())
                    return std::make_shared<DataTypeDateTime64>(9);
                break;
            case FormatSettings::DateTimeInputFormat::BestEffortUS:
                if (tryParseDateTime64BestEffortUS(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof())
                    return true;
                if (tryParseDateTime64BestEffortUSStrict(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof())
                    return std::make_shared<DataTypeDateTime64>(9);
                break;
        }

        return false;
        return nullptr;
    }

    template <bool is_json>
@@ -1439,8 +1479,11 @@ DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const Forma
    if (settings.try_infer_dates && tryInferDate(field))
        return std::make_shared<DataTypeDate>();

    if (settings.try_infer_datetimes && tryInferDateTime(field, settings))
        return std::make_shared<DataTypeDateTime64>(9);
    if (settings.try_infer_datetimes)
    {
        if (auto type = tryInferDateTimeOrDateTime64(field, settings))
            return type;
    }

    return nullptr;
}
@@ -1271,7 +1271,7 @@ template void readJSONArrayInto<PaddedPODArray<UInt8>, void>(PaddedPODArray<UInt
template bool readJSONArrayInto<PaddedPODArray<UInt8>, bool>(PaddedPODArray<UInt8> & s, ReadBuffer & buf);

template <typename ReturnType>
ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf)
ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters)
{
    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;

@@ -1318,6 +1318,9 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf)
    }
    else
    {
        if (!isSymbolIn(*buf.position(), allowed_delimiters))
            return error();

        ++buf.position();

        if (!append_digit(month))
@@ -1325,7 +1328,11 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf)
            append_digit(month);

        if (!buf.eof() && !isNumericASCII(*buf.position()))
        {
            if (!isSymbolIn(*buf.position(), allowed_delimiters))
                return error();
            ++buf.position();
        }
        else
            return error();

@@ -1338,12 +1345,12 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf)
    return ReturnType(true);
}

template void readDateTextFallback<void>(LocalDate &, ReadBuffer &);
template bool readDateTextFallback<bool>(LocalDate &, ReadBuffer &);
template void readDateTextFallback<void>(LocalDate &, ReadBuffer &, const char * allowed_delimiters);
template bool readDateTextFallback<bool>(LocalDate &, ReadBuffer &, const char * allowed_delimiters);


template <typename ReturnType, bool dt64_mode>
ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut)
ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters, const char * allowed_time_delimiters)
{
    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;

@@ -1413,6 +1420,9 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D
        if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[2]) || !isNumericASCII(s[3])
            || !isNumericASCII(s[5]) || !isNumericASCII(s[6]) || !isNumericASCII(s[8]) || !isNumericASCII(s[9]))
            return false;

        if (!isSymbolIn(s[4], allowed_date_delimiters) || !isSymbolIn(s[7], allowed_date_delimiters))
            return false;
    }

    UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0');
@@ -1443,6 +1453,9 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D
        if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[3]) || !isNumericASCII(s[4])
            || !isNumericASCII(s[6]) || !isNumericASCII(s[7]))
            return false;

        if (!isSymbolIn(s[2], allowed_time_delimiters) || !isSymbolIn(s[5], allowed_time_delimiters))
            return false;
    }

    hour = (s[0] - '0') * 10 + (s[1] - '0');
@@ -1488,10 +1501,10 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D
    return ReturnType(true);
}

template void readDateTimeTextFallback<void, false>(time_t &, ReadBuffer &, const DateLUTImpl &);
template void readDateTimeTextFallback<void, true>(time_t &, ReadBuffer &, const DateLUTImpl &);
template bool readDateTimeTextFallback<bool, false>(time_t &, ReadBuffer &, const DateLUTImpl &);
template bool readDateTimeTextFallback<bool, true>(time_t &, ReadBuffer &, const DateLUTImpl &);
template void readDateTimeTextFallback<void, false>(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *);
template void readDateTimeTextFallback<void, true>(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *);
template bool readDateTimeTextFallback<bool, false>(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *);
template bool readDateTimeTextFallback<bool, true>(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *);


template <typename ReturnType>
@@ -703,13 +703,28 @@ struct NullOutput
};

template <typename ReturnType>
ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf);
ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters);

inline bool isSymbolIn(char symbol, const char * symbols)
{
    if (symbols == nullptr)
        return true;

    const char * pos = symbols;
    while (*pos)
    {
        if (*pos == symbol)
            return true;
        ++pos;
    }
    return false;
}

/// In YYYY-MM-DD format.
/// For convenience, Month and Day parts can have single digit instead of two digits.
/// Any separators other than '-' are supported.
template <typename ReturnType = void>
inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf)
inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters = nullptr)
{
    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;

@ -753,6 +768,9 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf)
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!isSymbolIn(pos[-1], allowed_delimiters))
|
||||
return error();
|
||||
|
||||
if (!isNumericASCII(pos[0]))
|
||||
return error();
|
||||
|
||||
@ -768,6 +786,9 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf)
|
||||
if (isNumericASCII(pos[-1]) || !isNumericASCII(pos[0]))
|
||||
return error();
|
||||
|
||||
if (!isSymbolIn(pos[-1], allowed_delimiters))
|
||||
return error();
|
||||
|
||||
day = pos[0] - '0';
|
||||
if (isNumericASCII(pos[1]))
|
||||
{
|
||||
@ -783,7 +804,7 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf)
|
||||
return ReturnType(true);
|
||||
}
|
||||
else
|
||||
return readDateTextFallback<ReturnType>(date, buf);
|
||||
return readDateTextFallback<ReturnType>(date, buf, allowed_delimiters);
|
||||
}
|
||||
|
||||
inline void convertToDayNum(DayNum & date, ExtendedDayNum & from)
|
||||
@ -797,15 +818,15 @@ inline void convertToDayNum(DayNum & date, ExtendedDayNum & from)
|
||||
}
|
||||
|
||||
template <typename ReturnType = void>
|
||||
inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut)
|
||||
inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_delimiters = nullptr)
|
||||
{
|
||||
static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
|
||||
|
||||
LocalDate local_date;
|
||||
|
||||
if constexpr (throw_exception)
|
||||
readDateTextImpl<ReturnType>(local_date, buf);
|
||||
else if (!readDateTextImpl<ReturnType>(local_date, buf))
|
||||
readDateTextImpl<ReturnType>(local_date, buf, allowed_delimiters);
|
||||
else if (!readDateTextImpl<ReturnType>(local_date, buf, allowed_delimiters))
|
||||
return false;
|
||||
|
||||
ExtendedDayNum ret = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day());
|
||||
@ -814,15 +835,15 @@ inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLU
|
||||
}
|
||||
|
||||
template <typename ReturnType = void>
|
||||
inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut)
|
||||
inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_delimiters = nullptr)
|
||||
{
|
||||
static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
|
||||
|
||||
LocalDate local_date;
|
||||
|
||||
if constexpr (throw_exception)
|
||||
readDateTextImpl<ReturnType>(local_date, buf);
|
||||
else if (!readDateTextImpl<ReturnType>(local_date, buf))
|
||||
readDateTextImpl<ReturnType>(local_date, buf, allowed_delimiters);
|
||||
else if (!readDateTextImpl<ReturnType>(local_date, buf, allowed_delimiters))
|
||||
return false;
|
||||
|
||||
/// When the parameter is out of rule or out of range, Date32 uses 1925-01-01 as the default value (-DateLUT::instance().getDayNumOffsetEpoch(), -16436) and Date uses 1970-01-01.
|
||||
@ -846,19 +867,19 @@ inline void readDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTI
|
||||
readDateTextImpl<void>(date, buf, date_lut);
|
||||
}
|
||||
|
||||
inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf)
inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters = nullptr)
{
    return readDateTextImpl<bool>(date, buf);
    return readDateTextImpl<bool>(date, buf, allowed_delimiters);
}

inline bool tryReadDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance())
inline bool tryReadDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_delimiters = nullptr)
{
    return readDateTextImpl<bool>(date, buf, time_zone);
    return readDateTextImpl<bool>(date, buf, time_zone, allowed_delimiters);
}

inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance())
inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_delimiters = nullptr)
{
    return readDateTextImpl<bool>(date, buf, time_zone);
    return readDateTextImpl<bool>(date, buf, time_zone, allowed_delimiters);
}
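A minimal sketch (not from this commit) of how a caller might use the new allowed_delimiters argument of tryReadDateText; the wrapper name and the delimiter set "-/" are illustrative assumptions.

#include <string>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <Common/LocalDate.h>

/// Sketch: accept only '-' and '/' as date separators; passing nullptr keeps the old "any separator" behaviour.
bool tryParseRestrictedDate(const std::string & text, LocalDate & date)
{
    DB::ReadBufferFromString buf(text);
    return DB::tryReadDateText(date, buf, "-/");   /// "2024-06-01" parses, "2024.06.01" is rejected
}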
|
||||
|
||||
UUID parseUUID(std::span<const UInt8> src);
|
||||
@ -975,13 +996,13 @@ inline T parseFromString(std::string_view str)
|
||||
|
||||
|
||||
template <typename ReturnType = void, bool dt64_mode = false>
|
||||
ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut);
|
||||
ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr);
|
||||
|
||||
/** In YYYY-MM-DD hh:mm:ss or YYYY-MM-DD format, according to specified time zone.
|
||||
* As an exception, also supported parsing of unix timestamp in form of decimal number.
|
||||
*/
|
||||
template <typename ReturnType = void, bool dt64_mode = false>
|
||||
inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut)
|
||||
inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr)
|
||||
{
|
||||
static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
|
||||
|
||||
@ -1014,6 +1035,9 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons
|
||||
if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[2]) || !isNumericASCII(s[3])
|
||||
|| !isNumericASCII(s[5]) || !isNumericASCII(s[6]) || !isNumericASCII(s[8]) || !isNumericASCII(s[9]))
|
||||
return ReturnType(false);
|
||||
|
||||
if (!isSymbolIn(s[4], allowed_date_delimiters) || !isSymbolIn(s[7], allowed_date_delimiters))
|
||||
return ReturnType(false);
|
||||
}
|
||||
|
||||
UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0');
|
||||
@ -1033,6 +1057,9 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons
|
||||
if (!isNumericASCII(s[11]) || !isNumericASCII(s[12]) || !isNumericASCII(s[14]) || !isNumericASCII(s[15])
|
||||
|| !isNumericASCII(s[17]) || !isNumericASCII(s[18]))
|
||||
return ReturnType(false);
|
||||
|
||||
if (!isSymbolIn(s[13], allowed_time_delimiters) || !isSymbolIn(s[16], allowed_time_delimiters))
|
||||
return ReturnType(false);
|
||||
}
|
||||
|
||||
hour = (s[11] - '0') * 10 + (s[12] - '0');
|
||||
@ -1057,11 +1084,11 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons
|
||||
return readIntTextImpl<time_t, ReturnType, ReadIntTextCheckOverflow::CHECK_OVERFLOW>(datetime, buf);
|
||||
}
|
||||
else
|
||||
return readDateTimeTextFallback<ReturnType, dt64_mode>(datetime, buf, date_lut);
|
||||
return readDateTimeTextFallback<ReturnType, dt64_mode>(datetime, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters);
|
||||
}
|
||||
|
||||
template <typename ReturnType>
|
||||
inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut)
|
||||
inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr)
|
||||
{
|
||||
static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
|
||||
|
||||
@ -1075,7 +1102,7 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re
|
||||
{
|
||||
try
|
||||
{
|
||||
readDateTimeTextImpl<ReturnType, true>(whole, buf, date_lut);
|
||||
readDateTimeTextImpl<ReturnType, true>(whole, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters);
|
||||
}
|
||||
catch (const DB::Exception &)
|
||||
{
|
||||
@ -1085,7 +1112,7 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re
|
||||
}
|
||||
else
|
||||
{
|
||||
auto ok = readDateTimeTextImpl<ReturnType, true>(whole, buf, date_lut);
|
||||
auto ok = readDateTimeTextImpl<ReturnType, true>(whole, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters);
|
||||
if (!ok && (buf.eof() || *buf.position() != '.'))
|
||||
return ReturnType(false);
|
||||
}
|
||||
@ -1168,14 +1195,14 @@ inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer
|
||||
readDateTimeTextImpl<void>(datetime64, scale, buf, date_lut);
|
||||
}
|
||||
|
||||
inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance())
inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr)
{
    return readDateTimeTextImpl<bool>(datetime, buf, time_zone);
    return readDateTimeTextImpl<bool>(datetime, buf, time_zone, allowed_date_delimiters, allowed_time_delimiters);
}

inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance())
inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance(), const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr)
{
    return readDateTimeTextImpl<bool>(datetime64, scale, buf, date_lut);
    return readDateTimeTextImpl<bool>(datetime64, scale, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters);
}
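Similarly, a sketch (not from this commit) of the DateTime counterpart with separate date and time delimiter sets; the wrapper name and the sets "-" and ":" are assumptions chosen for illustration.

#include <string>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>

/// Sketch: '-' between date parts, ':' between time parts.
bool tryParseRestrictedDateTime(const std::string & text, time_t & value, const DateLUTImpl & time_zone)
{
    DB::ReadBufferFromString buf(text);
    return DB::tryReadDateTimeText(value, buf, time_zone, "-", ":");   /// "2024-06-01 12:30:00" passes, "2024/06/01 12.30.00" does not
}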
|
||||
|
||||
inline void readDateTimeText(LocalDateTime & datetime, ReadBuffer & buf)
|
||||
|
@ -46,7 +46,7 @@ namespace ProfileEvents
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric S3DiskNoKeyErrors;
|
||||
extern const Metric DiskS3NoSuchKeyErrors;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
@ -701,7 +701,7 @@ RequestResult Client::processRequestResult(RequestResult && outcome) const
|
||||
return std::forward<RequestResult>(outcome);
|
||||
|
||||
if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
|
||||
CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors);
|
||||
CurrentMetrics::add(CurrentMetrics::DiskS3NoSuchKeyErrors);
|
||||
|
||||
String enriched_message = fmt::format(
|
||||
"{} {}",
|
||||
|
@ -145,12 +145,16 @@ Aws::String AWSEC2MetadataClient::getDefaultCredentialsSecurely() const
|
||||
{
|
||||
String user_agent_string = awsComputeUserAgentString();
|
||||
auto [new_token, response_code] = getEC2MetadataToken(user_agent_string);
|
||||
if (response_code == Aws::Http::HttpResponseCode::BAD_REQUEST)
|
||||
if (response_code == Aws::Http::HttpResponseCode::BAD_REQUEST
|
||||
|| response_code == Aws::Http::HttpResponseCode::REQUEST_NOT_MADE)
|
||||
{
|
||||
/// At least the host should be available and reply, otherwise neither IMDSv2 nor IMDSv1 are usable.
|
||||
return {};
|
||||
}
|
||||
else if (response_code != Aws::Http::HttpResponseCode::OK || new_token.empty())
|
||||
{
|
||||
LOG_TRACE(logger, "Calling EC2MetadataService to get token failed, "
|
||||
"falling back to less secure way. HTTP response code: {}", response_code);
|
||||
"falling back to a less secure way. HTTP response code: {}", response_code);
|
||||
return getDefaultCredentials();
|
||||
}
|
||||
|
||||
@ -247,7 +251,7 @@ static Aws::String getAWSMetadataEndpoint()
|
||||
return ec2_metadata_service_endpoint;
|
||||
}
|
||||
|
||||
std::shared_ptr<AWSEC2MetadataClient> InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration)
|
||||
std::shared_ptr<AWSEC2MetadataClient> createEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration)
|
||||
{
|
||||
auto endpoint = getAWSMetadataEndpoint();
|
||||
return std::make_shared<AWSEC2MetadataClient>(client_configuration, endpoint.c_str());
|
||||
@ -781,11 +785,13 @@ S3CredentialsProviderChain::S3CredentialsProviderChain(
|
||||
|
||||
/// EC2MetadataService throttles by delaying the response so the service client should set a large read timeout.
|
||||
/// EC2MetadataService delay is on the order of seconds, so it only makes sense to retry after a couple of seconds.
|
||||
aws_client_configuration.connectTimeoutMs = 1000;
|
||||
/// But the connection timeout should be small because there is the case when there is no IMDS at all,
|
||||
/// like outside of the cloud, on your own machines.
|
||||
aws_client_configuration.connectTimeoutMs = 10;
|
||||
aws_client_configuration.requestTimeoutMs = 1000;
|
||||
|
||||
aws_client_configuration.retryStrategy = std::make_shared<Aws::Client::DefaultRetryStrategy>(1, 1000);
|
||||
auto ec2_metadata_client = InitEC2MetadataClient(aws_client_configuration);
|
||||
auto ec2_metadata_client = createEC2MetadataClient(aws_client_configuration);
|
||||
auto config_loader = std::make_shared<AWSEC2InstanceProfileConfigLoader>(ec2_metadata_client, !credentials_configuration.use_insecure_imds_request);
|
||||
|
||||
AddProvider(std::make_shared<AWSInstanceProfileCredentialsProvider>(config_loader));
|
||||
|
@ -70,7 +70,7 @@ private:
|
||||
LoggerPtr logger;
|
||||
};
|
||||
|
||||
std::shared_ptr<AWSEC2MetadataClient> InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration);
|
||||
std::shared_ptr<AWSEC2MetadataClient> createEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration);
|
||||
|
||||
class AWSEC2InstanceProfileConfigLoader : public Aws::Config::AWSProfileConfigLoader
|
||||
{
|
||||
|
@ -128,7 +128,7 @@ void PocoHTTPClientConfiguration::updateSchemeAndRegion()
|
||||
}
|
||||
else
|
||||
{
|
||||
/// In global mode AWS C++ SDK send `us-east-1` but accept switching to another one if being suggested.
|
||||
/// In global mode AWS C++ SDK sends `us-east-1` but accepts switching to another one if being suggested.
|
||||
region = Aws::Region::AWS_GLOBAL;
|
||||
}
|
||||
}
|
||||
|
@ -1,8 +1,8 @@
|
||||
#include <IO/S3/URI.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Storages/NamedCollectionsHelpers.h>
|
||||
#include "Common/Macros.h"
|
||||
|
||||
#if USE_AWS_S3
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Common/Macros.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Common/re2.h>
|
||||
@ -10,6 +10,7 @@
|
||||
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -40,21 +41,13 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax)
|
||||
/// Case when AWS Private Link Interface is being used
|
||||
/// E.g. (bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/bucket-name/key)
|
||||
/// https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html
|
||||
static const RE2 aws_private_link_style_pattern(R"(bucket\.vpce\-([a-z0-9\-.]+)\.vpce.amazonaws.com(:\d{1,5})?)");
|
||||
static const RE2 aws_private_link_style_pattern(R"(bucket\.vpce\-([a-z0-9\-.]+)\.vpce\.amazonaws\.com(:\d{1,5})?)");
|
||||
|
||||
/// Case when bucket name and key represented in path of S3 URL.
|
||||
/// Case when bucket name and key represented in the path of S3 URL.
|
||||
/// E.g. (https://s3.region.amazonaws.com/bucket-name/key)
|
||||
/// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access
|
||||
static const RE2 path_style_pattern("^/([^/]*)/(.*)");
|
||||
|
||||
static constexpr auto S3 = "S3";
|
||||
static constexpr auto S3EXPRESS = "S3EXPRESS";
|
||||
static constexpr auto COSN = "COSN";
|
||||
static constexpr auto COS = "COS";
|
||||
static constexpr auto OBS = "OBS";
|
||||
static constexpr auto OSS = "OSS";
|
||||
static constexpr auto EOS = "EOS";
|
||||
|
||||
if (allow_archive_path_syntax)
|
||||
std::tie(uri_str, archive_pattern) = getURIAndArchivePattern(uri_);
|
||||
else
|
||||
@ -85,7 +78,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax)
|
||||
URIConverter::modifyURI(uri, mapper);
|
||||
}
|
||||
|
||||
storage_name = S3;
|
||||
storage_name = "S3";
|
||||
|
||||
if (uri.getHost().empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host is empty in S3 URI.");
|
||||
@ -93,11 +86,13 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax)
|
||||
/// Extract object version ID from query string.
|
||||
bool has_version_id = false;
|
||||
for (const auto & [query_key, query_value] : uri.getQueryParameters())
|
||||
{
|
||||
if (query_key == "versionId")
|
||||
{
|
||||
version_id = query_value;
|
||||
has_version_id = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Poco::URI will ignore '?' when parsing the path, but if there is a versionId in the http parameter,
|
||||
/// '?' can not be used as a wildcard, otherwise it will be ambiguous.
|
||||
@ -129,15 +124,8 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax)
|
||||
}
|
||||
|
||||
boost::to_upper(name);
|
||||
/// For S3Express it will look like s3express-eun1-az1, i.e. contain region and AZ info
|
||||
if (name != S3 && !name.starts_with(S3EXPRESS) && name != COS && name != OBS && name != OSS && name != EOS)
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"Object storage system name is unrecognized in virtual hosted style S3 URI: {}",
|
||||
quoteString(name));
|
||||
|
||||
if (name == COS)
|
||||
storage_name = COSN;
|
||||
if (name == "COS")
|
||||
storage_name = "COSN";
|
||||
else
|
||||
storage_name = name;
|
||||
}
|
||||
@ -148,13 +136,22 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax)
|
||||
validateBucket(bucket, uri);
|
||||
}
|
||||
    else
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket or key name are invalid in S3 URI.");
    {
        /// Custom endpoint, e.g. a public domain of Cloudflare R2,
        /// which could be served by a custom server-side code.
        storage_name = "S3";
        bucket = "default";
        is_virtual_hosted_style = false;
        endpoint = uri.getScheme() + "://" + uri.getAuthority();
        if (!uri.getPath().empty())
            key = uri.getPath().substr(1);
    }
}
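The practical effect of the new fallback branch, under an assumed input (the host below is made up): a URI such as https://files.example-r2-host.com/data.parquet, which matches neither the virtual-hosted-style nor the path-style pattern, is no longer rejected. Instead the whole scheme and authority become the endpoint, the bucket falls back to "default", the path without its leading slash becomes the key, and is_virtual_hosted_style is set to false.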
|
||||
|
||||
void URI::addRegionToURI(const std::string &region)
{
    if (auto pos = endpoint.find("amazonaws.com"); pos != std::string::npos)
        endpoint = endpoint.substr(0, pos) + region + "." + endpoint.substr(pos);
    if (auto pos = endpoint.find(".amazonaws.com"); pos != std::string::npos)
        endpoint = endpoint.substr(0, pos) + "." + region + endpoint.substr(pos);
}
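A standalone sketch (not part of the commit) that mirrors the corrected insertion logic above, to make the expected endpoint rewriting concrete; the endpoint and region values are illustrative.

#include <string>

/// Mirrors the new addRegionToURI() insertion for illustration only.
std::string addRegion(std::string endpoint, const std::string & region)
{
    if (auto pos = endpoint.find(".amazonaws.com"); pos != std::string::npos)
        endpoint = endpoint.substr(0, pos) + "." + region + endpoint.substr(pos);
    return endpoint;
}

/// addRegion("https://bucket-name.s3.amazonaws.com", "us-east-2")
///   == "https://bucket-name.s3.us-east-2.amazonaws.com"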
|
||||
|
||||
void URI::validateBucket(const String & bucket, const Poco::URI & uri)
|
||||
|
@ -1,14 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#if USE_AWS_S3
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <Poco/URI.h>
|
||||
|
||||
|
||||
namespace DB::S3
|
||||
{
|
||||
|
||||
@ -23,7 +23,7 @@ namespace DB::S3
|
||||
struct URI
|
||||
{
|
||||
Poco::URI uri;
|
||||
// Custom endpoint if URI scheme is not S3.
|
||||
// Custom endpoint if URI scheme, if not S3.
|
||||
std::string endpoint;
|
||||
std::string bucket;
|
||||
std::string key;
|
||||
|
@ -82,13 +82,14 @@ struct DateTimeSubsecondPart
|
||||
UInt8 digits;
|
||||
};
|
||||
|
||||
template <typename ReturnType, bool is_us_style>
|
||||
template <typename ReturnType, bool is_us_style, bool strict = false, bool is_64 = false>
|
||||
ReturnType parseDateTimeBestEffortImpl(
|
||||
time_t & res,
|
||||
ReadBuffer & in,
|
||||
const DateLUTImpl & local_time_zone,
|
||||
const DateLUTImpl & utc_time_zone,
|
||||
DateTimeSubsecondPart * fractional)
|
||||
DateTimeSubsecondPart * fractional,
|
||||
const char * allowed_date_delimiters = nullptr)
|
||||
{
|
||||
auto on_error = [&]<typename... FmtArgs>(int error_code [[maybe_unused]],
|
||||
FormatStringHelper<FmtArgs...> fmt_string [[maybe_unused]],
|
||||
@ -170,22 +171,36 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
fractional->digits = 3;
|
||||
readDecimalNumber<3>(fractional->value, digits + 10);
|
||||
}
|
||||
else if constexpr (strict)
|
||||
{
|
||||
/// Fractional part is not allowed.
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected fractional part");
|
||||
}
|
||||
return ReturnType(true);
|
||||
}
|
||||
else if (num_digits == 10 && !year && !has_time)
|
||||
{
|
||||
if (strict && month)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated");
|
||||
|
||||
/// This is unix timestamp.
|
||||
readDecimalNumber<10>(res, digits);
|
||||
return ReturnType(true);
|
||||
}
|
||||
else if (num_digits == 9 && !year && !has_time)
|
||||
{
|
||||
if (strict && month)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated");
|
||||
|
||||
/// This is unix timestamp.
|
||||
readDecimalNumber<9>(res, digits);
|
||||
return ReturnType(true);
|
||||
}
|
||||
else if (num_digits == 14 && !year && !has_time)
|
||||
{
|
||||
if (strict && month)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated");
|
||||
|
||||
/// This is YYYYMMDDhhmmss
|
||||
readDecimalNumber<4>(year, digits);
|
||||
readDecimalNumber<2>(month, digits + 4);
|
||||
@ -197,6 +212,9 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
}
|
||||
else if (num_digits == 8 && !year)
|
||||
{
|
||||
if (strict && month)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated");
|
||||
|
||||
/// This is YYYYMMDD
|
||||
readDecimalNumber<4>(year, digits);
|
||||
readDecimalNumber<2>(month, digits + 4);
|
||||
@ -272,6 +290,9 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
else
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected number of decimal digits after year and month: {}", num_digits);
|
||||
}
|
||||
|
||||
if (!isSymbolIn(delimiter_after_year, allowed_date_delimiters))
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: '{}' delimiter between date parts is not allowed", delimiter_after_year);
|
||||
}
|
||||
}
|
||||
else if (num_digits == 2 || num_digits == 1)
|
||||
@ -403,9 +424,16 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
else
|
||||
{
|
||||
if (day_of_month)
|
||||
{
|
||||
if (strict && hour)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: hour component is duplicated");
|
||||
|
||||
hour = hour_or_day_of_month_or_month;
|
||||
}
|
||||
else
|
||||
{
|
||||
day_of_month = hour_or_day_of_month_or_month;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (num_digits != 0)
|
||||
@ -446,6 +474,11 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
fractional->digits = num_digits;
|
||||
readDecimalNumber(fractional->value, num_digits, digits);
|
||||
}
|
||||
else if (strict)
|
||||
{
|
||||
/// Fractional part is not allowed.
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected fractional part");
|
||||
}
|
||||
}
|
||||
else if (c == '+' || c == '-')
|
||||
{
|
||||
@ -582,12 +615,24 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: neither Date nor Time was parsed successfully");
|
||||
|
||||
if (!day_of_month)
|
||||
{
|
||||
if constexpr (strict)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: day of month is required");
|
||||
day_of_month = 1;
|
||||
}
|
||||
|
||||
if (!month)
|
||||
{
|
||||
if constexpr (strict)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month is required");
|
||||
month = 1;
|
||||
}
|
||||
|
||||
if (!year)
|
||||
{
|
||||
if constexpr (strict)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year is required");
|
||||
|
||||
/// If year is not specified, it will be the current year if the date is unknown or not greater than today,
|
||||
/// otherwise it will be the previous year.
|
||||
/// This convoluted logic is needed to parse the syslog format, which looks as follows: "Mar 3 01:33:48".
|
||||
@ -641,6 +686,20 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
}
|
||||
};
|
||||
|
||||
if constexpr (strict)
|
||||
{
|
||||
if constexpr (is_64)
|
||||
{
|
||||
if (year < 1900)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime64: year {} is less than minimum supported year 1900", year);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (year < 1970)
|
||||
return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year {} is less than minimum supported year 1970", year);
|
||||
}
|
||||
}
|
||||
|
||||
if (has_time_zone_offset)
|
||||
{
|
||||
res = utc_time_zone.makeDateTime(year, month, day_of_month, hour, minute, second);
|
||||
@ -654,20 +713,20 @@ ReturnType parseDateTimeBestEffortImpl(
|
||||
return ReturnType(true);
|
||||
}
|
||||
|
||||
template <typename ReturnType, bool is_us_style>
|
||||
ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone)
|
||||
template <typename ReturnType, bool is_us_style, bool strict = false>
|
||||
ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters = nullptr)
|
||||
{
|
||||
time_t whole;
|
||||
DateTimeSubsecondPart subsecond = {0, 0}; // needs to be explicitly initialized since it could be missing from input string
|
||||
|
||||
if constexpr (std::is_same_v<ReturnType, bool>)
|
||||
{
|
||||
if (!parseDateTimeBestEffortImpl<bool, is_us_style>(whole, in, local_time_zone, utc_time_zone, &subsecond))
|
||||
if (!parseDateTimeBestEffortImpl<bool, is_us_style, strict, true>(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters))
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
parseDateTimeBestEffortImpl<ReturnType, is_us_style>(whole, in, local_time_zone, utc_time_zone, &subsecond);
|
||||
parseDateTimeBestEffortImpl<ReturnType, is_us_style, strict, true>(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters);
|
||||
}
|
||||
|
||||
|
||||
@ -730,4 +789,24 @@ bool tryParseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer &
|
||||
return parseDateTime64BestEffortImpl<bool, true>(res, scale, in, local_time_zone, utc_time_zone);
|
||||
}
|
||||
|
||||
bool tryParseDateTimeBestEffortStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters)
|
||||
{
|
||||
return parseDateTimeBestEffortImpl<bool, false, true>(res, in, local_time_zone, utc_time_zone, nullptr, allowed_date_delimiters);
|
||||
}
|
||||
|
||||
bool tryParseDateTimeBestEffortUSStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters)
|
||||
{
|
||||
return parseDateTimeBestEffortImpl<bool, true, true>(res, in, local_time_zone, utc_time_zone, nullptr, allowed_date_delimiters);
|
||||
}
|
||||
|
||||
bool tryParseDateTime64BestEffortStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters)
|
||||
{
|
||||
return parseDateTime64BestEffortImpl<bool, false, true>(res, scale, in, local_time_zone, utc_time_zone, allowed_date_delimiters);
|
||||
}
|
||||
|
||||
bool tryParseDateTime64BestEffortUSStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters)
|
||||
{
|
||||
return parseDateTime64BestEffortImpl<bool, true, true>(res, scale, in, local_time_zone, utc_time_zone, allowed_date_delimiters);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -63,4 +63,12 @@ void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in,
|
||||
bool tryParseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
|
||||
void parseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
|
||||
bool tryParseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
|
||||
|
||||
/// More strict version of best effort parsing. Requires day, month and year to be present, checks for allowed
/// delimiters between date components, makes additional correctness checks. Used in schema inference of date/time types.
bool tryParseDateTimeBestEffortStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters);
bool tryParseDateTimeBestEffortUSStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters);
bool tryParseDateTime64BestEffortStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters);
bool tryParseDateTime64BestEffortUSStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters);
|
||||
|
||||
}
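A sketch (not from this commit) of how a schema-inference-style caller might use the strict variant declared above; the helper name, the delimiter set "-/" and the final eof check are assumptions.

#include <string>
#include <IO/ReadBufferFromString.h>
#include <IO/parseDateTimeBestEffort.h>
#include <Common/DateLUT.h>

bool looksLikeDateTime(const std::string & value)
{
    const DateLUTImpl & tz = DateLUT::instance();
    time_t result = 0;
    DB::ReadBufferFromString buf(value);
    /// Strict parsing: year, month and day must all be present and only '-' or '/' may separate date parts.
    return DB::tryParseDateTimeBestEffortStrict(result, buf, tz, tz, "-/") && buf.eof();
}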
|
||||
|
@ -206,11 +206,6 @@ TEST(S3UriTest, validPatterns)
|
||||
}
|
||||
}
|
||||
|
||||
TEST_P(S3UriTest, invalidPatterns)
|
||||
{
|
||||
ASSERT_ANY_THROW(S3::URI new_uri(GetParam()));
|
||||
}
|
||||
|
||||
TEST(S3UriTest, versionIdChecks)
|
||||
{
|
||||
for (const auto& test_case : TestCases)
|
||||
@ -223,19 +218,5 @@ TEST(S3UriTest, versionIdChecks)
|
||||
}
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(
|
||||
S3,
|
||||
S3UriTest,
|
||||
testing::Values(
|
||||
"https:///",
|
||||
"https://.s3.amazonaws.com/key",
|
||||
"https://s3.amazonaws.com/key",
|
||||
"https://jokserfn.s3amazonaws.com/key",
|
||||
"https://s3.amazonaws.com//",
|
||||
"https://amazonaws.com/",
|
||||
"https://amazonaws.com//",
|
||||
"https://amazonaws.com//key"));
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -804,7 +804,8 @@ bool FileCache::tryReserve(
|
||||
const size_t size,
|
||||
FileCacheReserveStat & reserve_stat,
|
||||
const UserInfo & user,
|
||||
size_t lock_wait_timeout_milliseconds)
|
||||
size_t lock_wait_timeout_milliseconds,
|
||||
std::string & failure_reason)
|
||||
{
|
||||
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::FilesystemCacheReserveMicroseconds);
|
||||
|
||||
@ -817,6 +818,7 @@ bool FileCache::tryReserve(
|
||||
if (cache_is_being_resized.load(std::memory_order_relaxed))
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfCacheResize);
|
||||
failure_reason = "cache is being resized";
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -824,6 +826,7 @@ bool FileCache::tryReserve(
|
||||
if (!cache_lock)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention);
|
||||
failure_reason = "cache contention";
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -847,6 +850,7 @@ bool FileCache::tryReserve(
|
||||
LOG_TEST(log, "Query limit exceeded, space reservation failed, "
|
||||
"recache_on_query_limit_exceeded is disabled (while reserving for {}:{})",
|
||||
file_segment.key(), file_segment.offset());
|
||||
failure_reason = "query limit exceeded";
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -877,6 +881,7 @@ bool FileCache::tryReserve(
|
||||
if (!query_priority->collectCandidatesForEviction(
|
||||
size, required_elements_num, reserve_stat, eviction_candidates, {}, user.user_id, cache_lock))
|
||||
{
|
||||
failure_reason = "cannot evict enough space for query limit";
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -891,11 +896,15 @@ bool FileCache::tryReserve(
|
||||
if (!main_priority->collectCandidatesForEviction(
|
||||
size, required_elements_num, reserve_stat, eviction_candidates, queue_iterator, user.user_id, cache_lock))
|
||||
{
|
||||
failure_reason = "cannot evict enough space";
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!file_segment.getKeyMetadata()->createBaseDirectory())
|
||||
{
|
||||
failure_reason = "not enough space on device";
|
||||
return false;
|
||||
}
|
||||
|
||||
if (eviction_candidates.size() > 0)
|
||||
{
|
||||
|
@ -165,7 +165,8 @@ public:
|
||||
size_t size,
|
||||
FileCacheReserveStat & stat,
|
||||
const UserInfo & user,
|
||||
size_t lock_wait_timeout_milliseconds);
|
||||
size_t lock_wait_timeout_milliseconds,
|
||||
std::string & failure_reason);
|
||||
|
||||
std::vector<FileSegment::Info> getFileSegmentInfos(const UserID & user_id);
|
||||
|
||||
|
@ -502,7 +502,11 @@ LockedKeyPtr FileSegment::lockKeyMetadata(bool assert_exists) const
|
||||
return metadata->tryLock();
|
||||
}
|
||||
|
||||
bool FileSegment::reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat)
|
||||
bool FileSegment::reserve(
|
||||
size_t size_to_reserve,
|
||||
size_t lock_wait_timeout_milliseconds,
|
||||
std::string & failure_reason,
|
||||
FileCacheReserveStat * reserve_stat)
|
||||
{
|
||||
if (!size_to_reserve)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Zero space reservation is not allowed");
|
||||
@ -554,7 +558,7 @@ bool FileSegment::reserve(size_t size_to_reserve, size_t lock_wait_timeout_milli
|
||||
if (!reserve_stat)
|
||||
reserve_stat = &dummy_stat;
|
||||
|
||||
bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user, lock_wait_timeout_milliseconds);
|
||||
bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user, lock_wait_timeout_milliseconds, failure_reason);
|
||||
|
||||
if (!reserved)
|
||||
setDownloadFailedUnlocked(lock());
|
||||
|
@ -201,7 +201,11 @@ public:

    /// Try to reserve exactly `size` bytes (in addition to the getDownloadedSize() bytes already downloaded).
    /// Returns true if reservation was successful, false otherwise.
    bool reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat = nullptr);
    bool reserve(
        size_t size_to_reserve,
        size_t lock_wait_timeout_milliseconds,
        std::string & failure_reason,
        FileCacheReserveStat * reserve_stat = nullptr);
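A sketch (not from this commit) of how a caller is expected to thread the new failure_reason argument through; file_segment, size, lock_wait_timeout_ms and log are assumed to be in scope.

std::string failure_reason;
FileCacheReserveStat reserve_stat;
if (!file_segment->reserve(size, lock_wait_timeout_ms, failure_reason, &reserve_stat))
{
    /// The reason ("cache is being resized", "cache contention", ...) now surfaces in logs and exceptions.
    LOG_TEST(log, "Failed to reserve {} bytes: {} ({})", size, failure_reason, file_segment->getInfoForLog());
}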
|
||||
|
||||
/// Write data into reserved space.
|
||||
void write(char * from, size_t size, size_t offset_in_file);
|
||||
|
@ -705,7 +705,8 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optional<Memor
|
||||
{
|
||||
auto size = reader->available();
|
||||
|
||||
if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds))
|
||||
std::string failure_reason;
|
||||
if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds, failure_reason))
|
||||
{
|
||||
LOG_TEST(
|
||||
log, "Failed to reserve space during background download "
|
||||
|
@ -75,7 +75,8 @@ void WriteBufferToFileSegment::nextImpl()
|
||||
FileCacheReserveStat reserve_stat;
|
||||
/// In case of an error, we don't need to finalize the file segment
|
||||
/// because it will be deleted soon and completed in the holder's destructor.
|
||||
bool ok = file_segment->reserve(bytes_to_write, reserve_space_lock_wait_timeout_milliseconds, &reserve_stat);
|
||||
std::string failure_reason;
|
||||
bool ok = file_segment->reserve(bytes_to_write, reserve_space_lock_wait_timeout_milliseconds, failure_reason, &reserve_stat);
|
||||
|
||||
if (!ok)
|
||||
{
|
||||
@ -84,9 +85,10 @@ void WriteBufferToFileSegment::nextImpl()
|
||||
reserve_stat_msg += fmt::format("{} hold {}, can release {}; ",
|
||||
toString(kind), ReadableSize(stat.non_releasable_size), ReadableSize(stat.releasable_size));
|
||||
|
||||
throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Failed to reserve {} bytes for {}: {}(segment info: {})",
|
||||
throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Failed to reserve {} bytes for {}: reason {}, {}(segment info: {})",
|
||||
bytes_to_write,
|
||||
file_segment->getKind() == FileSegmentKind::Temporary ? "temporary file" : "the file in cache",
|
||||
failure_reason,
|
||||
reserve_stat_msg,
|
||||
file_segment->getInfoForLog()
|
||||
);
|
||||
|
@ -15,48 +15,115 @@ JoinOnKeyColumns::JoinOnKeyColumns(const Block & block, const Names & key_names_
|
||||
{
|
||||
}
|
||||
|
||||
template<> void AddedColumns<false>::buildOutput()
|
||||
{
|
||||
}
|
||||
template<>
|
||||
void AddedColumns<false>::buildOutput() {}
|
||||
|
||||
template<>
|
||||
void AddedColumns<false>::buildJoinGetOutput() {}
|
||||
|
||||
template<>
|
||||
template<bool from_row_list>
|
||||
void AddedColumns<false>::buildOutputFromBlocks() {}
|
||||
|
||||
template<>
|
||||
void AddedColumns<true>::buildOutput()
|
||||
{
|
||||
for (size_t i = 0; i < this->size(); ++i)
|
||||
if (!output_by_row_list)
|
||||
buildOutputFromBlocks<false>();
|
||||
else
|
||||
{
|
||||
auto& col = columns[i];
|
||||
size_t default_count = 0;
|
||||
auto apply_default = [&]()
|
||||
if (join_data_avg_perkey_rows < output_by_row_list_threshold)
|
||||
buildOutputFromBlocks<true>();
|
||||
else
|
||||
{
|
||||
if (default_count > 0)
|
||||
for (size_t i = 0; i < this->size(); ++i)
|
||||
{
|
||||
JoinCommon::addDefaultValues(*col, type_name[i].type, default_count);
|
||||
default_count = 0;
|
||||
}
|
||||
};
|
||||
|
||||
for (size_t j = 0; j < lazy_output.blocks.size(); ++j)
|
||||
{
|
||||
if (!lazy_output.blocks[j])
|
||||
{
|
||||
default_count++;
|
||||
continue;
|
||||
}
|
||||
apply_default();
|
||||
const auto & column_from_block = reinterpret_cast<const Block *>(lazy_output.blocks[j])->getByPosition(right_indexes[i]);
|
||||
/// If it's joinGetOrNull, we need to wrap not-nullable columns in StorageJoin.
|
||||
if (is_join_get)
|
||||
{
|
||||
if (auto * nullable_col = typeid_cast<ColumnNullable *>(col.get());
|
||||
nullable_col && !column_from_block.column->isNullable())
|
||||
auto & col = columns[i];
|
||||
for (auto row_ref_i : lazy_output.row_refs)
|
||||
{
|
||||
nullable_col->insertFromNotNullable(*column_from_block.column, lazy_output.row_nums[j]);
|
||||
continue;
|
||||
if (row_ref_i)
|
||||
{
|
||||
const RowRefList * row_ref_list = reinterpret_cast<const RowRefList *>(row_ref_i);
|
||||
for (auto it = row_ref_list->begin(); it.ok(); ++it)
|
||||
col->insertFrom(*it->block->getByPosition(right_indexes[i]).column, it->row_num);
|
||||
}
|
||||
else
|
||||
type_name[i].type->insertDefaultInto(*col);
|
||||
}
|
||||
}
|
||||
col->insertFrom(*column_from_block.column, lazy_output.row_nums[j]);
|
||||
}
|
||||
apply_default();
|
||||
}
|
||||
}
|
||||
|
||||
template<>
|
||||
void AddedColumns<true>::buildJoinGetOutput()
|
||||
{
|
||||
for (size_t i = 0; i < this->size(); ++i)
|
||||
{
|
||||
auto & col = columns[i];
|
||||
for (auto row_ref_i : lazy_output.row_refs)
|
||||
{
|
||||
if (!row_ref_i)
|
||||
{
|
||||
type_name[i].type->insertDefaultInto(*col);
|
||||
continue;
|
||||
}
|
||||
const auto * row_ref = reinterpret_cast<const RowRef *>(row_ref_i);
|
||||
const auto & column_from_block = row_ref->block->getByPosition(right_indexes[i]);
|
||||
if (auto * nullable_col = typeid_cast<ColumnNullable *>(col.get()); nullable_col && !column_from_block.column->isNullable())
|
||||
nullable_col->insertFromNotNullable(*column_from_block.column, row_ref->row_num);
|
||||
else
|
||||
col->insertFrom(*column_from_block.column, row_ref->row_num);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<>
|
||||
template<bool from_row_list>
|
||||
void AddedColumns<true>::buildOutputFromBlocks()
|
||||
{
|
||||
if (this->size() == 0)
|
||||
return;
|
||||
std::vector<const Block *> blocks;
|
||||
std::vector<UInt32> row_nums;
|
||||
blocks.reserve(lazy_output.row_refs.size());
|
||||
row_nums.reserve(lazy_output.row_refs.size());
|
||||
for (auto row_ref_i : lazy_output.row_refs)
|
||||
{
|
||||
if (row_ref_i)
|
||||
{
|
||||
if constexpr (from_row_list)
|
||||
{
|
||||
const RowRefList * row_ref_list = reinterpret_cast<const RowRefList *>(row_ref_i);
|
||||
for (auto it = row_ref_list->begin(); it.ok(); ++it)
|
||||
{
|
||||
blocks.emplace_back(it->block);
|
||||
row_nums.emplace_back(it->row_num);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
const RowRef * row_ref = reinterpret_cast<const RowRefList *>(row_ref_i);
|
||||
blocks.emplace_back(row_ref->block);
|
||||
row_nums.emplace_back(row_ref->row_num);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
blocks.emplace_back(nullptr);
|
||||
row_nums.emplace_back(0);
|
||||
}
|
||||
}
|
||||
for (size_t i = 0; i < this->size(); ++i)
|
||||
{
|
||||
auto & col = columns[i];
|
||||
for (size_t j = 0; j < blocks.size(); ++j)
|
||||
{
|
||||
if (blocks[j])
|
||||
col->insertFrom(*blocks[j]->getByPosition(right_indexes[i]).column, row_nums[j]);
|
||||
else
|
||||
type_name[i].type->insertDefaultInto(*col);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -72,29 +139,27 @@ void AddedColumns<false>::applyLazyDefaults()
|
||||
}
|
||||
|
||||
template<>
|
||||
void AddedColumns<true>::applyLazyDefaults()
|
||||
{
|
||||
}
|
||||
void AddedColumns<true>::applyLazyDefaults() {}
|
||||
|
||||
template <>
|
||||
void AddedColumns<false>::appendFromBlock(const Block & block, size_t row_num,const bool has_defaults)
|
||||
void AddedColumns<false>::appendFromBlock(const RowRef * row_ref, const bool has_defaults)
|
||||
{
|
||||
if (has_defaults)
|
||||
applyLazyDefaults();
|
||||
|
||||
#ifndef NDEBUG
|
||||
checkBlock(block);
|
||||
checkBlock(*row_ref->block);
|
||||
#endif
|
||||
if (is_join_get)
|
||||
{
|
||||
size_t right_indexes_size = right_indexes.size();
|
||||
for (size_t j = 0; j < right_indexes_size; ++j)
|
||||
{
|
||||
const auto & column_from_block = block.getByPosition(right_indexes[j]);
|
||||
const auto & column_from_block = row_ref->block->getByPosition(right_indexes[j]);
|
||||
if (auto * nullable_col = nullable_column_ptrs[j])
|
||||
nullable_col->insertFromNotNullable(*column_from_block.column, row_num);
|
||||
nullable_col->insertFromNotNullable(*column_from_block.column, row_ref->row_num);
|
||||
else
|
||||
columns[j]->insertFrom(*column_from_block.column, row_num);
|
||||
columns[j]->insertFrom(*column_from_block.column, row_ref->row_num);
|
||||
}
|
||||
}
|
||||
else
|
||||
@ -102,22 +167,21 @@ void AddedColumns<false>::appendFromBlock(const Block & block, size_t row_num,co
|
||||
size_t right_indexes_size = right_indexes.size();
|
||||
for (size_t j = 0; j < right_indexes_size; ++j)
|
||||
{
|
||||
const auto & column_from_block = block.getByPosition(right_indexes[j]);
|
||||
columns[j]->insertFrom(*column_from_block.column, row_num);
|
||||
const auto & column_from_block = row_ref->block->getByPosition(right_indexes[j]);
|
||||
columns[j]->insertFrom(*column_from_block.column, row_ref->row_num);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <>
|
||||
void AddedColumns<true>::appendFromBlock(const Block & block, size_t row_num, bool)
|
||||
void AddedColumns<true>::appendFromBlock(const RowRef * row_ref, bool)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
checkBlock(block);
|
||||
checkBlock(*row_ref->block);
|
||||
#endif
|
||||
if (has_columns_to_add)
|
||||
{
|
||||
lazy_output.blocks.emplace_back(reinterpret_cast<UInt64>(&block));
|
||||
lazy_output.row_nums.emplace_back(static_cast<uint32_t>(row_num));
|
||||
lazy_output.row_refs.emplace_back(reinterpret_cast<UInt64>(row_ref));
|
||||
}
|
||||
}
|
||||
template<>
|
||||
@ -131,8 +195,7 @@ void AddedColumns<true>::appendDefaultRow()
|
||||
{
|
||||
if (has_columns_to_add)
|
||||
{
|
||||
lazy_output.blocks.emplace_back(0);
|
||||
lazy_output.row_nums.emplace_back(0);
|
||||
lazy_output.row_refs.emplace_back(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -50,8 +50,7 @@ public:
|
||||
|
||||
struct LazyOutput
|
||||
{
|
||||
PaddedPODArray<UInt64> blocks;
|
||||
PaddedPODArray<UInt32> row_nums;
|
||||
PaddedPODArray<UInt64> row_refs;
|
||||
};
|
||||
|
||||
AddedColumns(
|
||||
@ -76,8 +75,7 @@ public:
|
||||
if constexpr (lazy)
|
||||
{
|
||||
has_columns_to_add = num_columns_to_add > 0;
|
||||
lazy_output.blocks.reserve(rows_to_add);
|
||||
lazy_output.row_nums.reserve(rows_to_add);
|
||||
lazy_output.row_refs.reserve(rows_to_add);
|
||||
}
|
||||
|
||||
columns.reserve(num_columns_to_add);
|
||||
@ -115,18 +113,22 @@ public:
|
||||
if (columns[j]->isNullable() && !saved_column->isNullable())
|
||||
nullable_column_ptrs[j] = typeid_cast<ColumnNullable *>(columns[j].get());
|
||||
}
|
||||
join_data_avg_perkey_rows = join.getJoinedData()->avgPerKeyRows();
|
||||
output_by_row_list_threshold = join.getTableJoin().outputByRowListPerkeyRowsThreshold();
|
||||
}
|
||||
|
||||
size_t size() const { return columns.size(); }
|
||||
|
||||
void buildOutput();
|
||||
|
||||
void buildJoinGetOutput();
|
||||
|
||||
ColumnWithTypeAndName moveColumn(size_t i)
|
||||
{
|
||||
return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].type, type_name[i].qualified_name);
|
||||
}
|
||||
|
||||
void appendFromBlock(const Block & block, size_t row_num, bool has_default);
|
||||
void appendFromBlock(const RowRef * row_ref, bool has_default);
|
||||
|
||||
void appendDefaultRow();
|
||||
|
||||
@ -134,6 +136,8 @@ public:
|
||||
|
||||
const IColumn & leftAsofKey() const { return *left_asof_key; }
|
||||
|
||||
static constexpr bool isLazy() { return lazy; }
|
||||
|
||||
Block left_block;
|
||||
std::vector<JoinOnKeyColumns> join_on_keys;
|
||||
ExpressionActionsPtr additional_filter_expression;
|
||||
@ -142,6 +146,9 @@ public:
|
||||
size_t rows_to_add;
|
||||
std::unique_ptr<IColumn::Offsets> offsets_to_replicate;
|
||||
bool need_filter = false;
|
||||
bool output_by_row_list = false;
|
||||
size_t join_data_avg_perkey_rows = 0;
|
||||
size_t output_by_row_list_threshold = 0;
|
||||
IColumn::Filter filter;
|
||||
|
||||
void reserve(bool need_replicate)
|
||||
@ -212,15 +219,22 @@ private:
|
||||
columns.back()->reserve(src_column.column->size());
|
||||
type_name.emplace_back(src_column.type, src_column.name, qualified_name);
|
||||
}
|
||||
|
||||
/** Build the output from the blocks extracted from `RowRef` or `RowRefList`, to avoid block cache misses which may slow down performance.
  * This problem would happen if we built the output directly from `RowRef` or `RowRefList`.
  */
|
||||
template<bool from_row_list>
|
||||
void buildOutputFromBlocks();
|
||||
};
|
||||
|
||||
/// Adapter class to pass into addFoundRowAll
|
||||
/// In joinRightColumnsWithAdditionalFilter we don't want to add rows directly into AddedColumns,
|
||||
/// because they need to be filtered by additional_filter_expression.
|
||||
class PreSelectedRows : public std::vector<RowRef>
|
||||
class PreSelectedRows : public std::vector<const RowRef *>
|
||||
{
|
||||
public:
|
||||
void appendFromBlock(const Block & block, size_t row_num, bool /* has_default */) { this->emplace_back(&block, row_num); }
|
||||
void appendFromBlock(const RowRef * row_ref, bool /* has_default */) { this->emplace_back(row_ref); }
|
||||
static constexpr bool isLazy() { return false; }
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -495,7 +495,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
|
||||
}
|
||||
|
||||
size_t rows = source_block.rows();
|
||||
|
||||
data->rows_to_join += rows;
|
||||
const auto & right_key_names = table_join->getAllNames(JoinTableSide::Right);
|
||||
ColumnPtrMap all_key_columns(right_key_names.size());
|
||||
for (const auto & column_name : right_key_names)
|
||||
@ -647,7 +647,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
|
||||
total_bytes = getTotalByteCount();
|
||||
}
|
||||
}
|
||||
|
||||
data->keys_to_join = total_rows;
|
||||
shrinkStoredBlocksToFit(total_bytes);
|
||||
|
||||
return table_join->sizeLimits().check(total_rows, total_bytes, "JOIN", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
|
||||
|
@ -345,6 +345,18 @@ public:

        size_t blocks_allocated_size = 0;
        size_t blocks_nullmaps_allocated_size = 0;

        /// Number of rows of right table to join
        size_t rows_to_join = 0;
        /// Number of keys of right table to join
        size_t keys_to_join = 0;

        size_t avgPerKeyRows() const
        {
            if (keys_to_join == 0)
                return 0;
            return rows_to_join / keys_to_join;
        }
    };
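Not from the commit: a restatement of how these new counters are consumed by AddedColumns<true>::buildOutput() earlier in this diff, with the branch condition written out; the variable name use_flat_blocks is illustrative, and join / output_by_row_list are assumed to be in scope.

/// Per-key fan-out of the right table decides the output-building strategy.
size_t avg_rows = join.getJoinedData()->avgPerKeyRows();
size_t threshold = join.getTableJoin().outputByRowListPerkeyRowsThreshold();
bool use_flat_blocks = !output_by_row_list || avg_rows < threshold;
/// use_flat_blocks: gather (block, row) pairs first and insert column-by-column (buildOutputFromBlocks);
/// otherwise: walk each RowRefList directly, which skips the intermediate arrays for long row lists.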
|
||||
|
||||
using RightTableDataPtr = std::shared_ptr<RightTableData>;
|
||||
|
@ -83,6 +83,7 @@ public:
|
||||
const Block & block_with_columns_to_add,
|
||||
const MapsTemplateVector & maps_,
|
||||
bool is_join_get = false);
|
||||
|
||||
private:
|
||||
template <typename KeyGetter, bool is_asof_join>
|
||||
static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes);
|
||||
@ -128,7 +129,7 @@ private:
|
||||
template <typename AddedColumns>
|
||||
static ColumnPtr buildAdditionalFilter(
|
||||
size_t left_start_row,
|
||||
const std::vector<RowRef> & selected_rows,
|
||||
const std::vector<const RowRef *> & selected_rows,
|
||||
const std::vector<size_t> & row_replicate_offset,
|
||||
AddedColumns & added_columns);
|
||||
|
||||
|
@ -95,7 +95,10 @@ Block HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinBlockImpl(
|
||||
added_columns.join_on_keys.clear();
|
||||
Block remaining_block = sliceBlock(block, num_joined);
|
||||
|
||||
added_columns.buildOutput();
|
||||
if (is_join_get)
|
||||
added_columns.buildJoinGetOutput();
|
||||
else
|
||||
added_columns.buildOutput();
|
||||
for (size_t i = 0; i < added_columns.size(); ++i)
|
||||
block.insert(added_columns.moveColumn(i));
|
||||
|
||||
@ -339,6 +342,8 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumns(
|
||||
size_t rows = added_columns.rows_to_add;
|
||||
if constexpr (need_filter)
|
||||
added_columns.filter = IColumn::Filter(rows, 0);
|
||||
if constexpr (!flag_per_row && (STRICTNESS == JoinStrictness::All || (STRICTNESS == JoinStrictness::Semi && KIND == JoinKind::Right)))
|
||||
added_columns.output_by_row_list = true;
|
||||
|
||||
Arena pool;
|
||||
|
||||
@ -354,8 +359,8 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumns(
|
||||
{
|
||||
if (unlikely(current_offset >= max_joined_block_rows))
|
||||
{
|
||||
added_columns.offsets_to_replicate->resize_assume_reserved(i);
|
||||
added_columns.filter.resize_assume_reserved(i);
|
||||
added_columns.offsets_to_replicate->resize(i);
|
||||
added_columns.filter.resize(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -381,15 +386,15 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumns(
|
||||
const IColumn & left_asof_key = added_columns.leftAsofKey();
|
||||
|
||||
auto row_ref = mapped->findAsof(left_asof_key, i);
|
||||
if (row_ref.block)
|
||||
if (row_ref && row_ref->block)
|
||||
{
|
||||
setUsed<need_filter>(added_columns.filter, i);
|
||||
if constexpr (flag_per_row)
|
||||
used_flags.template setUsed<join_features.need_flags, flag_per_row>(row_ref.block, row_ref.row_num, 0);
|
||||
used_flags.template setUsed<join_features.need_flags, flag_per_row>(row_ref->block, row_ref->row_num, 0);
|
||||
else
|
||||
used_flags.template setUsed<join_features.need_flags, flag_per_row>(find_result);
|
||||
|
||||
added_columns.appendFromBlock(*row_ref.block, row_ref.row_num, join_features.add_missing);
|
||||
added_columns.appendFromBlock(row_ref, join_features.add_missing);
|
||||
}
|
||||
else
|
||||
addNotFoundRow<join_features.add_missing, join_features.need_replication>(added_columns, current_offset);
|
||||
@ -420,7 +425,7 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumns(
|
||||
if (used_once)
|
||||
{
|
||||
setUsed<need_filter>(added_columns.filter, i);
|
||||
added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing);
|
||||
added_columns.appendFromBlock(&mapped, join_features.add_missing);
|
||||
}
|
||||
|
||||
break;
|
||||
@ -438,7 +443,7 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumns(
|
||||
{
|
||||
setUsed<need_filter>(added_columns.filter, i);
|
||||
used_flags.template setUsed<join_features.need_flags, flag_per_row>(find_result);
|
||||
added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing);
|
||||
added_columns.appendFromBlock(&mapped, join_features.add_missing);
|
||||
|
||||
if (join_features.is_any_or_semi_join)
|
||||
{
|
||||
@ -477,7 +482,7 @@ template <JoinKind KIND, JoinStrictness STRICTNESS, typename MapsTemplate>
|
||||
template <typename AddedColumns>
|
||||
ColumnPtr HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::buildAdditionalFilter(
|
||||
size_t left_start_row,
|
||||
const std::vector<RowRef> & selected_rows,
|
||||
const std::vector<const RowRef *> & selected_rows,
|
||||
const std::vector<size_t> & row_replicate_offset,
|
||||
AddedColumns & added_columns)
|
||||
{
|
||||
@ -489,7 +494,7 @@ ColumnPtr HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::buildAdditionalFilter
|
||||
result_column = ColumnUInt8::create();
|
||||
break;
|
||||
}
|
||||
const Block & sample_right_block = *selected_rows.begin()->block;
|
||||
const Block & sample_right_block = *((*selected_rows.begin())->block);
|
||||
if (!sample_right_block || !added_columns.additional_filter_expression)
|
||||
{
|
||||
auto filter = ColumnUInt8::create();
|
||||
@ -519,8 +524,8 @@ ColumnPtr HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::buildAdditionalFilter
|
||||
auto new_col = col.column->cloneEmpty();
|
||||
for (const auto & selected_row : selected_rows)
|
||||
{
|
||||
const auto & src_col = selected_row.block->getByPosition(right_col_pos);
|
||||
new_col->insertFrom(*src_col.column, selected_row.row_num);
|
||||
const auto & src_col = selected_row->block->getByPosition(right_col_pos);
|
||||
new_col->insertFrom(*src_col.column, selected_row->row_num);
|
||||
}
|
||||
executed_block.insert({std::move(new_col), col.type, col.name});
|
||||
}
|
||||
@ -700,26 +705,24 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumnsWithAddt
|
||||
{
|
||||
// For inner join, we need to mark each right row's flag, because we only use each right row once.
|
||||
auto used_once = used_flags.template setUsedOnce<join_features.need_flags, true>(
|
||||
selected_right_row_it->block, selected_right_row_it->row_num, 0);
|
||||
(*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0);
|
||||
if (used_once)
|
||||
{
|
||||
any_matched = true;
|
||||
total_added_rows += 1;
|
||||
added_columns.appendFromBlock(
|
||||
*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing);
|
||||
added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
auto used_once = used_flags.template setUsedOnce<join_features.need_flags, true>(
|
||||
selected_right_row_it->block, selected_right_row_it->row_num, 0);
|
||||
(*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0);
|
||||
if (used_once)
|
||||
{
|
||||
any_matched = true;
|
||||
total_added_rows += 1;
|
||||
added_columns.appendFromBlock(
|
||||
*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing);
|
||||
added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -727,16 +730,14 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumnsWithAddt
|
||||
{
|
||||
any_matched = true;
|
||||
if constexpr (join_features.right && join_features.need_flags)
|
||||
used_flags.template setUsed<true, true>(selected_right_row_it->block, selected_right_row_it->row_num, 0);
|
||||
used_flags.template setUsed<true, true>((*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
any_matched = true;
|
||||
total_added_rows += 1;
|
||||
added_columns.appendFromBlock(
|
||||
*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing);
|
||||
used_flags.template setUsed<join_features.need_flags, true>(
|
||||
selected_right_row_it->block, selected_right_row_it->row_num, 0);
|
||||
added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing);
|
||||
used_flags.template setUsed<join_features.need_flags, true>((*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -756,8 +757,7 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumnsWithAddt
|
||||
if (filter_flags[replicated_row])
|
||||
{
|
||||
any_matched = true;
|
||||
added_columns.appendFromBlock(
|
||||
*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing);
|
||||
added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing);
|
||||
total_added_rows += 1;
|
||||
}
|
||||
++selected_right_row_it;
|
||||
@ -767,8 +767,7 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumnsWithAddt
|
||||
if (filter_flags[replicated_row])
|
||||
{
|
||||
any_matched = true;
|
||||
added_columns.appendFromBlock(
|
||||
*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing);
|
||||
added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing);
|
||||
total_added_rows += 1;
|
||||
selected_right_row_it = selected_right_row_it + row_replicate_offset[i] - replicated_row;
|
||||
break;
|
||||
|
@ -18,11 +18,25 @@ struct JoinFeatures
|
||||
static constexpr bool inner = KIND == JoinKind::Inner;
|
||||
static constexpr bool full = KIND == JoinKind::Full;
|
||||
|
||||
/** Whether we may need to duplicate rows from the left table.
  * For example, when we have row (key1, attr1) in the left table
  * and rows (key1, attr2), (key1, attr3) in the right table,
  * then we need to duplicate row (key1, attr1) for each of the joined rows from the right table, so the result will be
  * (key1, attr1, key1, attr2)
  * (key1, attr1, key1, attr3)
  */
|
||||
static constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right);
|
||||
|
||||
/// Whether we need to filter rows from the left table that do not have matches in the right table.
|
||||
static constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left));
|
||||
|
||||
/// Whether we need to add default values for columns from the left table.
|
||||
static constexpr bool add_missing = (left || full) && !is_semi_join;
|
||||
|
||||
/// Whether we need to store flags for rows from the right table
|
||||
/// that indicate whether they have matches in the left table.
|
||||
static constexpr bool need_flags = MapGetter<KIND, STRICTNESS, std::is_same_v<std::decay_t<Map>, HashJoin::MapsAll>>::flagged;
|
||||
|
||||
static constexpr bool is_maps_all = std::is_same_v<std::decay_t<Map>, HashJoin::MapsAll>;
|
||||
};
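These constexpr flags are read with if constexpr elsewhere in the patch, so each JOIN kind only compiles the branches it actually needs. A minimal standalone sketch of that pattern follows; the names ToyJoinFeatures and handleUnmatchedRow are illustrative assumptions, not the actual ClickHouse types:

    #include <iostream>

    enum class JoinKind { Inner, Left, Right, Full };

    template <JoinKind KIND>
    struct ToyJoinFeatures
    {
        static constexpr bool left = KIND == JoinKind::Left;
        static constexpr bool full = KIND == JoinKind::Full;
        /// Mirrors add_missing above: unmatched left rows are padded with default values.
        static constexpr bool add_missing = left || full;
    };

    /// Decides what to do with a left row that has no match on the right side.
    template <JoinKind KIND>
    void handleUnmatchedRow()
    {
        if constexpr (ToyJoinFeatures<KIND>::add_missing)
            std::cout << "emit the row padded with default values\n";
        else
            std::cout << "drop the row\n";
    }

    int main()
    {
        handleUnmatchedRow<JoinKind::Left>();   /// padded with defaults
        handleUnmatchedRow<JoinKind::Inner>();  /// dropped
    }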
|
||||
|
||||
|
@ -104,7 +104,7 @@ void addFoundRowAll(
|
||||
{
|
||||
if (!known_rows.isKnown(std::make_pair(it->block, it->row_num)))
|
||||
{
|
||||
added.appendFromBlock(*it->block, it->row_num, false);
|
||||
added.appendFromBlock(*it, false);
|
||||
++current_offset;
|
||||
if (!new_known_rows_ptr)
|
||||
{
|
||||
@ -124,11 +124,16 @@ void addFoundRowAll(
|
||||
known_rows.add(std::cbegin(*new_known_rows_ptr), std::cend(*new_known_rows_ptr));
|
||||
}
|
||||
}
|
||||
else if constexpr (AddedColumns::isLazy())
|
||||
{
|
||||
added.appendFromBlock(&mapped, false);
|
||||
current_offset += mapped.rows;
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto it = mapped.begin(); it.ok(); ++it)
|
||||
{
|
||||
added.appendFromBlock(*it->block, it->row_num, false);
|
||||
added.appendFromBlock(*it, false);
|
||||
++current_offset;
|
||||
}
|
||||
}
|
||||
|
@ -1944,6 +1944,8 @@ BlockIO InterpreterCreateQuery::execute()
|
||||
FunctionNameNormalizer::visit(query_ptr.get());
|
||||
auto & create = query_ptr->as<ASTCreateQuery &>();
|
||||
|
||||
create.if_not_exists |= getContext()->getSettingsRef().create_if_not_exists;
|
||||
|
||||
bool is_create_database = create.database && !create.table;
|
||||
if (!create.cluster.empty() && !maybeRemoveOnCluster(query_ptr, getContext()))
|
||||
{
|
||||
|
@ -244,9 +244,6 @@ public:
|
||||
/// Same as checkTimeLimit but it never throws
|
||||
[[nodiscard]] bool checkTimeLimitSoft();
|
||||
|
||||
/// Use it when the query is left running in the background to execute asynchronously
|
||||
void updateContext(ContextWeakPtr weak_context) { context = std::move(weak_context); }
|
||||
|
||||
/// Get the reference for the start of the query. Used to synchronize with other Stopwatches
|
||||
UInt64 getQueryCPUStartTime() { return watch.getStart(); }
|
||||
};
|
||||
|
@ -144,7 +144,7 @@ public:
|
||||
return low;
|
||||
}
|
||||
|
||||
RowRef findAsof(const IColumn & asof_column, size_t row_num) override
|
||||
RowRef * findAsof(const IColumn & asof_column, size_t row_num) override
|
||||
{
|
||||
sort();
|
||||
|
||||
@ -156,10 +156,10 @@ public:
|
||||
if (pos != entries.size())
|
||||
{
|
||||
size_t row_ref_index = entries[pos].row_ref_index;
|
||||
return row_refs[row_ref_index];
|
||||
return &row_refs[row_ref_index];
|
||||
}
|
||||
|
||||
return {nullptr, 0};
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -122,7 +122,7 @@ struct RowRefList : RowRef
|
||||
};
|
||||
|
||||
RowRefList() {} /// NOLINT
|
||||
RowRefList(const Block * block_, size_t row_num_) : RowRef(block_, row_num_) {}
|
||||
RowRefList(const Block * block_, size_t row_num_) : RowRef(block_, row_num_), rows(1) {}
|
||||
|
||||
ForwardIterator begin() const { return ForwardIterator(this); }
|
||||
|
||||
@ -135,8 +135,11 @@ struct RowRefList : RowRef
|
||||
*next = Batch(nullptr);
|
||||
}
|
||||
next = next->insert(std::move(row_ref), pool);
|
||||
++rows;
|
||||
}
|
||||
|
||||
public:
|
||||
SizeT rows = 0;
|
||||
private:
|
||||
Batch * next = nullptr;
|
||||
};
|
||||
@ -158,7 +161,7 @@ struct SortedLookupVectorBase
|
||||
virtual void insert(const IColumn &, const Block *, size_t) = 0;
|
||||
|
||||
// This needs to be synchronized internally
|
||||
virtual RowRef findAsof(const IColumn &, size_t) = 0;
|
||||
virtual RowRef * findAsof(const IColumn &, size_t) = 0;
|
||||
};
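The return type change above replaces a RowRef value with a {nullptr, 0} sentinel by a RowRef pointer, where nullptr means "not found"; this makes the miss case explicit and avoids copying the row reference. A minimal sketch of the same shape, using a plain lower_bound lookup with hypothetical Entry and findFirstNotLess names rather than the real ASOF inequality logic:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Entry { int key = 0; size_t row_num = 0; };

    /// Returns a pointer to the first entry with key >= target, or nullptr when there is none,
    /// instead of returning a sentinel value.
    const Entry * findFirstNotLess(const std::vector<Entry> & entries, int target)
    {
        auto it = std::lower_bound(entries.begin(), entries.end(), target,
                                   [](const Entry & e, int value) { return e.key < value; });
        return it == entries.end() ? nullptr : &*it;
    }

    int main()
    {
        std::vector<Entry> entries{{1, 0}, {5, 1}, {9, 2}};
        assert(findFirstNotLess(entries, 4)->key == 5);
        assert(findFirstNotLess(entries, 10) == nullptr);
    }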
|
||||
|
||||
|
||||
|
@ -115,6 +115,7 @@ TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_, Temporary
|
||||
, partial_merge_join_left_table_buffer_bytes(settings.partial_merge_join_left_table_buffer_bytes)
|
||||
, max_files_to_merge(settings.join_on_disk_max_files_to_merge)
|
||||
, temporary_files_codec(settings.temporary_files_codec)
|
||||
, output_by_rowlist_perkey_rows_threshold(settings.join_output_by_rowlist_perkey_rows_threshold)
|
||||
, max_memory_usage(settings.max_memory_usage)
|
||||
, tmp_volume(tmp_volume_)
|
||||
, tmp_data(tmp_data_)
|
||||
|
@ -148,6 +148,7 @@ private:
|
||||
const size_t partial_merge_join_left_table_buffer_bytes = 0;
|
||||
const size_t max_files_to_merge = 0;
|
||||
const String temporary_files_codec = "LZ4";
|
||||
const size_t output_by_rowlist_perkey_rows_threshold = 0;
|
||||
|
||||
/// Value of the max_memory_usage setting for the query; can be used when max_bytes_in_join is not specified.
|
||||
size_t max_memory_usage = 0;
|
||||
@ -295,6 +296,7 @@ public:
|
||||
return join_use_nulls && isRightOrFull(kind());
|
||||
}
|
||||
|
||||
size_t outputByRowListPerkeyRowsThreshold() const { return output_by_rowlist_perkey_rows_threshold; }
|
||||
size_t defaultMaxBytes() const { return default_max_bytes; }
|
||||
size_t maxJoinedBlockRows() const { return max_joined_block_rows; }
|
||||
size_t maxRowsInRightBlock() const { return partial_merge_join_rows_in_right_blocks; }
|
||||
|
@ -246,7 +246,8 @@ void download(FileSegment & file_segment)
|
||||
ASSERT_EQ(file_segment.state(), State::DOWNLOADING);
|
||||
ASSERT_EQ(file_segment.getDownloadedSize(), 0);
|
||||
|
||||
ASSERT_TRUE(file_segment.reserve(file_segment.range().size(), 1000));
|
||||
std::string failure_reason;
|
||||
ASSERT_TRUE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason));
|
||||
download(cache_base_path, file_segment);
|
||||
ASSERT_EQ(file_segment.state(), State::DOWNLOADING);
|
||||
|
||||
@ -258,7 +259,8 @@ void assertDownloadFails(FileSegment & file_segment)
|
||||
{
|
||||
ASSERT_EQ(file_segment.getOrSetDownloader(), FileSegment::getCallerId());
|
||||
ASSERT_EQ(file_segment.getDownloadedSize(), 0);
|
||||
ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000));
|
||||
std::string failure_reason;
|
||||
ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason));
|
||||
file_segment.complete();
|
||||
}
|
||||
|
||||
@ -957,10 +959,11 @@ TEST_F(FileCacheTest, temporaryData)
|
||||
|
||||
{
|
||||
ASSERT_EQ(some_data_holder->size(), 5);
|
||||
std::string failure_reason;
|
||||
for (auto & segment : *some_data_holder)
|
||||
{
|
||||
ASSERT_TRUE(segment->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segment->reserve(segment->range().size(), 1000));
|
||||
ASSERT_TRUE(segment->reserve(segment->range().size(), 1000, failure_reason));
|
||||
download(*segment);
|
||||
segment->complete();
|
||||
}
|
||||
|
@ -198,6 +198,29 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s
|
||||
print_database_table();
|
||||
}
|
||||
|
||||
if (sync_replica_mode != SyncReplicaMode::DEFAULT)
|
||||
{
|
||||
settings.ostr << ' ';
|
||||
print_keyword(magic_enum::enum_name(sync_replica_mode));
|
||||
|
||||
// If the mode is LIGHTWEIGHT and specific source replicas are specified
|
||||
if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty())
|
||||
{
|
||||
settings.ostr << ' ';
|
||||
print_keyword("FROM");
|
||||
settings.ostr << ' ';
|
||||
|
||||
bool first = true;
|
||||
for (const auto & src : src_replicas)
|
||||
{
|
||||
if (!first)
|
||||
settings.ostr << ", ";
|
||||
first = false;
|
||||
settings.ostr << quoteString(src);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (query_settings)
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << settings.nl_or_ws << "SETTINGS " << (settings.hilite ? hilite_none : "");
|
||||
@ -233,28 +256,6 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s
|
||||
print_identifier(disk);
|
||||
}
|
||||
|
||||
if (sync_replica_mode != SyncReplicaMode::DEFAULT)
|
||||
{
|
||||
settings.ostr << ' ';
|
||||
print_keyword(magic_enum::enum_name(sync_replica_mode));
|
||||
|
||||
// If the mode is LIGHTWEIGHT and specific source replicas are specified
|
||||
if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty())
|
||||
{
|
||||
settings.ostr << ' ';
|
||||
print_keyword("FROM");
|
||||
settings.ostr << ' ';
|
||||
|
||||
bool first = true;
|
||||
for (const auto & src : src_replicas)
|
||||
{
|
||||
if (!first)
|
||||
settings.ostr << ", ";
|
||||
first = false;
|
||||
settings.ostr << quoteString(src);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case Type::SYNC_DATABASE_REPLICA:
|
||||
|
@ -96,7 +96,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
|
||||
return was_edge_added;
|
||||
}
|
||||
|
||||
bool ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
|
||||
ExecutingGraph::UpdateNodeStatus ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
|
||||
{
|
||||
auto & cur_node = *nodes[pid];
|
||||
Processors new_processors;
|
||||
@ -108,7 +108,7 @@ bool ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
|
||||
catch (...)
|
||||
{
|
||||
cur_node.exception = std::current_exception();
|
||||
return false;
|
||||
return UpdateNodeStatus::Exception;
|
||||
}
|
||||
|
||||
{
|
||||
@ -118,7 +118,7 @@ bool ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
|
||||
{
|
||||
for (auto & processor : new_processors)
|
||||
processor->cancel();
|
||||
return false;
|
||||
return UpdateNodeStatus::Cancelled;
|
||||
}
|
||||
processors->insert(processors->end(), new_processors.begin(), new_processors.end());
|
||||
|
||||
@ -178,7 +178,7 @@ bool ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
return UpdateNodeStatus::Done;
|
||||
}
|
||||
|
||||
void ExecutingGraph::initializeExecution(Queue & queue)
|
||||
@ -213,7 +213,7 @@ void ExecutingGraph::initializeExecution(Queue & queue)
|
||||
}
|
||||
|
||||
|
||||
bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue)
|
||||
ExecutingGraph::UpdateNodeStatus ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue)
|
||||
{
|
||||
std::stack<Edge *> updated_edges;
|
||||
std::stack<uint64_t> updated_processors;
|
||||
@ -309,7 +309,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue
|
||||
catch (...)
|
||||
{
|
||||
node.exception = std::current_exception();
|
||||
return false;
|
||||
return UpdateNodeStatus::Exception;
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
@ -386,8 +386,9 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue
|
||||
read_lock.unlock();
|
||||
{
|
||||
std::unique_lock lock(nodes_mutex);
|
||||
if (!expandPipeline(updated_processors, pid))
|
||||
return false;
|
||||
auto status = expandPipeline(updated_processors, pid);
|
||||
if (status != UpdateNodeStatus::Done)
|
||||
return status;
|
||||
}
|
||||
read_lock.lock();
|
||||
|
||||
@ -397,7 +398,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
return UpdateNodeStatus::Done;
|
||||
}
|
||||
|
||||
void ExecutingGraph::cancel(bool cancel_all_processors)
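Here expandPipeline and updateNode stop returning a bare bool and instead return the three-valued UpdateNodeStatus, so callers can tell an exception apart from a cancellation and only keep scheduling on Done. A minimal sketch of propagating such a status, with hypothetical step functions rather than the real executor:

    #include <iostream>

    enum class UpdateNodeStatus { Done, Exception, Cancelled };

    UpdateNodeStatus expandStep(bool cancelled)
    {
        if (cancelled)
            return UpdateNodeStatus::Cancelled;
        return UpdateNodeStatus::Done;
    }

    UpdateNodeStatus updateStep(bool cancelled)
    {
        /// Propagate anything that is not Done instead of collapsing it into `false`.
        if (auto status = expandStep(cancelled); status != UpdateNodeStatus::Done)
            return status;
        return UpdateNodeStatus::Done;
    }

    int main()
    {
        std::cout << (updateStep(true) == UpdateNodeStatus::Cancelled ? "cancelled" : "done") << '\n';
    }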
|
||||
|
@ -138,10 +138,17 @@ public:
|
||||
/// Traverse graph the first time to update all the childless nodes.
|
||||
void initializeExecution(Queue & queue);
|
||||
|
||||
enum class UpdateNodeStatus
|
||||
{
|
||||
Done,
|
||||
Exception,
|
||||
Cancelled,
|
||||
};
|
||||
|
||||
/// Update processor with pid number (call IProcessor::prepare).
|
||||
/// Check parents and children of current processor and push them to stacks if they also need to be updated.
|
||||
/// If processor wants to be expanded, lock will be upgraded to get write access to pipeline.
|
||||
bool updateNode(uint64_t pid, Queue & queue, Queue & async_queue);
|
||||
UpdateNodeStatus updateNode(uint64_t pid, Queue & queue, Queue & async_queue);
|
||||
|
||||
void cancel(bool cancel_all_processors = true);
|
||||
|
||||
@ -155,7 +162,7 @@ private:
|
||||
|
||||
/// Update graph after processor (pid) returned ExpandPipeline status.
|
||||
/// All new nodes and nodes with updated ports are pushed into stack.
|
||||
bool expandPipeline(std::stack<uint64_t> & stack, uint64_t pid);
|
||||
UpdateNodeStatus expandPipeline(std::stack<uint64_t> & stack, uint64_t pid);
|
||||
|
||||
std::shared_ptr<Processors> processors;
|
||||
std::vector<bool> source_processors;
|
||||
|
@ -77,9 +77,9 @@ const Processors & PipelineExecutor::getProcessors() const
|
||||
return graph->getProcessors();
|
||||
}
|
||||
|
||||
void PipelineExecutor::cancel()
|
||||
void PipelineExecutor::cancel(ExecutionStatus reason)
|
||||
{
|
||||
cancelled = true;
|
||||
tryUpdateExecutionStatus(ExecutionStatus::Executing, reason);
|
||||
finish();
|
||||
graph->cancel();
|
||||
}
|
||||
@ -98,6 +98,11 @@ void PipelineExecutor::finish()
|
||||
tasks.finish();
|
||||
}
|
||||
|
||||
bool PipelineExecutor::tryUpdateExecutionStatus(ExecutionStatus expected, ExecutionStatus desired)
|
||||
{
|
||||
return execution_status.compare_exchange_strong(expected, desired);
|
||||
}
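tryUpdateExecutionStatus relies on compare_exchange_strong: the transition happens only if the status is still the expected value, so concurrent callers (user cancel, timeout, exception path) cannot silently overwrite each other's reason. A minimal standalone sketch of the same transition logic, simplified from the code above:

    #include <atomic>
    #include <cassert>

    enum class ExecutionStatus { NotStarted, Executing, CancelledByUser, Exception };

    std::atomic<ExecutionStatus> execution_status{ExecutionStatus::NotStarted};

    bool tryUpdate(ExecutionStatus expected, ExecutionStatus desired)
    {
        /// Succeeds only if the status is still `expected`; otherwise leaves it unchanged.
        return execution_status.compare_exchange_strong(expected, desired);
    }

    int main()
    {
        assert(tryUpdate(ExecutionStatus::NotStarted, ExecutionStatus::Executing));
        /// A second transition from NotStarted fails because the state already moved on.
        assert(!tryUpdate(ExecutionStatus::NotStarted, ExecutionStatus::CancelledByUser));
        assert(tryUpdate(ExecutionStatus::Executing, ExecutionStatus::CancelledByUser));
    }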
|
||||
|
||||
void PipelineExecutor::execute(size_t num_threads, bool concurrency_control)
|
||||
{
|
||||
checkTimeLimit();
|
||||
@ -120,7 +125,7 @@ void PipelineExecutor::execute(size_t num_threads, bool concurrency_control)
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
span.addAttribute(ExecutionStatus::fromCurrentException());
|
||||
span.addAttribute(DB::ExecutionStatus::fromCurrentException());
|
||||
|
||||
#ifndef NDEBUG
|
||||
LOG_TRACE(log, "Exception while executing query. Current state:\n{}", dumpPipeline());
|
||||
@ -169,7 +174,7 @@ bool PipelineExecutor::checkTimeLimitSoft()
|
||||
// We call cancel here so that all processors are notified and tasks are woken up
|
||||
// so that the "break" is faster and doesn't wait for long events
|
||||
if (!continuing)
|
||||
cancel();
|
||||
cancel(ExecutionStatus::CancelledByTimeout);
|
||||
|
||||
return continuing;
|
||||
}
|
||||
@ -195,7 +200,8 @@ void PipelineExecutor::finalizeExecution()
|
||||
{
|
||||
checkTimeLimit();
|
||||
|
||||
if (cancelled)
|
||||
auto status = execution_status.load();
|
||||
if (status == ExecutionStatus::CancelledByTimeout || status == ExecutionStatus::CancelledByUser)
|
||||
return;
|
||||
|
||||
bool all_processors_finished = true;
|
||||
@ -271,7 +277,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie
|
||||
break;
|
||||
|
||||
if (!context.executeTask())
|
||||
cancel();
|
||||
cancel(ExecutionStatus::Exception);
|
||||
|
||||
if (tasks.isFinished())
|
||||
break;
|
||||
@ -289,11 +295,13 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie
|
||||
Queue async_queue;
|
||||
|
||||
/// Prepare processor after execution.
|
||||
if (!graph->updateNode(context.getProcessorID(), queue, async_queue))
|
||||
cancel();
|
||||
auto status = graph->updateNode(context.getProcessorID(), queue, async_queue);
|
||||
if (status == ExecutingGraph::UpdateNodeStatus::Exception)
|
||||
cancel(ExecutionStatus::Exception);
|
||||
|
||||
/// Push other tasks to global queue.
|
||||
tasks.pushTasks(queue, async_queue, context);
|
||||
if (status == ExecutingGraph::UpdateNodeStatus::Done)
|
||||
tasks.pushTasks(queue, async_queue, context);
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
@ -309,7 +317,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie
|
||||
{
|
||||
/// spawnThreads can throw an exception, for example CANNOT_SCHEDULE_TASK.
|
||||
/// We should cancel execution properly before rethrow.
|
||||
cancel();
|
||||
cancel(ExecutionStatus::Exception);
|
||||
throw;
|
||||
}
|
||||
|
||||
@ -328,6 +336,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie
|
||||
void PipelineExecutor::initializeExecution(size_t num_threads, bool concurrency_control)
|
||||
{
|
||||
is_execution_initialized = true;
|
||||
tryUpdateExecutionStatus(ExecutionStatus::NotStarted, ExecutionStatus::Executing);
|
||||
|
||||
size_t use_threads = num_threads;
|
||||
|
||||
@ -393,7 +402,7 @@ void PipelineExecutor::executeImpl(size_t num_threads, bool concurrency_control)
|
||||
{
|
||||
/// If finished_flag is not set, there was an exception.
|
||||
/// Cancel execution in this case.
|
||||
cancel();
|
||||
cancel(ExecutionStatus::Exception);
|
||||
if (pool)
|
||||
pool->wait();
|
||||
}
|
||||
|
@ -48,8 +48,20 @@ public:
|
||||
|
||||
const Processors & getProcessors() const;
|
||||
|
||||
enum class ExecutionStatus
|
||||
{
|
||||
NotStarted,
|
||||
Executing,
|
||||
Finished,
|
||||
Exception,
|
||||
CancelledByUser,
|
||||
CancelledByTimeout,
|
||||
};
|
||||
|
||||
/// Cancel execution. May be called from another thread.
|
||||
void cancel();
|
||||
void cancel() { cancel(ExecutionStatus::CancelledByUser); }
|
||||
|
||||
ExecutionStatus getExecutionStatus() const { return execution_status.load(); }
|
||||
|
||||
/// Cancel processors which only read data from source. May be called from another thread.
|
||||
void cancelReading();
|
||||
@ -81,7 +93,7 @@ private:
|
||||
/// system.opentelemetry_span_log
|
||||
bool trace_processors = false;
|
||||
|
||||
std::atomic_bool cancelled = false;
|
||||
std::atomic<ExecutionStatus> execution_status = ExecutionStatus::NotStarted;
|
||||
std::atomic_bool cancelled_reading = false;
|
||||
|
||||
LoggerPtr log = getLogger("PipelineExecutor");
|
||||
@ -105,6 +117,10 @@ private:
|
||||
void executeStepImpl(size_t thread_num, std::atomic_bool * yield_flag = nullptr);
|
||||
void executeSingleThread(size_t thread_num);
|
||||
void finish();
|
||||
void cancel(ExecutionStatus reason);
|
||||
|
||||
/// If execution_status == expected, change it to desired.
|
||||
bool tryUpdateExecutionStatus(ExecutionStatus expected, ExecutionStatus desired);
|
||||
|
||||
String dumpPipeline() const;
|
||||
};
|
||||
|
@ -15,6 +15,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int QUERY_WAS_CANCELLED;
|
||||
}
|
||||
|
||||
class PushingAsyncSource : public ISource
|
||||
@ -176,6 +177,16 @@ void PushingAsyncPipelineExecutor::start()
|
||||
data->thread = ThreadFromGlobalPool(std::move(func));
|
||||
}
|
||||
|
||||
[[noreturn]] static void throwOnExecutionStatus(PipelineExecutor::ExecutionStatus status)
|
||||
{
|
||||
if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout
|
||||
|| status == PipelineExecutor::ExecutionStatus::CancelledByUser)
|
||||
throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled");
|
||||
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Pipeline for PushingPipelineExecutor was finished before all data was inserted");
|
||||
}
|
||||
|
||||
void PushingAsyncPipelineExecutor::push(Chunk chunk)
|
||||
{
|
||||
if (!started)
|
||||
@ -185,8 +196,7 @@ void PushingAsyncPipelineExecutor::push(Chunk chunk)
|
||||
data->rethrowExceptionIfHas();
|
||||
|
||||
if (!is_pushed)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Pipeline for PushingAsyncPipelineExecutor was finished before all data was inserted");
|
||||
throwOnExecutionStatus(data->executor->getExecutionStatus());
|
||||
}
|
||||
|
||||
void PushingAsyncPipelineExecutor::push(Block block)
|
||||
|
@ -11,6 +11,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int QUERY_WAS_CANCELLED;
|
||||
}
|
||||
|
||||
class PushingSource : public ISource
|
||||
@ -80,6 +81,15 @@ const Block & PushingPipelineExecutor::getHeader() const
|
||||
return pushing_source->getPort().getHeader();
|
||||
}
|
||||
|
||||
[[noreturn]] static void throwOnExecutionStatus(PipelineExecutor::ExecutionStatus status)
|
||||
{
|
||||
if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout
|
||||
|| status == PipelineExecutor::ExecutionStatus::CancelledByUser)
|
||||
throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled");
|
||||
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Pipeline for PushingPipelineExecutor was finished before all data was inserted");
|
||||
}
|
||||
|
||||
void PushingPipelineExecutor::start()
|
||||
{
|
||||
@ -91,8 +101,7 @@ void PushingPipelineExecutor::start()
|
||||
executor->setReadProgressCallback(pipeline.getReadProgressCallback());
|
||||
|
||||
if (!executor->executeStep(&input_wait_flag))
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Pipeline for PushingPipelineExecutor was finished before all data was inserted");
|
||||
throwOnExecutionStatus(executor->getExecutionStatus());
|
||||
}
|
||||
|
||||
void PushingPipelineExecutor::push(Chunk chunk)
|
||||
@ -103,8 +112,7 @@ void PushingPipelineExecutor::push(Chunk chunk)
|
||||
pushing_source->setData(std::move(chunk));
|
||||
|
||||
if (!executor->executeStep(&input_wait_flag))
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Pipeline for PushingPipelineExecutor was finished before all data was inserted");
|
||||
throwOnExecutionStatus(executor->getExecutionStatus());
|
||||
}
|
||||
|
||||
void PushingPipelineExecutor::push(Block block)
|
||||
|
@ -133,16 +133,31 @@ static ColumnWithTypeAndName readColumnWithStringData(const std::shared_ptr<arro
|
||||
std::shared_ptr<arrow::Buffer> buffer = chunk.value_data();
|
||||
const size_t chunk_length = chunk.length();
|
||||
|
||||
for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i)
|
||||
const size_t null_count = chunk.null_count();
|
||||
if (null_count == 0)
|
||||
{
|
||||
if (!chunk.IsNull(offset_i) && buffer)
|
||||
for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i)
|
||||
{
|
||||
const auto * raw_data = buffer->data() + chunk.value_offset(offset_i);
|
||||
column_chars_t.insert_assume_reserved(raw_data, raw_data + chunk.value_length(offset_i));
|
||||
}
|
||||
column_chars_t.emplace_back('\0');
|
||||
column_chars_t.emplace_back('\0');
|
||||
|
||||
column_offsets.emplace_back(column_chars_t.size());
|
||||
column_offsets.emplace_back(column_chars_t.size());
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i)
|
||||
{
|
||||
if (!chunk.IsNull(offset_i) && buffer)
|
||||
{
|
||||
const auto * raw_data = buffer->data() + chunk.value_offset(offset_i);
|
||||
column_chars_t.insert_assume_reserved(raw_data, raw_data + chunk.value_length(offset_i));
|
||||
}
|
||||
column_chars_t.emplace_back('\0');
|
||||
|
||||
column_offsets.emplace_back(column_chars_t.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
return {std::move(internal_column), std::move(internal_type), column_name};
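The rewrite above checks chunk.null_count() once and, when it is zero, runs a copy loop with no per-row IsNull branch; the original branching loop is kept only for chunks that do contain nulls. A minimal standalone sketch of the same fast-path split over a hypothetical flat value array:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    /// Copies values into `out`, using a branch-free fast path when the input has no nulls,
    /// mirroring the null_count == 0 split in the Arrow string reader above.
    void copyValues(const std::vector<int> & values, const std::vector<bool> & is_null,
                    size_t null_count, std::vector<int> & out)
    {
        out.reserve(out.size() + values.size());
        if (null_count == 0)
        {
            for (size_t i = 0; i != values.size(); ++i)
                out.push_back(values[i]);                    /// no per-row null check
        }
        else
        {
            for (size_t i = 0; i != values.size(); ++i)
                out.push_back(is_null[i] ? 0 : values[i]);   /// nulls become a default value
        }
    }

    int main()
    {
        std::vector<int> out;
        copyValues({1, 2, 3}, {false, true, false}, 1, out);
        std::cout << out.size() << '\n';  /// 3
    }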
|
||||
|
@ -1143,24 +1143,42 @@ readColumnWithStringData(const orc::ColumnVectorBatch * orc_column, const orc::T
|
||||
reserver_size += 1;
|
||||
}
|
||||
|
||||
column_chars_t.reserve(reserver_size);
|
||||
column_offsets.reserve(orc_str_column->numElements);
|
||||
column_chars_t.resize_exact(reserver_size);
|
||||
column_offsets.resize_exact(orc_str_column->numElements);
|
||||
|
||||
size_t curr_offset = 0;
|
||||
for (size_t i = 0; i < orc_str_column->numElements; ++i)
|
||||
if (!orc_str_column->hasNulls)
|
||||
{
|
||||
if (!orc_str_column->hasNulls || orc_str_column->notNull[i])
|
||||
for (size_t i = 0; i < orc_str_column->numElements; ++i)
|
||||
{
|
||||
const auto * buf = orc_str_column->data[i];
|
||||
size_t buf_size = orc_str_column->length[i];
|
||||
column_chars_t.insert_assume_reserved(buf, buf + buf_size);
|
||||
memcpy(&column_chars_t[curr_offset], buf, buf_size);
|
||||
curr_offset += buf_size;
|
||||
|
||||
column_chars_t[curr_offset] = 0;
|
||||
++curr_offset;
|
||||
|
||||
column_offsets[i] = curr_offset;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (size_t i = 0; i < orc_str_column->numElements; ++i)
|
||||
{
|
||||
if (orc_str_column->notNull[i])
|
||||
{
|
||||
const auto * buf = orc_str_column->data[i];
|
||||
size_t buf_size = orc_str_column->length[i];
|
||||
memcpy(&column_chars_t[curr_offset], buf, buf_size);
|
||||
curr_offset += buf_size;
|
||||
}
|
||||
|
||||
column_chars_t.push_back(0);
|
||||
++curr_offset;
|
||||
column_chars_t[curr_offset] = 0;
|
||||
++curr_offset;
|
||||
|
||||
column_offsets.push_back(curr_offset);
|
||||
column_offsets[i] = curr_offset;
|
||||
}
|
||||
}
|
||||
return {std::move(internal_column), std::move(internal_type), column_name};
|
||||
}
|
||||
|
@ -3,6 +3,11 @@
|
||||
#include <Processors/Merges/IMergingTransform.h>
|
||||
#include <Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event AggregatingSortedMilliseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -29,6 +34,11 @@ public:
|
||||
}
|
||||
|
||||
String getName() const override { return "AggregatingSortedTransform"; }
|
||||
|
||||
void onFinish() override
|
||||
{
|
||||
logMergedStats(ProfileEvents::AggregatingSortedMilliseconds, "Aggregated sorted", getLogger("AggregatingSortedTransform"));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -30,6 +30,8 @@ public:
|
||||
void consume(Input & input, size_t source_num) override;
|
||||
Status merge() override;
|
||||
|
||||
MergedStats getMergedStats() const override { return merged_data.getMergedStats(); }
|
||||
|
||||
/// Stores information for aggregation of SimpleAggregateFunction columns
|
||||
struct SimpleAggregateDescription
|
||||
{
|
||||
|
@ -126,6 +126,9 @@ IMergingAlgorithm::Status FinishAggregatingInOrderAlgorithm::merge()
|
||||
|
||||
Chunk FinishAggregatingInOrderAlgorithm::prepareToMerge()
|
||||
{
|
||||
total_merged_rows += accumulated_rows;
|
||||
total_merged_bytes += accumulated_bytes;
|
||||
|
||||
accumulated_rows = 0;
|
||||
accumulated_bytes = 0;
|
||||
|
||||
|
@ -50,6 +50,8 @@ public:
|
||||
void consume(Input & input, size_t source_num) override;
|
||||
Status merge() override;
|
||||
|
||||
MergedStats getMergedStats() const override { return {.bytes = accumulated_bytes, .rows = accumulated_rows, .blocks = chunk_num}; }
|
||||
|
||||
private:
|
||||
Chunk prepareToMerge();
|
||||
void addToAggregation();
|
||||
@ -92,6 +94,9 @@ private:
|
||||
UInt64 chunk_num = 0;
|
||||
size_t accumulated_rows = 0;
|
||||
size_t accumulated_bytes = 0;
|
||||
|
||||
size_t total_merged_rows = 0;
|
||||
size_t total_merged_bytes = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -33,6 +33,8 @@ public:
|
||||
const char * getName() const override { return "GraphiteRollupSortedAlgorithm"; }
|
||||
Status merge() override;
|
||||
|
||||
MergedStats getMergedStats() const override { return merged_data->getMergedStats(); }
|
||||
|
||||
struct ColumnsDefinition
|
||||
{
|
||||
size_t path_column_num;
|
||||
|
@ -1,7 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <Processors/Chunk.h>
|
||||
#include <variant>
|
||||
#include <Common/ProfileEvents.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -65,6 +65,15 @@ public:
|
||||
|
||||
IMergingAlgorithm() = default;
|
||||
virtual ~IMergingAlgorithm() = default;
|
||||
|
||||
struct MergedStats
|
||||
{
|
||||
UInt64 bytes = 0;
|
||||
UInt64 rows = 0;
|
||||
UInt64 blocks = 0;
|
||||
};
|
||||
|
||||
virtual MergedStats getMergedStats() const = 0;
|
||||
};
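getMergedStats gives every merging algorithm a uniform way to report totals, and the overrides added throughout this patch build the struct with C++20 designated initializers. A minimal standalone sketch of the interface plus one toy implementation; ToyAlgorithm is a hypothetical class, not part of ClickHouse:

    #include <cstdint>
    #include <iostream>

    struct MergedStats
    {
        uint64_t bytes = 0;
        uint64_t rows = 0;
        uint64_t blocks = 0;
    };

    struct IMergingAlgorithmLike
    {
        virtual ~IMergingAlgorithmLike() = default;
        virtual MergedStats getMergedStats() const = 0;
    };

    struct ToyAlgorithm : IMergingAlgorithmLike
    {
        uint64_t merged_bytes = 0, merged_rows = 0, merged_blocks = 0;

        /// Designated initializers (C++20), as used by the overrides in this patch.
        MergedStats getMergedStats() const override
        {
            return {.bytes = merged_bytes, .rows = merged_rows, .blocks = merged_blocks};
        }
    };

    int main()
    {
        ToyAlgorithm algo;
        algo.merged_rows = 42;
        std::cout << algo.getMergedStats().rows << '\n';
    }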
|
||||
|
||||
// TODO: use when compiling with a clang version that supports it
|
||||
|
@ -16,6 +16,8 @@ public:
|
||||
void initialize(Inputs inputs) override;
|
||||
void consume(Input & input, size_t source_num) override;
|
||||
|
||||
MergedStats getMergedStats() const override { return merged_data->getMergedStats(); }
|
||||
|
||||
private:
|
||||
Block header;
|
||||
SortDescription description;
|
||||
|
@ -183,6 +183,8 @@ public:
|
||||
UInt64 totalAllocatedBytes() const { return total_allocated_bytes; }
|
||||
UInt64 maxBlockSize() const { return max_block_size; }
|
||||
|
||||
IMergingAlgorithm::MergedStats getMergedStats() const { return {.bytes = total_allocated_bytes, .rows = total_merged_rows, .blocks = total_chunks}; }
|
||||
|
||||
virtual ~MergedData() = default;
|
||||
|
||||
protected:
|
||||
|
@ -31,7 +31,7 @@ public:
|
||||
void consume(Input & input, size_t source_num) override;
|
||||
Status merge() override;
|
||||
|
||||
const MergedData & getMergedData() const { return merged_data; }
|
||||
MergedStats getMergedStats() const override { return merged_data.getMergedStats(); }
|
||||
|
||||
private:
|
||||
Block header;
|
||||
|
@ -30,6 +30,8 @@ public:
|
||||
void consume(Input & input, size_t source_num) override;
|
||||
Status merge() override;
|
||||
|
||||
MergedStats getMergedStats() const override { return merged_data.getMergedStats(); }
|
||||
|
||||
struct AggregateDescription;
|
||||
struct MapDescription;
|
||||
|
||||
|
@ -3,6 +3,11 @@
|
||||
#include <Processors/Merges/IMergingTransform.h>
|
||||
#include <Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event CollapsingSortedMilliseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -36,6 +41,11 @@ public:
|
||||
}
|
||||
|
||||
String getName() const override { return "CollapsingSortedTransform"; }
|
||||
|
||||
void onFinish() override
|
||||
{
|
||||
logMergedStats(ProfileEvents::CollapsingSortedMilliseconds, "Collapsed sorted", getLogger("CollapsingSortedTransform"));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -2,7 +2,10 @@
|
||||
|
||||
#include <Processors/Merges/Algorithms/IMergingAlgorithm.h>
|
||||
#include <Processors/IProcessor.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/formatReadable.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -110,6 +113,8 @@ public:
|
||||
|
||||
void work() override
|
||||
{
|
||||
Stopwatch watch{CLOCK_MONOTONIC_COARSE};
|
||||
|
||||
if (!state.init_chunks.empty())
|
||||
algorithm.initialize(std::move(state.init_chunks));
|
||||
|
||||
@ -147,6 +152,8 @@ public:
|
||||
// std::cerr << "Finished" << std::endl;
|
||||
state.is_finished = true;
|
||||
}
|
||||
|
||||
merging_elapsed_ns += watch.elapsedNanoseconds();
|
||||
}
|
||||
|
||||
protected:
|
||||
@ -156,7 +163,33 @@ protected:
|
||||
Algorithm algorithm;
|
||||
|
||||
/// Profile info.
|
||||
Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE};
|
||||
UInt64 merging_elapsed_ns = 0;
|
||||
|
||||
void logMergedStats(ProfileEvents::Event elapsed_ms_event, std::string_view transform_message, LoggerPtr log) const
|
||||
{
|
||||
auto stats = algorithm.getMergedStats();
|
||||
|
||||
UInt64 elapsed_ms = merging_elapsed_ns / 1000000LL;
|
||||
ProfileEvents::increment(elapsed_ms_event, elapsed_ms);
|
||||
|
||||
/// Don't print info for small parts (< 1M rows)
|
||||
if (stats.rows < 1000000)
|
||||
return;
|
||||
|
||||
double seconds = static_cast<double>(merging_elapsed_ns) / 1000000000ULL;
|
||||
|
||||
if (seconds == 0.0)
|
||||
{
|
||||
LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in 0 sec.",
|
||||
transform_message, stats.blocks, stats.rows, stats.bytes);
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in {} sec., {} rows/sec., {}/sec.",
|
||||
transform_message, stats.blocks, stats.rows, stats.bytes,
|
||||
seconds, stats.rows / seconds, ReadableSize(stats.bytes / seconds));
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
using IMergingTransformBase::state;
|
||||
|
@ -1,9 +1,12 @@
|
||||
#include <Processors/Merges/MergingSortedTransform.h>
|
||||
#include <Processors/Transforms/ColumnGathererTransform.h>
|
||||
#include <IO/WriteBuffer.h>
|
||||
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/formatReadable.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event MergingSortedMilliseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -18,7 +21,6 @@ MergingSortedTransform::MergingSortedTransform(
|
||||
UInt64 limit_,
|
||||
bool always_read_till_end_,
|
||||
WriteBuffer * out_row_sources_buf_,
|
||||
bool quiet_,
|
||||
bool use_average_block_sizes,
|
||||
bool have_all_inputs_)
|
||||
: IMergingTransform(
|
||||
@ -37,7 +39,6 @@ MergingSortedTransform::MergingSortedTransform(
|
||||
limit_,
|
||||
out_row_sources_buf_,
|
||||
use_average_block_sizes)
|
||||
, quiet(quiet_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -48,22 +49,7 @@ void MergingSortedTransform::onNewInput()
|
||||
|
||||
void MergingSortedTransform::onFinish()
|
||||
{
|
||||
if (quiet)
|
||||
return;
|
||||
|
||||
const auto & merged_data = algorithm.getMergedData();
|
||||
|
||||
auto log = getLogger("MergingSortedTransform");
|
||||
|
||||
double seconds = total_stopwatch.elapsedSeconds();
|
||||
|
||||
if (seconds == 0.0)
|
||||
LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in 0 sec.", merged_data.totalChunks(), merged_data.totalMergedRows());
|
||||
else
|
||||
LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in {} sec., {} rows/sec., {}/sec",
|
||||
merged_data.totalChunks(), merged_data.totalMergedRows(), seconds,
|
||||
merged_data.totalMergedRows() / seconds,
|
||||
ReadableSize(merged_data.totalAllocatedBytes() / seconds));
|
||||
logMergedStats(ProfileEvents::MergingSortedMilliseconds, "Merged sorted", getLogger("MergingSortedTransform"));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -21,7 +21,6 @@ public:
|
||||
UInt64 limit_ = 0,
|
||||
bool always_read_till_end_ = false,
|
||||
WriteBuffer * out_row_sources_buf_ = nullptr,
|
||||
bool quiet_ = false,
|
||||
bool use_average_block_sizes = false,
|
||||
bool have_all_inputs_ = true);
|
||||
|
||||
@ -30,9 +29,6 @@ public:
|
||||
protected:
|
||||
void onNewInput() override;
|
||||
void onFinish() override;
|
||||
|
||||
private:
|
||||
bool quiet = false;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -3,6 +3,10 @@
|
||||
#include <Processors/Merges/IMergingTransform.h>
|
||||
#include <Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event ReplacingSortedMilliseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -38,6 +42,11 @@ public:
|
||||
}
|
||||
|
||||
String getName() const override { return "ReplacingSorted"; }
|
||||
|
||||
void onFinish() override
|
||||
{
|
||||
logMergedStats(ProfileEvents::ReplacingSortedMilliseconds, "Replaced sorted", getLogger("ReplacingSortedTransform"));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -3,6 +3,11 @@
|
||||
#include <Processors/Merges/IMergingTransform.h>
|
||||
#include <Processors/Merges/Algorithms/SummingSortedAlgorithm.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event SummingSortedMilliseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -33,6 +38,11 @@ public:
|
||||
}
|
||||
|
||||
String getName() const override { return "SummingSortedTransform"; }
|
||||
|
||||
void onFinish() override
|
||||
{
|
||||
logMergedStats(ProfileEvents::SummingSortedMilliseconds, "Summed sorted", getLogger("SummingSortedTransform"));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -3,6 +3,10 @@
|
||||
#include <Processors/Merges/IMergingTransform.h>
|
||||
#include <Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event VersionedCollapsingSortedMilliseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -33,6 +37,11 @@ public:
|
||||
}
|
||||
|
||||
String getName() const override { return "VersionedCollapsingTransform"; }
|
||||
|
||||
void onFinish() override
|
||||
{
|
||||
logMergedStats(ProfileEvents::VersionedCollapsingSortedMilliseconds, "Versioned collapsed sorted", getLogger("VersionedCollapsingTransform"));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -352,7 +352,15 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas(
|
||||
|
||||
/// We have special logic for the local replica. It has to read less data, because in some cases it should
|
||||
/// merge states of aggregate functions or do some other important stuff other than reading from Disk.
|
||||
const auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
|
||||
auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
|
||||
const auto min_marks_for_concurrent_read_limit = std::numeric_limits<Int64>::max() >> 1;
|
||||
if (pool_settings.min_marks_for_concurrent_read > min_marks_for_concurrent_read_limit)
|
||||
{
|
||||
/// Limit min marks to read in case it's too big; this happened in tests due to settings randomization
|
||||
pool_settings.min_marks_for_concurrent_read = min_marks_for_concurrent_read_limit;
|
||||
multiplier = 1.0f;
|
||||
}
|
||||
|
||||
if (auto result = pool_settings.min_marks_for_concurrent_read * multiplier; canConvertTo<size_t>(result))
|
||||
pool_settings.min_marks_for_concurrent_read = static_cast<size_t>(result);
|
||||
else
|
||||
@ -521,7 +529,15 @@ Pipe ReadFromMergeTree::readInOrder(
|
||||
.number_of_current_replica = client_info.number_of_current_replica,
|
||||
};
|
||||
|
||||
const auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
|
||||
auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
|
||||
const auto min_marks_for_concurrent_read_limit = std::numeric_limits<Int64>::max() >> 1;
|
||||
if (pool_settings.min_marks_for_concurrent_read > min_marks_for_concurrent_read_limit)
|
||||
{
|
||||
/// Limit min marks to read in case it's too big; this happened in tests due to settings randomization
|
||||
pool_settings.min_marks_for_concurrent_read = min_marks_for_concurrent_read_limit;
|
||||
multiplier = 1.0f;
|
||||
}
|
||||
|
||||
if (auto result = pool_settings.min_marks_for_concurrent_read * multiplier; canConvertTo<size_t>(result))
|
||||
pool_settings.min_marks_for_concurrent_read = static_cast<size_t>(result);
|
||||
else
|
||||
|
@ -1,11 +1,15 @@
|
||||
#include <Processors/Transforms/ColumnGathererTransform.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/formatReadable.h>
|
||||
#include <Columns/ColumnSparse.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <iomanip>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event GatheringColumnMilliseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -33,6 +37,13 @@ ColumnGathererStream::ColumnGathererStream(
|
||||
throw Exception(ErrorCodes::EMPTY_DATA_PASSED, "There are no streams to gather");
|
||||
}
|
||||
|
||||
void ColumnGathererStream::updateStats(const IColumn & column)
|
||||
{
|
||||
merged_rows += column.size();
|
||||
merged_bytes += column.allocatedBytes();
|
||||
++merged_blocks;
|
||||
}
|
||||
|
||||
void ColumnGathererStream::initialize(Inputs inputs)
|
||||
{
|
||||
Columns source_columns;
|
||||
@ -82,7 +93,9 @@ IMergingAlgorithm::Status ColumnGathererStream::merge()
|
||||
{
|
||||
res.addColumn(source_to_fully_copy->column);
|
||||
}
|
||||
merged_rows += source_to_fully_copy->size;
|
||||
|
||||
updateStats(*source_to_fully_copy->column);
|
||||
|
||||
source_to_fully_copy->pos = source_to_fully_copy->size;
|
||||
source_to_fully_copy = nullptr;
|
||||
return Status(std::move(res));
|
||||
@ -96,8 +109,7 @@ IMergingAlgorithm::Status ColumnGathererStream::merge()
|
||||
{
|
||||
next_required_source = 0;
|
||||
Chunk res;
|
||||
merged_rows += sources.front().column->size();
|
||||
merged_bytes += sources.front().column->allocatedBytes();
|
||||
updateStats(*sources.front().column);
|
||||
res.addColumn(std::move(sources.front().column));
|
||||
sources.front().pos = sources.front().size = 0;
|
||||
return Status(std::move(res));
|
||||
@ -123,8 +135,8 @@ IMergingAlgorithm::Status ColumnGathererStream::merge()
|
||||
if (source_to_fully_copy && result_column->empty())
|
||||
{
|
||||
Chunk res;
|
||||
merged_rows += source_to_fully_copy->column->size();
|
||||
merged_bytes += source_to_fully_copy->column->allocatedBytes();
|
||||
updateStats(*source_to_fully_copy->column);
|
||||
|
||||
if (result_column->hasDynamicStructure())
|
||||
{
|
||||
auto col = result_column->cloneEmpty();
|
||||
@ -140,13 +152,13 @@ IMergingAlgorithm::Status ColumnGathererStream::merge()
|
||||
return Status(std::move(res));
|
||||
}
|
||||
|
||||
auto col = result_column->cloneEmpty();
|
||||
result_column.swap(col);
|
||||
auto return_column = result_column->cloneEmpty();
|
||||
result_column.swap(return_column);
|
||||
|
||||
Chunk res;
|
||||
merged_rows += col->size();
|
||||
merged_bytes += col->allocatedBytes();
|
||||
res.addColumn(std::move(col));
|
||||
updateStats(*return_column);
|
||||
|
||||
res.addColumn(std::move(return_column));
|
||||
return Status(std::move(res), row_sources_buf.eof() && !source_to_fully_copy);
|
||||
}
|
||||
|
||||
@ -185,31 +197,10 @@ ColumnGathererTransform::ColumnGathererTransform(
|
||||
toString(header.columns()));
|
||||
}
|
||||
|
||||
void ColumnGathererTransform::work()
|
||||
{
|
||||
Stopwatch stopwatch;
|
||||
IMergingTransform<ColumnGathererStream>::work();
|
||||
elapsed_ns += stopwatch.elapsedNanoseconds();
|
||||
}
|
||||
|
||||
void ColumnGathererTransform::onFinish()
|
||||
{
|
||||
auto merged_rows = algorithm.getMergedRows();
|
||||
auto merged_bytes = algorithm.getMergedRows();
|
||||
/// Don't print info for small parts (< 10M rows)
|
||||
if (merged_rows < 10000000)
|
||||
return;
|
||||
|
||||
double seconds = static_cast<double>(elapsed_ns) / 1000000000ULL;
|
||||
const auto & column_name = getOutputPort().getHeader().getByPosition(0).name;
|
||||
|
||||
if (seconds == 0.0)
|
||||
LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) in 0 sec.",
|
||||
column_name, static_cast<double>(merged_bytes) / merged_rows);
|
||||
else
|
||||
LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) in {} sec., {} rows/sec., {}/sec.",
|
||||
column_name, static_cast<double>(merged_bytes) / merged_rows, seconds,
|
||||
merged_rows / seconds, ReadableSize(merged_bytes / seconds));
|
||||
logMergedStats(ProfileEvents::GatheringColumnMilliseconds, fmt::format("Gathered column {}", column_name), log);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -72,10 +72,11 @@ public:
|
||||
template <typename Column>
|
||||
void gather(Column & column_res);
|
||||
|
||||
UInt64 getMergedRows() const { return merged_rows; }
|
||||
UInt64 getMergedBytes() const { return merged_bytes; }
|
||||
MergedStats getMergedStats() const override { return {.bytes = merged_bytes, .rows = merged_rows, .blocks = merged_blocks}; }
|
||||
|
||||
private:
|
||||
void updateStats(const IColumn & column);
|
||||
|
||||
/// Cache required fields
|
||||
struct Source
|
||||
{
|
||||
@ -105,6 +106,7 @@ private:
|
||||
ssize_t next_required_source = -1;
|
||||
UInt64 merged_rows = 0;
|
||||
UInt64 merged_bytes = 0;
|
||||
UInt64 merged_blocks = 0;
|
||||
};
|
||||
|
||||
class ColumnGathererTransform final : public IMergingTransform<ColumnGathererStream>
|
||||
@ -120,12 +122,8 @@ public:
|
||||
|
||||
String getName() const override { return "ColumnGathererTransform"; }
|
||||
|
||||
void work() override;
|
||||
|
||||
protected:
|
||||
void onFinish() override;
|
||||
UInt64 elapsed_ns = 0;
|
||||
|
||||
LoggerPtr log;
|
||||
};
|
||||
|
||||
|
@ -511,6 +511,16 @@ void MergeJoinAlgorithm::logElapsed(double seconds)
|
||||
stat.max_blocks_loaded);
|
||||
}
|
||||
|
||||
IMergingAlgorithm::MergedStats MergeJoinAlgorithm::getMergedStats() const
|
||||
{
|
||||
return
|
||||
{
|
||||
.bytes = stat.num_bytes[0] + stat.num_bytes[1],
|
||||
.rows = stat.num_rows[0] + stat.num_rows[1],
|
||||
.blocks = stat.num_blocks[0] + stat.num_blocks[1],
|
||||
};
|
||||
}
|
||||
|
||||
static void prepareChunk(Chunk & chunk)
|
||||
{
|
||||
if (!chunk)
|
||||
@ -547,6 +557,7 @@ void MergeJoinAlgorithm::consume(Input & input, size_t source_num)
|
||||
{
|
||||
stat.num_blocks[source_num] += 1;
|
||||
stat.num_rows[source_num] += input.chunk.getNumRows();
|
||||
stat.num_bytes[source_num] += input.chunk.allocatedBytes();
|
||||
}
|
||||
|
||||
prepareChunk(input.chunk);
|
||||
@ -1271,7 +1282,7 @@ MergeJoinTransform::MergeJoinTransform(
|
||||
|
||||
void MergeJoinTransform::onFinish()
|
||||
{
|
||||
algorithm.logElapsed(total_stopwatch.elapsedSeconds());
|
||||
algorithm.logElapsed(static_cast<double>(merging_elapsed_ns) / 1000000000ULL);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -245,6 +245,8 @@ public:
|
||||
void setAsofInequality(ASOFJoinInequality asof_inequality_);
|
||||
|
||||
void logElapsed(double seconds);
|
||||
MergedStats getMergedStats() const override;
|
||||
|
||||
private:
|
||||
std::optional<Status> handleAnyJoinState();
|
||||
Status anyJoin();
|
||||
@ -280,6 +282,7 @@ private:
|
||||
{
|
||||
size_t num_blocks[2] = {0, 0};
|
||||
size_t num_rows[2] = {0, 0};
|
||||
size_t num_bytes[2] = {0, 0};
|
||||
|
||||
size_t max_blocks_loaded = 0;
|
||||
};
|
||||
|
@ -185,7 +185,6 @@ void MergeSortingTransform::consume(Chunk chunk)
|
||||
|
||||
if (!external_merging_sorted)
|
||||
{
|
||||
bool quiet = false;
|
||||
bool have_all_inputs = false;
|
||||
bool use_average_block_sizes = false;
|
||||
|
||||
@ -199,7 +198,6 @@ void MergeSortingTransform::consume(Chunk chunk)
|
||||
limit,
|
||||
/*always_read_till_end_=*/ false,
|
||||
nullptr,
|
||||
quiet,
|
||||
use_average_block_sizes,
|
||||
have_all_inputs);
|
||||
|
||||
|
@ -58,6 +58,16 @@ static void prepareChunk(Chunk & chunk)
|
||||
chunk.setColumns(std::move(columns), num_rows);
|
||||
}
|
||||
|
||||
IMergingAlgorithm::MergedStats PasteJoinAlgorithm::getMergedStats() const
|
||||
{
|
||||
return
|
||||
{
|
||||
.bytes = stat.num_bytes[0] + stat.num_bytes[1],
|
||||
.rows = stat.num_rows[0] + stat.num_rows[1],
|
||||
.blocks = stat.num_blocks[0] + stat.num_blocks[1],
|
||||
};
|
||||
}
|
||||
|
||||
void PasteJoinAlgorithm::initialize(Inputs inputs)
|
||||
{
|
||||
if (inputs.size() != 2)
|
||||
|
@ -35,8 +35,7 @@ public:
|
||||
void initialize(Inputs inputs) override;
|
||||
void consume(Input & input, size_t source_num) override;
|
||||
Status merge() override;
|
||||
|
||||
void logElapsed(double seconds);
|
||||
MergedStats getMergedStats() const override;
|
||||
|
||||
private:
|
||||
Chunk createBlockWithDefaults(size_t source_num);
|
||||
@ -55,6 +54,7 @@ private:
|
||||
{
|
||||
size_t num_blocks[2] = {0, 0};
|
||||
size_t num_rows[2] = {0, 0};
|
||||
size_t num_bytes[2] = {0, 0};
|
||||
|
||||
size_t max_blocks_loaded = 0;
|
||||
};
|
||||
|
@ -1157,8 +1157,7 @@ void WindowTransform::appendChunk(Chunk & chunk)
|
||||
// Initialize output columns.
|
||||
for (auto & ws : workspaces)
|
||||
{
|
||||
if (ws.window_function_impl)
|
||||
block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices));
|
||||
block.casted_columns.push_back(ws.window_function_impl ? ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices) : nullptr);
|
||||
|
||||
block.output_columns.push_back(ws.aggregate_function->getResultType()
|
||||
->createColumn());
|
||||
|
@ -83,7 +83,7 @@ TEST(MergingSortedTest, SimpleBlockSizeTest)
|
||||
EXPECT_EQ(pipe.numOutputPorts(), 3);
|
||||
|
||||
auto transform = std::make_shared<MergingSortedTransform>(pipe.getHeader(), pipe.numOutputPorts(), sort_description,
|
||||
8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true);
|
||||
8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true);
|
||||
|
||||
pipe.addTransform(std::move(transform));
|
||||
|
||||
@ -125,7 +125,7 @@ TEST(MergingSortedTest, MoreInterestingBlockSizes)
|
||||
EXPECT_EQ(pipe.numOutputPorts(), 3);
|
||||
|
||||
auto transform = std::make_shared<MergingSortedTransform>(pipe.getHeader(), pipe.numOutputPorts(), sort_description,
|
||||
8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true);
|
||||
8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true);
|
||||
|
||||
pipe.addTransform(std::move(transform));
|
||||
|
||||
|
@ -8,10 +8,10 @@
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event MergesTimeMilliseconds;
|
||||
extern const Event MergedUncompressedBytes;
|
||||
extern const Event MergedRows;
|
||||
extern const Event Merge;
|
||||
extern const Event MutatedRows;
|
||||
extern const Event MutatedUncompressedBytes;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
@ -63,18 +63,17 @@ public:
|
||||
void updateWatch()
|
||||
{
|
||||
UInt64 watch_curr_elapsed = merge_list_element_ptr->watch.elapsed();
|
||||
ProfileEvents::increment(ProfileEvents::MergesTimeMilliseconds, (watch_curr_elapsed - watch_prev_elapsed) / 1000000);
|
||||
watch_prev_elapsed = watch_curr_elapsed;
|
||||
}
|
||||
|
||||
void operator() (const Progress & value)
|
||||
void operator()(const Progress & value)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::MergedUncompressedBytes, value.read_bytes);
|
||||
if (stage.is_first)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::MergedRows, value.read_rows);
|
||||
ProfileEvents::increment(ProfileEvents::Merge);
|
||||
}
|
||||
if (merge_list_element_ptr->is_mutation)
|
||||
updateProfileEvents(value, ProfileEvents::MutatedRows, ProfileEvents::MutatedUncompressedBytes);
|
||||
else
|
||||
updateProfileEvents(value, ProfileEvents::MergedRows, ProfileEvents::MergedUncompressedBytes);
|
||||
|
||||
|
||||
updateWatch();
|
||||
|
||||
merge_list_element_ptr->bytes_read_uncompressed += value.read_bytes;
|
||||
@ -90,6 +89,14 @@ public:
|
||||
std::memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
void updateProfileEvents(const Progress & value, ProfileEvents::Event rows_event, ProfileEvents::Event bytes_event) const
|
||||
{
|
||||
ProfileEvents::increment(bytes_event, value.read_bytes);
|
||||
if (stage.is_first)
|
||||
ProfileEvents::increment(rows_event, value.read_rows);
|
||||
}
|
||||
};
|
||||
|
||||
}
|