Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 08:32:02 +00:00)

Commit 9a880c3bb9 — Merge branch 'master' into clang-18-ci
.clang-tidy

@@ -10,7 +10,7 @@
 # TODO Let clang-tidy check headers in further directories
 # --> HeaderFilterRegex: '^.*/(src|base|programs|utils)/.*(h|hpp)$'
-HeaderFilterRegex: '^.*/(base)/.*(h|hpp)$'
+HeaderFilterRegex: '^.*/(base|programs|utils)/.*(h|hpp)$'
 
 Checks: '*,
     -abseil-*,
18  .github/workflows/master.yml  vendored

@@ -305,7 +305,7 @@ jobs:
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
   MarkReleaseReady:
-    if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+    if: ${{ !failure() && !cancelled() }}
     needs:
       - BuilderBinDarwin
       - BuilderBinDarwinAarch64
@@ -313,9 +313,25 @@ jobs:
       - BuilderDebAarch64
     runs-on: [self-hosted, style-checker]
     steps:
+      - name: Debug
+        run: |
+          echo need with different filters
+          cat << 'EOF'
+          ${{ toJSON(needs) }}
+          ${{ toJSON(needs.*.result) }}
+          no failures ${{ !contains(needs.*.result, 'failure') }}
+          no skips ${{ !contains(needs.*.result, 'skipped') }}
+          no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+          EOF
+      - name: Not ready
+        # fail the job to be able to restart it
+        if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
+        run: exit 1
       - name: Check out repository code
+        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
         uses: ClickHouse/checkout@v1
       - name: Mark Commit Release Ready
+        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 mark_release_ready.py
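
The MarkReleaseReady rewrite above trades one job-level guard for per-step guards: the job now runs whenever nothing upstream failed or was cancelled, a Debug step prints the `needs` results, and a deliberately failing "Not ready" step lets operators restart the job after rerunning the missing dependencies (the commit's own comment says the step fails "to be able to restart it"; presumably a skipped job cannot be restarted the same way). A minimal sketch of the pattern — `gate`, `build_a`, and `build_b` are placeholder names, not from the commit:

    jobs:
      gate:
        # run even when a dependency was skipped, so the steps below decide
        if: ${{ !failure() && !cancelled() }}
        needs: [build_a, build_b]
        runs-on: ubuntu-latest
        steps:
          - name: Not ready
            # fail (restartably) if any dependency was skipped or failed
            if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
            run: exit 1
          - name: Do the real work
            if: ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
            run: echo "all dependencies succeeded"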
855  .github/workflows/pull_request.yml  vendored

@@ -13,9 +13,7 @@ on:  # yamllint disable-line rule:truthy
       - opened
     branches:
       - master
-##########################################################################################
-##################################### SMALL CHECKS #######################################
-##########################################################################################
 jobs:
   RunConfig:
     runs-on: [self-hosted, style-checker-aarch64]
@@ -70,13 +68,13 @@ jobs:
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --post --job-name 'Style check'
   BuildDockers:
     needs: [RunConfig]
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
     uses: ./.github/workflows/reusable_docker.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   StyleCheck:
     needs: [RunConfig, BuildDockers]
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}}
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Style check
@@ -89,19 +87,9 @@ jobs:
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
-  DocsCheck:
-    needs: [RunConfig, StyleCheck]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Docs check
-      runner_type: func-tester-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 docs_check.py
   FastTest:
     needs: [RunConfig, StyleCheck]
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Fast test
@@ -109,818 +97,83 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
         python3 fast_test_check.py
-  CompatibilityCheckX86:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Compatibility check (amd64)
-      runner_type: style-checker
-      data: ${{ needs.RunConfig.outputs.data }}
-  CompatibilityCheckAarch64:
-    needs: [RunConfig, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Compatibility check (aarch64)
-      runner_type: style-checker
-      data: ${{ needs.RunConfig.outputs.data }}
-  #########################################################################################
-  #################################### ORDINARY BUILDS ####################################
-  #########################################################################################
-  BuilderDebDebug:
+  ################################# Main stages #################################
+  # for main CI chain
+  #
+  Builds_1:
     needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
+    # using callable wf (reusable_stage.yml) allows to group all nested jobs under a tab
+    uses: ./.github/workflows/reusable_build_stage.yml
     with:
-      build_name: package_debug
+      stage: Builds_1
       data: ${{ needs.RunConfig.outputs.data }}
-  BuilderDebRelease:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
+  Tests_1:
+    needs: [RunConfig, Builds_1]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
+    # using callable wf (reusable_stage.yml) allows to group all nested jobs under a tab
+    uses: ./.github/workflows/reusable_test_stage.yml
     with:
-      build_name: package_release
-      checkout_depth: 0
+      stage: Tests_1
       data: ${{ needs.RunConfig.outputs.data }}
-  BuilderDebReleaseCoverage:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
+  Builds_2:
+    needs: [RunConfig, Builds_1]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_2') }}
+    # using callable wf (reusable_stage.yml) allows to group all nested jobs under a tab
+    uses: ./.github/workflows/reusable_build_stage.yml
     with:
-      build_name: package_release_coverage
-      checkout_depth: 0
+      stage: Builds_2
       data: ${{ needs.RunConfig.outputs.data }}
-  BuilderDebAarch64:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
+  Tests_2:
+    needs: [RunConfig, Builds_2]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
+    # using callable wf (reusable_stage.yml) allows to group all nested jobs under a tab
+    uses: ./.github/workflows/reusable_test_stage.yml
     with:
-      build_name: package_aarch64
-      checkout_depth: 0
+      stage: Tests_2
       data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinRelease:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_release
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderDebAsan:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: package_asan
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderDebUBsan:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: package_ubsan
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderDebTsan:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: package_tsan
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderDebMsan:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: package_msan
-      data: ${{ needs.RunConfig.outputs.data }}
-  ##########################################################################################
-  ##################################### SPECIAL BUILDS #####################################
-  ##########################################################################################
-  BuilderBinClangTidy:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_tidy
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinDarwin:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_darwin
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinAarch64:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinFreeBSD:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_freebsd
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinDarwinAarch64:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_darwin_aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinPPC64:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_ppc64le
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinAmd64Compat:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_amd64_compat
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinAmd64Musl:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_amd64_musl
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinAarch64V80Compat:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_aarch64_v80compat
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinRISCV64:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_riscv64
-      data: ${{ needs.RunConfig.outputs.data }}
-  BuilderBinS390X:
-    needs: [RunConfig, FastTest]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_build.yml
-    with:
-      build_name: binary_s390x
-      data: ${{ needs.RunConfig.outputs.data }}
-  ############################################################################################
-  ##################################### Docker images #######################################
-  ############################################################################################
-  DockerServerImage:
-    needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Docker server image
-      runner_type: style-checker
-      data: ${{ needs.RunConfig.outputs.data }}
-  DockerKeeperImage:
-    needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Docker keeper image
-      runner_type: style-checker
-      data: ${{ needs.RunConfig.outputs.data }}
-  ############################################################################################
-  ##################################### BUILD REPORTER #######################################
-  ############################################################################################
-  BuilderReport:
+  ################################# Reports #################################
+  # Reports should be run even if Builds_1/2 fail, so put them separately in wf (not in Tests_1/2)
+  Builds_1_Report:
     # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() }}
+    if: ${{ !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
     needs:
       - RunConfig
-      - BuilderDebAarch64
-      - BuilderDebAsan
-      - BuilderDebDebug
-      - BuilderDebMsan
-      - BuilderDebRelease
-      - BuilderDebTsan
-      - BuilderDebUBsan
+      - Builds_1
     uses: ./.github/workflows/reusable_test.yml
    with:
       test_name: ClickHouse build check
-      runner_type: style-checker
+      runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-  BuilderSpecialReport:
+  Builds_2_Report:
     # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() }}
+    if: ${{ !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
     needs:
       - RunConfig
-      - BuilderBinAarch64
-      - BuilderBinDarwin
-      - BuilderBinDarwinAarch64
-      - BuilderBinFreeBSD
-      - BuilderBinPPC64
-      - BuilderBinRISCV64
-      - BuilderBinS390X
-      - BuilderBinAmd64Compat
-      - BuilderBinAarch64V80Compat
-      - BuilderBinClangTidy
-      - BuilderDebReleaseCoverage
-      - BuilderBinRelease
+      - Builds_2
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse special build check
-      runner_type: style-checker
-      data: ${{ needs.RunConfig.outputs.data }}
-  ############################################################################################
-  #################################### INSTALL PACKAGES ######################################
-  ############################################################################################
-  InstallPackagesTestRelease:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Install packages (amd64)
-      runner_type: style-checker
-      data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 install_check.py "$CHECK_NAME"
-  InstallPackagesTestAarch64:
-    needs: [RunConfig, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Install packages (arm64)
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 install_check.py "$CHECK_NAME"
-  ##############################################################################################
-  ########################### FUNCTIONAl STATELESS TESTS #######################################
-  ##############################################################################################
-  FunctionalStatelessTestRelease:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (release)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestReleaseAnalyzerS3Replicated:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (release, analyzer, s3, DatabaseReplicated)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestS3Debug:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (debug, s3 storage)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestS3Tsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (tsan, s3 storage)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestAarch64:
-    needs: [RunConfig, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (aarch64)
-      runner_type: func-tester-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (asan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestTsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (tsan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestMsan:
-    needs: [RunConfig, BuilderDebMsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (msan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestUBsan:
-    needs: [RunConfig, BuilderDebUBsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (ubsan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestDebug:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (debug)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestFlakyCheck:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests flaky check (asan)
-      data: ${{ needs.RunConfig.outputs.data }}
-      runner_type: func-tester
-  TestsBugfixCheck:
-    needs: [RunConfig, StyleCheck]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Bugfix validation
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ##############################################################################################
-  ############################ FUNCTIONAl STATEFUL TESTS #######################################
-  ##############################################################################################
-  FunctionalStatefulTestRelease:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (release)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestAarch64:
-    needs: [RunConfig, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (aarch64)
-      runner_type: func-tester-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (asan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestTsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (tsan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestMsan:
-    needs: [RunConfig, BuilderDebMsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (msan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestUBsan:
-    needs: [RunConfig, BuilderDebUBsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (ubsan)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestDebug:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (debug)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  # Parallel replicas
-  FunctionalStatefulTestDebugParallelReplicas:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (debug, ParallelReplicas)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestUBsanParallelReplicas:
-    needs: [RunConfig, BuilderDebUBsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (ubsan, ParallelReplicas)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestMsanParallelReplicas:
-    needs: [RunConfig, BuilderDebMsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (msan, ParallelReplicas)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestTsanParallelReplicas:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (tsan, ParallelReplicas)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestAsanParallelReplicas:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (asan, ParallelReplicas)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestReleaseParallelReplicas:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (release, ParallelReplicas)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ##############################################################################################
-  ########################### ClickBench #######################################################
-  ##############################################################################################
-  ClickBenchAMD64:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: ClickBench (amd64)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 clickbench.py "$CHECK_NAME"
-  ClickBenchAarch64:
-    needs: [RunConfig, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: ClickBench (aarch64)
-      runner_type: func-tester-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 clickbench.py "$CHECK_NAME"
-  ##############################################################################################
-  ######################################### STRESS TESTS #######################################
-  ##############################################################################################
-  StressTestAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stress test (asan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  StressTestTsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stress test (tsan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  StressTestMsan:
-    needs: [RunConfig, BuilderDebMsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stress test (msan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  StressTestUBsan:
-    needs: [RunConfig, BuilderDebUBsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stress test (ubsan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  StressTestDebug:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stress test (debug)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ##############################################################################################
-  ######################################### UPGRADE CHECK ######################################
-  ##############################################################################################
-  UpgradeCheckAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Upgrade check (asan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  UpgradeCheckTsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Upgrade check (tsan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  UpgradeCheckMsan:
-    needs: [RunConfig, BuilderDebMsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Upgrade check (msan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  UpgradeCheckDebug:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Upgrade check (debug)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ##############################################################################################
-  ##################################### AST FUZZERS ############################################
-  ##############################################################################################
-  ASTFuzzerTestAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: AST fuzzer (asan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ASTFuzzerTestTsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: AST fuzzer (tsan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ASTFuzzerTestUBSan:
-    needs: [RunConfig, BuilderDebUBsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: AST fuzzer (ubsan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ASTFuzzerTestMSan:
-    needs: [RunConfig, BuilderDebMsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: AST fuzzer (msan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ASTFuzzerTestDebug:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: AST fuzzer (debug)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  #############################################################################################
-  ############################# INTEGRATION TESTS #############################################
-  #############################################################################################
-  IntegrationTestsAnalyzerAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Integration tests (asan, analyzer)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  IntegrationTestsTsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Integration tests (tsan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  IntegrationTestsAarch64:
-    needs: [RunConfig, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Integration tests (aarch64)
-      # FIXME: there is no stress-tester for aarch64. func-tester-aarch64 is ok?
-      runner_type: func-tester-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-  IntegrationTestsFlakyCheck:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Integration tests flaky check (asan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  #############################################################################################
-  #################################### UNIT TESTS #############################################
-  #############################################################################################
-  UnitTestsAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Unit tests (asan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  UnitTestsRelease:
-    needs: [RunConfig, BuilderBinRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Unit tests (release)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  UnitTestsTsan:
-    needs: [RunConfig, BuilderDebTsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Unit tests (tsan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  UnitTestsMsan:
-    needs: [RunConfig, BuilderDebMsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Unit tests (msan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  UnitTestsUBsan:
-    needs: [RunConfig, BuilderDebUBsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Unit tests (ubsan)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  #############################################################################################
-  #################################### PERFORMANCE TESTS ######################################
-  #############################################################################################
-  PerformanceComparisonX86:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Performance Comparison
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  PerformanceComparisonAarch:
-    needs: [RunConfig, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Performance Comparison Aarch64
-      runner_type: func-tester-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
-  ##############################################################################################
-  ###################################### SQLANCER FUZZERS ######################################
-  ##############################################################################################
-  SQLancerTestRelease:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: SQLancer (release)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  SQLancerTestDebug:
-    needs: [RunConfig, BuilderDebDebug]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: SQLancer (debug)
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
+  ################################# Stage Final #################################
+  #
   FinishCheck:
     if: ${{ !failure() && !cancelled() }}
-    needs:
-      - BuilderReport
-      - BuilderSpecialReport
-      - DocsCheck
-      - FastTest
-      - TestsBugfixCheck
-      - FunctionalStatelessTestDebug
-      - FunctionalStatelessTestRelease
-      - FunctionalStatelessTestAarch64
-      - FunctionalStatelessTestAsan
-      - FunctionalStatelessTestTsan
-      - FunctionalStatelessTestMsan
-      - FunctionalStatelessTestUBsan
-      - FunctionalStatefulTestDebug
-      - FunctionalStatefulTestRelease
-      - FunctionalStatefulTestAarch64
-      - FunctionalStatefulTestAsan
-      - FunctionalStatefulTestTsan
-      - FunctionalStatefulTestMsan
-      - FunctionalStatefulTestUBsan
-      - FunctionalStatelessTestS3Debug
-      - FunctionalStatelessTestS3Tsan
-      - FunctionalStatelessTestReleaseAnalyzerS3Replicated
-      - FunctionalStatefulTestReleaseParallelReplicas
-      - FunctionalStatefulTestAsanParallelReplicas
-      - FunctionalStatefulTestTsanParallelReplicas
-      - FunctionalStatefulTestMsanParallelReplicas
-      - FunctionalStatefulTestUBsanParallelReplicas
-      - FunctionalStatefulTestDebugParallelReplicas
-      - StressTestDebug
-      - StressTestAsan
-      - StressTestTsan
-      - StressTestMsan
-      - StressTestUBsan
-      - UpgradeCheckAsan
-      - UpgradeCheckTsan
-      - UpgradeCheckMsan
-      - UpgradeCheckDebug
-      - ASTFuzzerTestDebug
-      - ASTFuzzerTestAsan
-      - ASTFuzzerTestTsan
-      - ASTFuzzerTestMSan
-      - ASTFuzzerTestUBSan
-      - IntegrationTestsAnalyzerAsan
-      - IntegrationTestsTsan
-      - IntegrationTestsAarch64
-      - IntegrationTestsFlakyCheck
-      - PerformanceComparisonX86
-      - PerformanceComparisonAarch
-      - UnitTestsAsan
-      - UnitTestsTsan
-      - UnitTestsMsan
-      - UnitTestsUBsan
-      - UnitTestsRelease
-      - CompatibilityCheckX86
-      - CompatibilityCheckAarch64
-      - SQLancerTestRelease
-      - SQLancerTestDebug
+    needs: [Tests_1, Tests_2]
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
       - name: Finish label
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 finish_check.py
-          python3 merge_pr.py --check-approved
-  ##############################################################################################
-  ############################ SQLLOGIC TEST ###################################################
-  ##############################################################################################
-  SQLLogicTestRelease:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Sqllogic test (release)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  ##############################################################################################
-  ##################################### SQL TEST ###############################################
-  ##############################################################################################
-  SQLTest:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: SQLTest
-      runner_type: fuzzer-unit-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  #############################################################################################
-  ###################################### NOT IN FINISH ########################################
   #############################################################################################
   ###################################### JEPSEN TESTS #########################################
   #############################################################################################
@@ -931,19 +184,11 @@ jobs:
     # we need concurrency as the job uses dedicated instances in the cloud
     concurrency:
       group: jepsen
-    if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, BuilderBinRelease]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse Keeper Jepsen') }}
+    # jepsen needs binary_release build which is in Builds_2
+    needs: [RunConfig, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse Keeper Jepsen
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-  #############################################################################################
-  ####################################### libFuzzer ###########################################
-  #############################################################################################
-  libFuzzer:
-    if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, StyleCheck]
-    uses: ./.github/workflows/libfuzzer.yml
-    with:
-      data: ${{ needs.RunConfig.outputs.data }}
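
Most of the new `if:` conditions in this file key off a single JSON blob published by the RunConfig job. The diff references `jobs_data.jobs_to_do`, `stages_data.stages_to_do`, `docker_data.missing_multi`, and (in reusable_build.yml below) `git_ref`; judging only from those reads, the output plausibly looks like the sketch below. The concrete values and any further fields are assumptions — the payload is built by tests/ci/ci.py and never shown on this page:

    {
      "git_ref": "<ref under test>",
      "jobs_data":   { "jobs_to_do": ["Style check", "Fast test", "ClickHouse build check"] },
      "stages_data": {
        "stages_to_do": ["Builds_1", "Tests_1", "Builds_2", "Tests_2"],
        "Builds_1": [ { "job_name": "package_release", "runner_type": "builder" } ]
      },
      "docker_data": { "missing_multi": [] }
    }

Each stage job gates on membership in `stages_to_do`, which is how the 800-line job list above collapses into four chained stage jobs plus two report jobs.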
18  .github/workflows/release_branches.yml  vendored

@@ -206,7 +206,7 @@ jobs:
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
   MarkReleaseReady:
-    if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+    if: ${{ !failure() && !cancelled() }}
     needs:
       - BuilderBinDarwin
       - BuilderBinDarwinAarch64
@@ -214,9 +214,25 @@ jobs:
       - BuilderDebAarch64
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
+      - name: Debug
+        run: |
+          echo need with different filters
+          cat << 'EOF'
+          ${{ toJSON(needs) }}
+          ${{ toJSON(needs.*.result) }}
+          no failures ${{ !contains(needs.*.result, 'failure') }}
+          no skips ${{ !contains(needs.*.result, 'skipped') }}
+          no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+          EOF
+      - name: Not ready
+        # fail the job to be able to restart it
+        if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
+        run: exit 1
       - name: Check out repository code
+        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
         uses: ClickHouse/checkout@v1
       - name: Mark Commit Release Ready
+        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 mark_release_ready.py
3  .github/workflows/reusable_build.yml  vendored

@@ -43,7 +43,8 @@ jobs:
     runs-on: [self-hosted, '${{inputs.runner_type}}']
     steps:
       - name: Check out repository code
-        uses: ClickHouse/checkout@v1
+        # WIP: temporary try commit with limited parallelization of checkout
+        uses: ClickHouse/checkout@0be3f7b3098bae494d3ef5d29d2e0676fb606232
         with:
           clear-repository: true
           ref: ${{ fromJson(inputs.data).git_ref }}
32  .github/workflows/reusable_build_stage.yml  vendored  Normal file

@@ -0,0 +1,32 @@
+### FIXME: merge reusable_test.yml and reusable_build.yml as they are almost identical
+# and then merge reusable_build_stage.yml and reusable_test_stage.yml
+
+name: BuildStageWF
+'on':
+  workflow_call:
+    inputs:
+      stage:
+        description: stage name
+        type: string
+        required: true
+      data:
+        description: ci data
+        type: string
+        required: true
+
+jobs:
+  s:
+    if: ${{ !failure() && !cancelled() }}
+    strategy:
+      fail-fast: false
+      matrix:
+        job_name_and_runner_type: ${{ fromJson(inputs.data).stages_data[inputs.stage] }}
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: ${{ matrix.job_name_and_runner_type.job_name }}
+      runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
+      # don't forget to pass force flag (no ci cache/no reuse) - once it's needed
+      force: false
+      # for now let's do a deep checkout for builds
+      checkout_depth: 0
+      data: ${{ inputs.data }}
25  .github/workflows/reusable_test_stage.yml  vendored  Normal file

@@ -0,0 +1,25 @@
+name: StageWF
+'on':
+  workflow_call:
+    inputs:
+      stage:
+        description: stage name
+        type: string
+        required: true
+      data:
+        description: ci data
+        type: string
+        required: true
+
+jobs:
+  s:
+    if: ${{ !failure() && !cancelled() }}
+    strategy:
+      fail-fast: false
+      matrix:
+        job_name_and_runner_type: ${{ fromJson(inputs.data).stages_data[inputs.stage] }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: ${{ matrix.job_name_and_runner_type.job_name }}
+      runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
+      data: ${{ inputs.data }}
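
Both new stage workflows share one trick: the matrix is not hard-coded but comes from the `data` input, so the set of builds or tests per stage is decided at runtime by RunConfig. A sketch of how a caller drives it and what each matrix element must carry — the field names follow what the workflows actually read, while the runner label is illustrative:

    # caller side (as in pull_request.yml above)
    Builds_1:
      needs: [RunConfig, FastTest]
      uses: ./.github/workflows/reusable_build_stage.yml
      with:
        stage: Builds_1
        data: ${{ needs.RunConfig.outputs.data }}

    # one matrix element inside fromJson(data).stages_data.Builds_1:
    #   { "job_name": "package_release", "runner_type": "builder" }

Because each stage is a single matrix-expanded `uses:` of a reusable workflow, every nested job shows up grouped under one stage tab in the Actions UI, which is what the "# using callable wf ... allows to group all nested jobs under a tab" comment in the diff is after.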
2  .gitignore  vendored

@@ -165,7 +165,7 @@ tests/queries/0_stateless/*.expect.history
 tests/integration/**/_gen
 
 # rust
-/rust/**/target
+/rust/**/target*
 # It is autogenerated from *.in
 /rust/**/.cargo/config.toml
 /rust/**/vendor
CMakeLists.txt

@@ -319,7 +319,8 @@ if (COMPILER_CLANG)
     endif()
 endif ()
 
-set (COMPILER_FLAGS "${COMPILER_FLAGS}")
+# Disable floating-point expression contraction in order to get consistent floating point calculation results across platforms
+set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffp-contract=off")
 
 # Our built-in unwinder only supports DWARF version up to 4.
 set (DEBUG_INFO_FLAGS "-g")
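
The new comment states the motivation: with contraction enabled, a compiler may fuse `a * b + c` into a single fused multiply-add, which rounds once instead of twice and therefore produces slightly different results on targets with and without FMA. A minimal illustration, not taken from the repository:

    #include <cstdio>

    // With contraction enabled (e.g. -ffp-contract=fast), the compiler may
    // emit fma(a, b, c) here: a*b is not rounded to double before the add,
    // so the result can differ in the last bit from the two-step version.
    double contracted(double a, double b, double c) { return a * b + c; }

    // Routing the product through a volatile (or compiling the whole file
    // with -ffp-contract=off) forces the separately rounded multiply.
    double two_step(double a, double b, double c)
    {
        volatile double p = a * b;  // rounded to double here
        return p + c;
    }

    int main()
    {
        double a = 1.0 + 1e-16, b = 1.0 - 1e-16, c = -1.0;
        // a*b rounds to exactly 1.0 in double, so two_step yields 0;
        // an fma-contracted version keeps the -1e-32 residue instead.
        std::printf("%.17g %.17g\n", contracted(a, b, c), two_step(a, b, c));
    }

Pinning `-ffp-contract=off` globally makes `contracted()` behave like `two_step()` on every platform, which is exactly the cross-platform consistency the CMake comment asks for.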
23  README.md

@@ -31,15 +31,30 @@ curl https://clickhouse.com/ | sh
 * [Static Analysis (SonarCloud)](https://sonarcloud.io/project/issues?resolved=false&id=ClickHouse_ClickHouse) proposes C++ quality improvements.
 * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
+
+## Monthly Release & Community Call
+
+Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
+
+* [v24.3 Community Call](https://clickhouse.com/company/events/v24-3-community-release-call) - Mar 26
+* [v24.4 Community Call](https://clickhouse.com/company/events/v24-4-community-release-call) - Apr 30
+
 ## Upcoming Events
-Keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com.
+
+Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
+
+* [ClickHouse Meetup in Bellevue](https://www.meetup.com/clickhouse-seattle-user-group/events/298650371/) - Mar 11
+* [ClickHouse Meetup at Ramp's Offices in NYC](https://www.meetup.com/clickhouse-new-york-user-group/events/298640542/) - Mar 19
+* [ClickHouse Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/299479750/) - Mar 20
+* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/298997115/) - Mar 21
+* [ClickHouse Meetup in Bengaluru](https://www.meetup.com/clickhouse-bangalore-user-group/events/299479850/) - Mar 23
+* [ClickHouse Meetup in Zurich](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/299628922/) - Apr 16
+* [ClickHouse Meetup in Copenhagen](https://www.meetup.com/clickhouse-denmark-meetup-group/events/299629133/) - Apr 23
+* [ClickHouse Meetup in Dubai](https://www.meetup.com/clickhouse-dubai-meetup-group/events/299629189/) - May 28
+
 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
-* **Recording available**: [**v24.1 Release Webinar**](https://www.youtube.com/watch?v=pBF9g0wGAGs) All the features of 24.1, one convenient video! Watch it now!
-* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
+* **Recording available**: [**v24.2 Release Call**](https://www.youtube.com/watch?v=iN2y-TK8f3A) All the features of 24.2, one convenient video! Watch it now!
 
 ## Interested in joining ClickHouse and making it your full-time job?
@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
|
|||||||
|
|
||||||
| Version | Supported |
|
| Version | Supported |
|
||||||
|:-|:-|
|
|:-|:-|
|
||||||
|
| 24.2 | ✔️ |
|
||||||
| 24.1 | ✔️ |
|
| 24.1 | ✔️ |
|
||||||
| 23.12 | ✔️ |
|
| 23.12 | ✔️ |
|
||||||
| 23.11 | ✔️ |
|
| 23.11 | ❌ |
|
||||||
| 23.10 | ❌ |
|
| 23.10 | ❌ |
|
||||||
| 23.9 | ❌ |
|
| 23.9 | ❌ |
|
||||||
| 23.8 | ✔️ |
|
| 23.8 | ✔️ |
|
||||||
|
@ -13,6 +13,7 @@ set (SRCS
|
|||||||
cgroupsv2.cpp
|
cgroupsv2.cpp
|
||||||
coverage.cpp
|
coverage.cpp
|
||||||
demangle.cpp
|
demangle.cpp
|
||||||
|
Decimal.cpp
|
||||||
getAvailableMemoryAmount.cpp
|
getAvailableMemoryAmount.cpp
|
||||||
getFQDNOrHostName.cpp
|
getFQDNOrHostName.cpp
|
||||||
getMemoryAmount.cpp
|
getMemoryAmount.cpp
|
||||||
|
base/base/Decimal.cpp (new file, 87 lines)
@@ -0,0 +1,87 @@
+#include <base/Decimal.h>
+#include <base/extended_types.h>
+
+namespace DB
+{
+
+/// Explicit template instantiations.
+
+#define FOR_EACH_UNDERLYING_DECIMAL_TYPE(M) \
+    M(Int32) \
+    M(Int64) \
+    M(Int128) \
+    M(Int256)
+
+#define FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(M, X) \
+    M(Int32, X) \
+    M(Int64, X) \
+    M(Int128, X) \
+    M(Int256, X)
+
+template <typename T> const Decimal<T> & Decimal<T>::operator += (const T & x) { value += x; return *this; }
+template <typename T> const Decimal<T> & Decimal<T>::operator -= (const T & x) { value -= x; return *this; }
+template <typename T> const Decimal<T> & Decimal<T>::operator *= (const T & x) { value *= x; return *this; }
+template <typename T> const Decimal<T> & Decimal<T>::operator /= (const T & x) { value /= x; return *this; }
+template <typename T> const Decimal<T> & Decimal<T>::operator %= (const T & x) { value %= x; return *this; }
+
+template <typename T> void NO_SANITIZE_UNDEFINED Decimal<T>::addOverflow(const T & x) { value += x; }
+
+/// Maybe this explicit instantiation affects performance since operators cannot be inlined.
+
+template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator += (const Decimal<U> & x) { value += static_cast<T>(x.value); return *this; }
+template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator -= (const Decimal<U> & x) { value -= static_cast<T>(x.value); return *this; }
+template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator *= (const Decimal<U> & x) { value *= static_cast<T>(x.value); return *this; }
+template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator /= (const Decimal<U> & x) { value /= static_cast<T>(x.value); return *this; }
+template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator %= (const Decimal<U> & x) { value %= static_cast<T>(x.value); return *this; }
+
+#define DISPATCH(TYPE_T, TYPE_U) \
+    template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator += (const Decimal<TYPE_U> & x); \
+    template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator -= (const Decimal<TYPE_U> & x); \
+    template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator *= (const Decimal<TYPE_U> & x); \
+    template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator /= (const Decimal<TYPE_U> & x); \
+    template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator %= (const Decimal<TYPE_U> & x);
+#define INVOKE(X) FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(DISPATCH, X)
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(INVOKE);
+#undef INVOKE
+#undef DISPATCH
+
+#define DISPATCH(TYPE) template struct Decimal<TYPE>;
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
+#undef DISPATCH
+
+template <typename T> bool operator< (const Decimal<T> & x, const Decimal<T> & y) { return x.value < y.value; }
+template <typename T> bool operator> (const Decimal<T> & x, const Decimal<T> & y) { return x.value > y.value; }
+template <typename T> bool operator<= (const Decimal<T> & x, const Decimal<T> & y) { return x.value <= y.value; }
+template <typename T> bool operator>= (const Decimal<T> & x, const Decimal<T> & y) { return x.value >= y.value; }
+template <typename T> bool operator== (const Decimal<T> & x, const Decimal<T> & y) { return x.value == y.value; }
+template <typename T> bool operator!= (const Decimal<T> & x, const Decimal<T> & y) { return x.value != y.value; }
+
+#define DISPATCH(TYPE) \
+    template bool operator< (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template bool operator> (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template bool operator<= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template bool operator>= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template bool operator== (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template bool operator!= (const Decimal<TYPE> & x, const Decimal<TYPE> & y);
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
+#undef DISPATCH
+
+template <typename T> Decimal<T> operator+ (const Decimal<T> & x, const Decimal<T> & y) { return x.value + y.value; }
+template <typename T> Decimal<T> operator- (const Decimal<T> & x, const Decimal<T> & y) { return x.value - y.value; }
+template <typename T> Decimal<T> operator* (const Decimal<T> & x, const Decimal<T> & y) { return x.value * y.value; }
+template <typename T> Decimal<T> operator/ (const Decimal<T> & x, const Decimal<T> & y) { return x.value / y.value; }
+template <typename T> Decimal<T> operator- (const Decimal<T> & x) { return -x.value; }
+
+#define DISPATCH(TYPE) \
+    template Decimal<TYPE> operator+ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template Decimal<TYPE> operator- (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template Decimal<TYPE> operator* (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template Decimal<TYPE> operator/ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    template Decimal<TYPE> operator- (const Decimal<TYPE> & x);
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
+#undef DISPATCH
+
+#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS
+#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE
+}
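This new file pairs with the header change that follows: the header now only declares the operators and marks every instantiation `extern template`, and this one translation unit instantiates them for Int32/Int64/Int128/Int256. A stripped-down sketch of the same technique, with illustrative names rather than ClickHouse code:

// value.h -- declarations only; `extern template` promises the
// instantiations live in some other translation unit, so including
// this header never re-instantiates the template.
template <typename T>
struct Value
{
    T v;
    const Value<T> & operator+=(const T & x); // not defined inline
};
extern template struct Value<int>;
extern template struct Value<long>;

// value.cpp -- the single definition point.
template <typename T>
const Value<T> & Value<T>::operator+=(const T & x)
{
    v += x;
    return *this;
}
template struct Value<int>;  // explicit instantiation
template struct Value<long>;

The trade-off is noted in the file itself: the operators can no longer be inlined at call sites, but every other translation unit stops paying to compile them.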
base/base/Decimal.h
@@ -1,20 +1,28 @@
 #pragma once

 #include <base/extended_types.h>
 #include <base/Decimal_fwd.h>
+#include <base/types.h>
+#include <base/defines.h>

-#if !defined(NO_SANITIZE_UNDEFINED)
-#if defined(__clang__)
-#define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
-#else
-#define NO_SANITIZE_UNDEFINED
-#endif
-#endif

 namespace DB
 {
 template <class> struct Decimal;
 class DateTime64;

+#define FOR_EACH_UNDERLYING_DECIMAL_TYPE(M) \
+    M(Int32) \
+    M(Int64) \
+    M(Int128) \
+    M(Int256)
+
+#define FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(M, X) \
+    M(Int32, X) \
+    M(Int64, X) \
+    M(Int128, X) \
+    M(Int256, X)
+
 using Decimal32 = Decimal<Int32>;
 using Decimal64 = Decimal<Int64>;
 using Decimal128 = Decimal<Int128>;
@@ -55,36 +63,73 @@ struct Decimal
         return static_cast<U>(value);
     }

-    const Decimal<T> & operator += (const T & x) { value += x; return *this; }
-    const Decimal<T> & operator -= (const T & x) { value -= x; return *this; }
-    const Decimal<T> & operator *= (const T & x) { value *= x; return *this; }
-    const Decimal<T> & operator /= (const T & x) { value /= x; return *this; }
-    const Decimal<T> & operator %= (const T & x) { value %= x; return *this; }
+    const Decimal<T> & operator += (const T & x);
+    const Decimal<T> & operator -= (const T & x);
+    const Decimal<T> & operator *= (const T & x);
+    const Decimal<T> & operator /= (const T & x);
+    const Decimal<T> & operator %= (const T & x);

-    template <typename U> const Decimal<T> & operator += (const Decimal<U> & x) { value += x.value; return *this; }
-    template <typename U> const Decimal<T> & operator -= (const Decimal<U> & x) { value -= x.value; return *this; }
-    template <typename U> const Decimal<T> & operator *= (const Decimal<U> & x) { value *= x.value; return *this; }
-    template <typename U> const Decimal<T> & operator /= (const Decimal<U> & x) { value /= x.value; return *this; }
-    template <typename U> const Decimal<T> & operator %= (const Decimal<U> & x) { value %= x.value; return *this; }
+    template <typename U> const Decimal<T> & operator += (const Decimal<U> & x);
+    template <typename U> const Decimal<T> & operator -= (const Decimal<U> & x);
+    template <typename U> const Decimal<T> & operator *= (const Decimal<U> & x);
+    template <typename U> const Decimal<T> & operator /= (const Decimal<U> & x);
+    template <typename U> const Decimal<T> & operator %= (const Decimal<U> & x);

     /// This is to avoid UB for sumWithOverflow()
-    void NO_SANITIZE_UNDEFINED addOverflow(const T & x) { value += x; }
+    void NO_SANITIZE_UNDEFINED addOverflow(const T & x);

     T value;
 };

-template <typename T> inline bool operator< (const Decimal<T> & x, const Decimal<T> & y) { return x.value < y.value; }
-template <typename T> inline bool operator> (const Decimal<T> & x, const Decimal<T> & y) { return x.value > y.value; }
-template <typename T> inline bool operator<= (const Decimal<T> & x, const Decimal<T> & y) { return x.value <= y.value; }
-template <typename T> inline bool operator>= (const Decimal<T> & x, const Decimal<T> & y) { return x.value >= y.value; }
-template <typename T> inline bool operator== (const Decimal<T> & x, const Decimal<T> & y) { return x.value == y.value; }
-template <typename T> inline bool operator!= (const Decimal<T> & x, const Decimal<T> & y) { return x.value != y.value; }
+#define DISPATCH(TYPE) extern template struct Decimal<TYPE>;
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
+#undef DISPATCH

-template <typename T> inline Decimal<T> operator+ (const Decimal<T> & x, const Decimal<T> & y) { return x.value + y.value; }
-template <typename T> inline Decimal<T> operator- (const Decimal<T> & x, const Decimal<T> & y) { return x.value - y.value; }
-template <typename T> inline Decimal<T> operator* (const Decimal<T> & x, const Decimal<T> & y) { return x.value * y.value; }
-template <typename T> inline Decimal<T> operator/ (const Decimal<T> & x, const Decimal<T> & y) { return x.value / y.value; }
-template <typename T> inline Decimal<T> operator- (const Decimal<T> & x) { return -x.value; }
+#define DISPATCH(TYPE_T, TYPE_U) \
+    extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator += (const Decimal<TYPE_U> & x); \
+    extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator -= (const Decimal<TYPE_U> & x); \
+    extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator *= (const Decimal<TYPE_U> & x); \
+    extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator /= (const Decimal<TYPE_U> & x); \
+    extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator %= (const Decimal<TYPE_U> & x);
+#define INVOKE(X) FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(DISPATCH, X)
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(INVOKE);
+#undef INVOKE
+#undef DISPATCH
+
+template <typename T> bool operator< (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> bool operator> (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> bool operator<= (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> bool operator>= (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> bool operator== (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> bool operator!= (const Decimal<T> & x, const Decimal<T> & y);
+
+#define DISPATCH(TYPE) \
+    extern template bool operator< (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template bool operator> (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template bool operator<= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template bool operator>= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template bool operator== (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template bool operator!= (const Decimal<TYPE> & x, const Decimal<TYPE> & y);
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
+#undef DISPATCH
+
+template <typename T> Decimal<T> operator+ (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> Decimal<T> operator- (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> Decimal<T> operator* (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> Decimal<T> operator/ (const Decimal<T> & x, const Decimal<T> & y);
+template <typename T> Decimal<T> operator- (const Decimal<T> & x);
+
+#define DISPATCH(TYPE) \
+    extern template Decimal<TYPE> operator+ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template Decimal<TYPE> operator- (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template Decimal<TYPE> operator* (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template Decimal<TYPE> operator/ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
+    extern template Decimal<TYPE> operator- (const Decimal<TYPE> & x);
+FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
+#undef DISPATCH
+
+#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS
+#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE

 /// Distinguishable type to allow function resolution/deduction based on value type,
 /// but also relatively easy to convert to/from Decimal64.

base/base/JSON.cpp
@@ -10,14 +10,10 @@
 #define JSON_MAX_DEPTH 100

-#ifdef __clang__
-#    pragma clang diagnostic push
-#    pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
-#endif
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
 POCO_IMPLEMENT_EXCEPTION(JSONException, Poco::Exception, "JSONException") // NOLINT(cert-err60-cpp, modernize-use-noexcept, hicpp-use-noexcept)
-#ifdef __clang__
-#    pragma clang diagnostic pop
-#endif
+#pragma clang diagnostic pop

 /// Read unsigned integer in a simple form from a non-0-terminated string.

base/base/JSON.h
@@ -39,14 +39,10 @@

 // NOLINTBEGIN(google-explicit-constructor)
-#ifdef __clang__
-#    pragma clang diagnostic push
-#    pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
-#endif
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
 POCO_DECLARE_EXCEPTION(Foundation_API, JSONException, Poco::Exception)
-#ifdef __clang__
-#    pragma clang diagnostic pop
-#endif
+#pragma clang diagnostic pop
 // NOLINTEND(google-explicit-constructor)

 class JSON

base/base/coverage.cpp
@@ -13,11 +13,7 @@
 #include <unistd.h>

-#    if defined(__clang__)
 extern "C" void __llvm_profile_dump(); // NOLINT
-#    elif defined(__GNUC__) || defined(__GNUG__)
-extern "C" void __gcov_exit();
-#    endif

 #endif

@@ -28,12 +24,7 @@ void dumpCoverageReportIfPossible()
     static std::mutex mutex;
     std::lock_guard lock(mutex);

-#    if defined(__clang__)
     __llvm_profile_dump(); // NOLINT
-#    elif defined(__GNUC__) || defined(__GNUG__)
-    __gcov_exit();
-#    endif

 #endif
 }

base/base/defines.h
@@ -11,7 +11,7 @@
 ///   including <base/defines.h>
 /// - it should not have fallback to 0,
 ///   since this may create false-positive detection (common problem)
-#if defined(__clang__) && defined(__has_feature)
+#if defined(__has_feature)
 #    define ch_has_feature __has_feature
 #endif
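The comment explains why `ch_has_feature` is left undefined rather than given a fallback of 0: callers first test `defined(ch_has_feature)` to learn whether the compiler can answer the question at all. A hedged, illustrative sketch of the intended probe (this snippet is not from the commit, though the pattern matches how the macro is consumed elsewhere in this diff, e.g. in phdr_cache.cpp):

#if defined(ch_has_feature)
#    if ch_has_feature(memory_sanitizer)
#        define MEMORY_SANITIZER 1
#    endif
#endif
/// A fallback `#define ch_has_feature(x) 0` would make `defined(ch_has_feature)`
/// true on every compiler while always answering "no", so "the compiler cannot
/// tell" would become indistinguishable from "the feature is absent" -- the
/// misdetection the comment warns about.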
@@ -76,24 +76,11 @@
 /// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.
 /// It is useful in case when compiler cannot see (and exploit) it, but UBSan can.
 /// Example: multiplication of signed integers with possibility of overflow when both sides are from user input.
-#if defined(__clang__)
-#    define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
-#    define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address")))
-#    define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
-#    define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED __attribute__((__always_inline__, __no_sanitize__("undefined")))
-#else  /// It does not work in GCC. GCC 7 cannot recognize this attribute and GCC 8 simply ignores it.
-#    define NO_SANITIZE_UNDEFINED
-#    define NO_SANITIZE_ADDRESS
-#    define NO_SANITIZE_THREAD
-#    define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED ALWAYS_INLINE
-#endif
-
-#if defined(__clang__) && defined(__clang_major__) && __clang_major__ >= 14
-#    define DISABLE_SANITIZER_INSTRUMENTATION __attribute__((disable_sanitizer_instrumentation))
-#else
-#    define DISABLE_SANITIZER_INSTRUMENTATION
-#endif
+#define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
+#define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address")))
+#define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
+#define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED __attribute__((__always_inline__, __no_sanitize__("undefined")))
+#define DISABLE_SANITIZER_INSTRUMENTATION __attribute__((disable_sanitizer_instrumentation))

 #if !__has_include(<sanitizer/asan_interface.h>) || !defined(ADDRESS_SANITIZER)
 #    define ASAN_UNPOISON_MEMORY_REGION(a, b)
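The header's own example (signed multiplication that may overflow when both operands are user input) translates directly into how the attribute is meant to be applied. A minimal illustration, not from the commit:

#include <cstdint>

/// Signed overflow is UB, but here wraparound is the intended semantics,
/// so UBSan is silenced for exactly this one function and nowhere else.
__attribute__((__no_sanitize__("undefined")))
int64_t multiplyWrapping(int64_t lhs, int64_t rhs)
{
    return lhs * rhs; /// may overflow: deliberately unchecked
}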
@@ -135,54 +122,33 @@

 /// Macros for Clang Thread Safety Analysis (TSA). They can be safely ignored by other compilers.
 /// Feel free to extend, but please stay close to https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#mutexheader
-#if defined(__clang__)
-#    define TSA_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__))) /// data is protected by given capability
-#    define TSA_PT_GUARDED_BY(...) __attribute__((pt_guarded_by(__VA_ARGS__))) /// pointed-to data is protected by the given capability
-#    define TSA_REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__))) /// thread needs exclusive possession of given capability
-#    define TSA_REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__))) /// thread needs shared possession of given capability
-#    define TSA_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) /// annotated lock must be locked after given lock
-#    define TSA_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) /// disable TSA for a function
-#    define TSA_CAPABILITY(...) __attribute__((capability(__VA_ARGS__))) /// object of a class can be used as capability
-#    define TSA_ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__))) /// function acquires a capability, but does not release it
-#    define TSA_TRY_ACQUIRE(...) __attribute__((try_acquire_capability(__VA_ARGS__))) /// function tries to acquire a capability and returns a boolean value indicating success or failure
-#    define TSA_RELEASE(...) __attribute__((release_capability(__VA_ARGS__))) /// function releases the given capability
-#    define TSA_ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__))) /// function acquires a shared capability, but does not release it
-#    define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
-#    define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
-#    define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
+#define TSA_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__))) /// data is protected by given capability
+#define TSA_PT_GUARDED_BY(...) __attribute__((pt_guarded_by(__VA_ARGS__))) /// pointed-to data is protected by the given capability
+#define TSA_REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__))) /// thread needs exclusive possession of given capability
+#define TSA_REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__))) /// thread needs shared possession of given capability
+#define TSA_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) /// annotated lock must be locked after given lock
+#define TSA_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) /// disable TSA for a function
+#define TSA_CAPABILITY(...) __attribute__((capability(__VA_ARGS__))) /// object of a class can be used as capability
+#define TSA_ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__))) /// function acquires a capability, but does not release it
+#define TSA_TRY_ACQUIRE(...) __attribute__((try_acquire_capability(__VA_ARGS__))) /// function tries to acquire a capability and returns a boolean value indicating success or failure
+#define TSA_RELEASE(...) __attribute__((release_capability(__VA_ARGS__))) /// function releases the given capability
+#define TSA_ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__))) /// function acquires a shared capability, but does not release it
+#define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
+#define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
+#define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability

 /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
 /// They use a lambda function to apply function attribute to a single statement. This enables us to suppress warnings locally instead of
 /// suppressing them in the whole function
 /// Consider adding a comment when using these macros.
-#    define TSA_SUPPRESS_WARNING_FOR_READ(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())
-#    define TSA_SUPPRESS_WARNING_FOR_WRITE(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }())
+#define TSA_SUPPRESS_WARNING_FOR_READ(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())
+#define TSA_SUPPRESS_WARNING_FOR_WRITE(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }())

 /// This macro is useful when only one thread writes to a member
 /// and you want to read this member from the same thread without locking a mutex.
 /// It's safe (because no concurrent writes are possible), but TSA generates a warning.
 /// (Seems like there's no way to verify it, but it makes sense to distinguish it from TSA_SUPPRESS_WARNING_FOR_READ for readability)
-#    define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)
-
-#else
-#    define TSA_GUARDED_BY(...)
-#    define TSA_PT_GUARDED_BY(...)
-#    define TSA_REQUIRES(...)
-#    define TSA_REQUIRES_SHARED(...)
-#    define TSA_NO_THREAD_SAFETY_ANALYSIS
-#    define TSA_CAPABILITY(...)
-#    define TSA_ACQUIRE(...)
-#    define TSA_TRY_ACQUIRE(...)
-#    define TSA_RELEASE(...)
-#    define TSA_ACQUIRE_SHARED(...)
-#    define TSA_TRY_ACQUIRE_SHARED(...)
-#    define TSA_RELEASE_SHARED(...)
-#    define TSA_SCOPED_LOCKABLE
-
-#    define TSA_SUPPRESS_WARNING_FOR_READ(x) (x)
-#    define TSA_SUPPRESS_WARNING_FOR_WRITE(x) (x)
-#    define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)
-#endif
+#define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)

 /// A template function for suppressing warnings about unused variables or function results.
 template <typename... Args>
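What these annotations buy, in one hedged sketch: when building with clang's `-Wthread-safety` (and a standard library whose `std::mutex` carries capability annotations, e.g. libc++ with thread-safety annotations enabled), the analyzer rejects any access to the guarded member made without holding the lock. Illustrative code, not from the repository:

#include <mutex>

struct Counter
{
    std::mutex mutex;
    int value TSA_GUARDED_BY(mutex) = 0;

    void increment()
    {
        std::lock_guard lock(mutex);
        ++value; /// OK: the analysis sees the lock being held
    }

    int readRacy() TSA_NO_THREAD_SAFETY_ANALYSIS
    {
        return value; /// deliberately unchecked; analysis disabled here
    }
};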
base/base/extended_types.h
@@ -64,6 +64,44 @@ template <> struct is_arithmetic<UInt256> { static constexpr bool value = true;
 template <typename T>
 inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;

+#define FOR_EACH_ARITHMETIC_TYPE(M) \
+    M(DataTypeDate) \
+    M(DataTypeDate32) \
+    M(DataTypeDateTime) \
+    M(DataTypeInt8) \
+    M(DataTypeUInt8) \
+    M(DataTypeInt16) \
+    M(DataTypeUInt16) \
+    M(DataTypeInt32) \
+    M(DataTypeUInt32) \
+    M(DataTypeInt64) \
+    M(DataTypeUInt64) \
+    M(DataTypeInt128) \
+    M(DataTypeUInt128) \
+    M(DataTypeInt256) \
+    M(DataTypeUInt256) \
+    M(DataTypeFloat32) \
+    M(DataTypeFloat64)
+
+#define FOR_EACH_ARITHMETIC_TYPE_PASS(M, X) \
+    M(DataTypeDate, X) \
+    M(DataTypeDate32, X) \
+    M(DataTypeDateTime, X) \
+    M(DataTypeInt8, X) \
+    M(DataTypeUInt8, X) \
+    M(DataTypeInt16, X) \
+    M(DataTypeUInt16, X) \
+    M(DataTypeInt32, X) \
+    M(DataTypeUInt32, X) \
+    M(DataTypeInt64, X) \
+    M(DataTypeUInt64, X) \
+    M(DataTypeInt128, X) \
+    M(DataTypeUInt128, X) \
+    M(DataTypeInt256, X) \
+    M(DataTypeUInt256, X) \
+    M(DataTypeFloat32, X) \
+    M(DataTypeFloat64, X)
+
 template <typename T>
 struct make_unsigned // NOLINT(readability-identifier-naming)
 {
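These FOR_EACH lists are X-macros: each consumer defines a one-parameter DISPATCH, stamps it over every listed type, then discards it, exactly as Decimal.cpp does above. A compressed sketch of the consumption pattern, with illustrative names:

template <typename T> struct Wrapper { T value; };

#define FOR_EACH_SMALL_TYPE(M) \
    M(int) \
    M(long) \
    M(double)

/// One explicit instantiation per listed type, generated mechanically:
#define DISPATCH(TYPE) template struct Wrapper<TYPE>;
FOR_EACH_SMALL_TYPE(DISPATCH)
#undef DISPATCH
#undef FOR_EACH_SMALL_TYPE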
base/base/iostream_debug_helpers.h
@@ -155,9 +155,7 @@ Out & dump(Out & out, const char * name, T && x)
     return dumpValue(out, x) << "; ";
 }

-#ifdef __clang__
 #pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
-#endif

 #define DUMPVAR(VAR) ::dump(std::cerr, #VAR, (VAR));
 #define DUMPHEAD std::cerr << __FILE__ << ':' << __LINE__ << " [ " << getThreadId() << " ] ";

base/base/phdr_cache.cpp
@@ -11,10 +11,8 @@
 /// Thread Sanitizer uses dl_iterate_phdr function on initialization and fails if we provide our own.
 #ifdef USE_PHDR_CACHE

-#if defined(__clang__)
-#    pragma clang diagnostic ignored "-Wreserved-id-macro"
-#    pragma clang diagnostic ignored "-Wunused-macros"
-#endif
+#pragma clang diagnostic ignored "-Wreserved-id-macro"
+#pragma clang diagnostic ignored "-Wunused-macros"

 #define __msan_unpoison(X, Y) // NOLINT
 #if defined(ch_has_feature)
@@ -57,10 +55,6 @@ std::atomic<PHDRCache *> phdr_cache {};

 extern "C"
-#ifndef __clang__
-[[gnu::visibility("default")]]
-[[gnu::externally_visible]]
-#endif
 int dl_iterate_phdr(int (*callback) (dl_phdr_info * info, size_t size, void * data), void * data)
 {
     auto * current_phdr_cache = phdr_cache.load();

base/glibc-compatibility/musl/aarch64/syscall.s
@@ -2,6 +2,7 @@
 .hidden __syscall
 .type __syscall,%function
 __syscall:
+.cfi_startproc
 uxtw x8,w0
 mov x0,x1
 mov x1,x2
@@ -12,3 +13,4 @@ __syscall:
 mov x6,x7
 svc 0
 ret
+.cfi_endproc

@@ -20,11 +20,7 @@

 /// Suppress TSan since it is possible for this code to be called from multiple threads,
 /// and initialization is safe to be done multiple times from multiple threads.
-#if defined(__clang__)
-#    define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
-#else
-#    define NO_SANITIZE_THREAD
-#endif
+#define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))

 // We don't have libc struct available here.
 // Compute aux vector manually (from /proc/self/auxv).

@@ -6,11 +6,7 @@
 /// It is only enabled in debug build (its intended use is for CI checks).
 #if !defined(NDEBUG)

-#if defined(__clang__)
 #pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
-#else
-#pragma GCC diagnostic ignored "-Wbuiltin-declaration-mismatch"
-#endif

 /// We cannot use libc headers here.
 long write(int, const void *, unsigned long);

contrib/aws (submodule)
@@ -1 +1 @@
-Subproject commit 5f0542b3ad7eef25b0540d37d778207e0345ea8f
+Subproject commit 32870e234cac03e0ac46370c26858b0ffdf14200

contrib/cctz (submodule)
@@ -1 +1 @@
-Subproject commit 8529bcef5cd996b7c0f4d7475286b76b5d126c4c
+Subproject commit 7918cb7afe82e53428e39a045a437fdfd4f3df47
docker/images.json
@@ -1,8 +1,12 @@
 {
-    "docker/packager/binary": {
+    "docker/packager/binary-builder": {
         "name": "clickhouse/binary-builder",
         "dependent": []
     },
+    "docker/packager/cctools": {
+        "name": "clickhouse/cctools",
+        "dependent": []
+    },
     "docker/test/compatibility/centos": {
         "name": "clickhouse/test-old-centos",
         "dependent": []
@@ -30,7 +34,6 @@
     "docker/test/util": {
         "name": "clickhouse/test-util",
         "dependent": [
-            "docker/packager/binary",
             "docker/test/base",
             "docker/test/fasttest"
         ]
@@ -67,7 +70,9 @@
     },
     "docker/test/fasttest": {
         "name": "clickhouse/fasttest",
-        "dependent": []
+        "dependent": [
+            "docker/packager/binary-builder"
+        ]
     },
     "docker/test/style": {
         "name": "clickhouse/style-test",

docker/keeper/Dockerfile
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.1.5.6"
+ARG VERSION="24.2.1.2248"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""

docker/packager/binary-builder/Dockerfile
@@ -1,43 +1,6 @@
 # docker build -t clickhouse/binary-builder .
 ARG FROM_TAG=latest
-FROM clickhouse/test-util:latest AS cctools
-# The cctools are built always from the clickhouse/test-util:latest and cached inline
-# Theoretically, it should improve rebuild speed significantly
-ENV CC=clang-${LLVM_VERSION}
-ENV CXX=clang++-${LLVM_VERSION}
-# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# DO NOT PUT ANYTHING BEFORE THE NEXT TWO `RUN` DIRECTIVES
-# THE MOST HEAVY OPERATION MUST BE THE FIRST IN THE CACHE
-# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# libtapi is required to support .tbh format from recent MacOS SDKs
-RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \
-    && cd apple-libtapi \
-    && git checkout 15dfc2a8c9a2a89d06ff227560a69f5265b692f9 \
-    && INSTALLPREFIX=/cctools ./build.sh \
-    && ./install.sh \
-    && cd .. \
-    && rm -rf apple-libtapi
-
-# Build and install tools for cross-linking to Darwin (x86-64)
-# Build and install tools for cross-linking to Darwin (aarch64)
-RUN git clone https://github.com/tpoechtrager/cctools-port.git \
-    && cd cctools-port/cctools \
-    && git checkout 2a3e1c2a6ff54a30f898b70cfb9ba1692a55fad7 \
-    && ./configure --prefix=/cctools --with-libtapi=/cctools \
-        --target=x86_64-apple-darwin \
-    && make install -j$(nproc) \
-    && make clean \
-    && ./configure --prefix=/cctools --with-libtapi=/cctools \
-        --target=aarch64-apple-darwin \
-    && make install -j$(nproc) \
-    && cd ../.. \
-    && rm -rf cctools-port
-
-# !!!!!!!!!!!
-# END COMPILE
-# !!!!!!!!!!!
-
-FROM clickhouse/test-util:$FROM_TAG
+FROM clickhouse/fasttest:$FROM_TAG
 ENV CC=clang-${LLVM_VERSION}
 ENV CXX=clang++-${LLVM_VERSION}

@@ -110,7 +73,8 @@ RUN curl -Lo /usr/bin/clang-tidy-cache \
         "https://raw.githubusercontent.com/matus-chochlik/ctcache/$CLANG_TIDY_SHA1/clang-tidy-cache" \
     && chmod +x /usr/bin/clang-tidy-cache

-COPY --from=cctools /cctools /cctools
+# If the cctools is updated, then first build it in the CI, then update here in a different commit
+COPY --from=clickhouse/cctools:5a908f73878a /cctools /cctools

 RUN mkdir /workdir && chmod 777 /workdir
 WORKDIR /workdir
docker/packager/cctools/Dockerfile (new file, 31 lines)
@@ -0,0 +1,31 @@
+# This is a hack to significantly reduce the build time of the clickhouse/binary-builder
+# It's based on the assumption that we don't care about the cctools version so much
+# It even does not depend on the clickhouse/fasttest in the `docker/images.json`
+ARG FROM_TAG=latest
+FROM clickhouse/fasttest:$FROM_TAG
+
+ENV CC=clang-${LLVM_VERSION}
+ENV CXX=clang++-${LLVM_VERSION}
+
+RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \
+    && cd apple-libtapi \
+    && git checkout 15dfc2a8c9a2a89d06ff227560a69f5265b692f9 \
+    && INSTALLPREFIX=/cctools ./build.sh \
+    && ./install.sh \
+    && cd .. \
+    && rm -rf apple-libtapi
+
+# Build and install tools for cross-linking to Darwin (x86-64)
+# Build and install tools for cross-linking to Darwin (aarch64)
+RUN git clone https://github.com/tpoechtrager/cctools-port.git \
+    && cd cctools-port/cctools \
+    && git checkout 2a3e1c2a6ff54a30f898b70cfb9ba1692a55fad7 \
+    && ./configure --prefix=/cctools --with-libtapi=/cctools \
+        --target=x86_64-apple-darwin \
+    && make install -j$(nproc) \
+    && make clean \
+    && ./configure --prefix=/cctools --with-libtapi=/cctools \
+        --target=aarch64-apple-darwin \
+    && make install -j$(nproc) \
+    && cd ../.. \
+    && rm -rf cctools-port
docker/packager/packager
@@ -1,16 +1,16 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-import subprocess
-import os
 import argparse
 import logging
+import os
+import subprocess
 import sys
 from pathlib import Path
 from typing import List, Optional

 SCRIPT_PATH = Path(__file__).absolute()
-IMAGE_TYPE = "binary"
-IMAGE_NAME = f"clickhouse/{IMAGE_TYPE}-builder"
+IMAGE_TYPE = "binary-builder"
+IMAGE_NAME = f"clickhouse/{IMAGE_TYPE}"

 class BuildException(Exception):

docker/server/Dockerfile.alpine
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.1.5.6"
+ARG VERSION="24.2.1.2248"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""

docker/server/Dockerfile.ubuntu
@@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.1.5.6"
+ARG VERSION="24.2.1.2248"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
docker/test/fasttest/Dockerfile
@@ -6,9 +6,18 @@ FROM clickhouse/test-util:$FROM_TAG
 RUN apt-get update \
     && apt-get install \
         brotli \
+        clang-${LLVM_VERSION} \
+        clang-tidy-${LLVM_VERSION} \
+        cmake \
         expect \
         file \
+        libclang-${LLVM_VERSION}-dev \
+        libclang-rt-${LLVM_VERSION}-dev \
+        lld-${LLVM_VERSION} \
+        llvm-${LLVM_VERSION} \
+        llvm-${LLVM_VERSION}-dev \
        lsof \
+        ninja-build \
         odbcinst \
         psmisc \
         python3 \
@@ -26,14 +35,50 @@ RUN apt-get update \

 RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3

-ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
-
-RUN mkdir -p /tmp/clickhouse-odbc-tmp \
-    && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
-    && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \
-    && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
-    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
-    && rm -rf /tmp/clickhouse-odbc-tmp
+# This symlink is required by gcc to find the lld linker
+RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
+# for external_symbolizer_path
+RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
+# FIXME: workaround for "The imported target "merge-fdata" references the file" error
+# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
+RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
+
+ARG CCACHE_VERSION=4.6.1
+RUN mkdir /tmp/ccache \
+    && cd /tmp/ccache \
+    && curl -L \
+        -O https://github.com/ccache/ccache/releases/download/v$CCACHE_VERSION/ccache-$CCACHE_VERSION.tar.xz \
+        -O https://github.com/ccache/ccache/releases/download/v$CCACHE_VERSION/ccache-$CCACHE_VERSION.tar.xz.asc \
+    && gpg --recv-keys --keyserver hkps://keyserver.ubuntu.com 5A939A71A46792CF57866A51996DDA075594ADB8 \
+    && gpg --verify ccache-4.6.1.tar.xz.asc \
+    && tar xf ccache-$CCACHE_VERSION.tar.xz \
+    && cd /tmp/ccache/ccache-$CCACHE_VERSION \
+    && cmake -DCMAKE_INSTALL_PREFIX=/usr \
+        -DCMAKE_BUILD_TYPE=None \
+        -DZSTD_FROM_INTERNET=ON \
+        -DREDIS_STORAGE_BACKEND=OFF \
+        -Wno-dev \
+        -B build \
+        -S . \
+    && make VERBOSE=1 -C build \
+    && make install -C build \
+    && cd / \
+    && rm -rf /tmp/ccache
+
+ARG TARGETARCH
+ARG SCCACHE_VERSION=v0.7.7
+ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
+# sccache requires a value for the region. So by default we use The Default Region
+ENV SCCACHE_REGION=us-east-1
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) rarch=x86_64 ;; \
+        arm64) rarch=aarch64 ;; \
+    esac \
+    && curl -Ls "https://github.com/mozilla/sccache/releases/download/$SCCACHE_VERSION/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl.tar.gz" | \
+        tar xz -C /tmp \
+    && mv "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl/sccache" /usr/bin \
+    && rm "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl" -r

 # Give suid to gdb to grant it attach permissions
 # chmod 777 to make the container user independent
@ -247,6 +247,12 @@ quit
|
|||||||
fuzzer_pid=$!
|
fuzzer_pid=$!
|
||||||
echo "Fuzzer pid is $fuzzer_pid"
|
echo "Fuzzer pid is $fuzzer_pid"
|
||||||
|
|
||||||
|
# The fuzzer_pid belongs to the timeout process.
|
||||||
|
actual_fuzzer_pid=$(ps -o pid= --ppid "$fuzzer_pid")
|
||||||
|
|
||||||
|
echo "Attaching gdb to the fuzzer itself"
|
||||||
|
gdb -batch -command script.gdb -p $actual_fuzzer_pid &
|
||||||
|
|
||||||
# Wait for the fuzzer to complete.
|
# Wait for the fuzzer to complete.
|
||||||
# Note that the 'wait || ...' thing is required so that the script doesn't
|
# Note that the 'wait || ...' thing is required so that the script doesn't
|
||||||
# exit because of 'set -e' when 'wait' returns nonzero code.
|
# exit because of 'set -e' when 'wait' returns nonzero code.
|
||||||
@ -337,10 +343,9 @@ quit
|
|||||||
# which is confusing.
|
# which is confusing.
|
||||||
task_exit_code=$fuzzer_exit_code
|
task_exit_code=$fuzzer_exit_code
|
||||||
echo "failure" > status.txt
|
echo "failure" > status.txt
|
||||||
{ rg -ao "Found error:.*" fuzzer.log \
|
echo "Achtung!" > description.txt
|
||||||
|| rg -ao "Exception:.*" fuzzer.log \
|
echo "Fuzzer went wrong with error code: ($fuzzer_exit_code). Its process died somehow when the server stayed alive. The server log probably won't tell you much so try to find information in other files." >>description.txt
|
||||||
|| echo "Fuzzer failed ($fuzzer_exit_code). See the logs." ; } \
|
{ rg -ao "Found error:.*" fuzzer.log || rg -ao "Exception:.*" fuzzer.log; } | tail -1 >>description.txt
|
||||||
| tail -1 > description.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if test -f core.*; then
|
if test -f core.*; then
|
||||||
@ -386,7 +391,8 @@ if [ -f core.zst ]; then
|
|||||||
CORE_LINK='<a href="core.zst">core.zst</a>'
|
CORE_LINK='<a href="core.zst">core.zst</a>'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
rg --text -F '<Fatal>' server.log > fatal.log ||:
|
# Keep all the lines in the paragraphs containing <Fatal> that either contain <Fatal> or don't start with 20... (year)
|
||||||
|
sed -n '/<Fatal>/,/^$/p' server.log | awk '/<Fatal>/ || !/^20/' > fatal.log ||:
|
||||||
FATAL_LINK=''
|
FATAL_LINK=''
|
||||||
if [ -s fatal.log ]; then
|
if [ -s fatal.log ]; then
|
||||||
FATAL_LINK='<a href="fatal.log">fatal.log</a>'
|
FATAL_LINK='<a href="fatal.log">fatal.log</a>'
|
||||||
|
@ -20,6 +20,8 @@ if [ -n "$WITH_LOCAL_BINARY" ]; then
|
|||||||
clickhouse_source="--clickhouse-source /clickhouse"
|
clickhouse_source="--clickhouse-source /clickhouse"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# $TESTS_TO_RUN comes from docker
|
||||||
|
# shellcheck disable=SC2153
|
||||||
tests_count="--test-count $TESTS_TO_RUN"
|
tests_count="--test-count $TESTS_TO_RUN"
|
||||||
tests_to_run="test-all"
|
tests_to_run="test-all"
|
||||||
workload=""
|
workload=""
|
||||||
@ -47,6 +49,6 @@ fi
|
|||||||
|
|
||||||
cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"
|
cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"
|
||||||
|
|
||||||
(lein run server $tests_to_run $workload --keeper "$KEEPER_NODE" $concurrency $nemesis $rate --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 $clickhouse_source $tests_count --reuse-binary || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
|
(lein run server $tests_to_run "$workload" --keeper "$KEEPER_NODE" "$concurrency" "$nemesis" "$rate" --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 "$clickhouse_source" "$tests_count" --reuse-binary || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
|
||||||
|
|
||||||
mv store "$TEST_OUTPUT/"
|
mv store "$TEST_OUTPUT/"
|
||||||
|
@ -24,17 +24,18 @@ RUN pip3 install \
|
|||||||
deepdiff \
|
deepdiff \
|
||||||
sqlglot
|
sqlglot
|
||||||
|
|
||||||
ARG odbc_repo="https://github.com/ClickHouse/clickhouse-odbc.git"
|
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"
|
||||||
|
|
||||||
|
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
|
||||||
|
&& cd /tmp/clickhouse-odbc-tmp \
|
||||||
|
&& curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \
|
||||||
|
&& mkdir /usr/local/lib64 -p \
|
||||||
|
&& cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \
|
||||||
|
&& odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
|
||||||
|
&& odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
|
||||||
|
&& sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \
|
||||||
|
&& rm -rf /tmp/clickhouse-odbc-tmp
|
||||||
|
|
||||||
RUN git clone --recursive ${odbc_repo} \
|
|
||||||
&& mkdir -p /clickhouse-odbc/build \
|
|
||||||
&& cmake -S /clickhouse-odbc -B /clickhouse-odbc/build \
|
|
||||||
&& ls /clickhouse-odbc/build/driver \
|
|
||||||
&& make -j 10 -C /clickhouse-odbc/build \
|
|
||||||
&& ls /clickhouse-odbc/build/driver \
|
|
||||||
&& mkdir -p /usr/local/lib64/ && cp /clickhouse-odbc/build/driver/lib*.so /usr/local/lib64/ \
|
|
||||||
&& odbcinst -i -d -f /clickhouse-odbc/packaging/odbcinst.ini.sample \
|
|
||||||
&& odbcinst -i -s -l -f /clickhouse-odbc/packaging/odbc.ini.sample
|
|
||||||
|
|
||||||
ENV TZ=Europe/Amsterdam
|
ENV TZ=Europe/Amsterdam
|
||||||
ENV MAX_RUN_TIME=9000
|
ENV MAX_RUN_TIME=9000
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
ARG FROM_TAG=latest
|
ARG FROM_TAG=latest
|
||||||
FROM clickhouse/test-base:$FROM_TAG
|
FROM clickhouse/test-base:$FROM_TAG
|
||||||
|
|
||||||
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
|
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"
|
||||||
|
|
||||||
# golang version 1.13 on Ubuntu 20 is enough for tests
|
# golang version 1.13 on Ubuntu 20 is enough for tests
|
||||||
RUN apt-get update -y \
|
RUN apt-get update -y \
|
||||||
@ -35,7 +35,6 @@ RUN apt-get update -y \
|
|||||||
sudo \
|
sudo \
|
||||||
tree \
|
tree \
|
||||||
unixodbc \
|
unixodbc \
|
||||||
wget \
|
|
||||||
rustc \
|
rustc \
|
||||||
cargo \
|
cargo \
|
||||||
zstd \
|
zstd \
|
||||||
@@ -50,11 +49,14 @@ RUN apt-get update -y \
 RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3 pyarrow==15.0.0

 RUN mkdir -p /tmp/clickhouse-odbc-tmp \
-    && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
-    && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \
-    && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
-    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
-    && rm -rf /tmp/clickhouse-odbc-tmp
+    && cd /tmp/clickhouse-odbc-tmp \
+    && curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \
+    && mkdir /usr/local/lib64 -p \
+    && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \
+    && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
+    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
+    && sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \
+    && rm -rf /tmp/clickhouse-odbc-tmp

 ENV TZ=Europe/Amsterdam
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
@@ -70,11 +72,11 @@ ARG TARGETARCH

 # Download Minio-related binaries
 RUN arch=${TARGETARCH:-amd64} \
-    && wget "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -O ./minio \
-    && wget "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -O ./mc \
+    && curl -L "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -o ./minio \
+    && curl -L "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -o ./mc \
     && chmod +x ./mc ./minio

-RUN wget --no-verbose 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
+RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
     && tar -xvf hadoop-3.3.1.tar.gz \
     && rm -rf hadoop-3.3.1.tar.gz

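Note: the wget-to-curl switch above is a flag-for-flag translation. For reference, with an illustrative $URL:

    wget -nv "$URL" -O out.bin     # old form: quiet download to an explicit file name
    curl -L "$URL" -o out.bin      # new form: -L follows redirects (needed e.g. for release URLs), -o names the output
    curl -L -O "$URL"              # -O keeps the remote file name, as used for the hadoop tarball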
@@ -1,5 +1,6 @@
 #!/bin/bash

+# shellcheck source=./utils.lib
 source /utils.lib

 function attach_gdb_to_clickhouse()
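Note: `# shellcheck source=./utils.lib` is a directive, not a plain comment. `/utils.lib` only exists at that absolute path inside the test container, so without the directive shellcheck emits SC1091 ("Not following") and cannot analyze the sourced functions. The directive points the linter at the repository-relative copy:

    # shellcheck source=./utils.lib
    source /utils.lib    # runtime path inside the container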
@@ -19,7 +19,7 @@ function escaped()

 function head_escaped()
 {
-    head -n $FAILURE_CONTEXT_LINES $1 | escaped
+    head -n "$FAILURE_CONTEXT_LINES" "$1" | escaped
 }

 function unts()
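Note on the quoting fix: with an empty or unset variable, the unquoted form silently rearranges the command line. A sketch of the failure mode (the file name is illustrative):

    FAILURE_CONTEXT_LINES=""
    head -n $FAILURE_CONTEXT_LINES server.log      # expands to `head -n server.log`: wrong arguments, wrong output
    head -n "$FAILURE_CONTEXT_LINES" server.log    # fails loudly with an invalid line count instead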
@@ -29,15 +29,15 @@ function unts()

 function trim_server_logs()
 {
-    head -n $FAILURE_CONTEXT_LINES "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped
+    head -n "$FAILURE_CONTEXT_LINES" "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped
 }

 function install_packages()
 {
-    dpkg -i $1/clickhouse-common-static_*.deb
-    dpkg -i $1/clickhouse-common-static-dbg_*.deb
-    dpkg -i $1/clickhouse-server_*.deb
-    dpkg -i $1/clickhouse-client_*.deb
+    dpkg -i "$1"/clickhouse-common-static_*.deb
+    dpkg -i "$1"/clickhouse-common-static-dbg_*.deb
+    dpkg -i "$1"/clickhouse-server_*.deb
+    dpkg -i "$1"/clickhouse-client_*.deb
 }

 function configure()
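Note: in `dpkg -i "$1"/clickhouse-server_*.deb` only `$1` is quoted on purpose; the glob has to stay outside the quotes to keep expanding. A sketch:

    dpkg -i "$1"/clickhouse-server_*.deb     # directory is word-split-safe, *.deb still globs
    dpkg -i "$1/clickhouse-server_*.deb"     # wrong: would look for a file containing a literal `*`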
@@ -54,11 +54,11 @@ function configure()
     sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml

 function randomize_config_boolean_value {
-    value=$(($RANDOM % 2))
-    sudo cat /etc/clickhouse-server/config.d/$2.xml \
+    value=$((RANDOM % 2))
+    sudo cat "/etc/clickhouse-server/config.d/$2.xml" \
         | sed "s|<$1>[01]</$1>|<$1>$value</$1>|" \
-        > /etc/clickhouse-server/config.d/$2.xml.tmp
-    sudo mv /etc/clickhouse-server/config.d/$2.xml.tmp /etc/clickhouse-server/config.d/$2.xml
+        > "/etc/clickhouse-server/config.d/$2.xml.tmp"
+    sudo mv "/etc/clickhouse-server/config.d/$2.xml.tmp" "/etc/clickhouse-server/config.d/$2.xml"
 }

 if [[ -n "$RANDOMIZE_KEEPER_FEATURE_FLAGS" ]] && [[ "$RANDOMIZE_KEEPER_FEATURE_FLAGS" -eq 1 ]]; then
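Note: `$((RANDOM % 2))` and `$(($RANDOM % 2))` evaluate identically; inside an arithmetic expansion the `$` is redundant, which is what shellcheck flags (SC2004). A sketch:

    value=$((RANDOM % 2))    # 0 or 1, picked per config file
    echo "$value"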
@@ -146,17 +146,17 @@ EOL

 }

-function stop()
+function stop_server()
 {
-    local max_tries="${1:-90}"
-    local check_hang="${2:-true}"
+    local max_tries=90
+    local check_hang=true
     local pid
     # Preserve the pid, since the server can hung after the PID will be deleted.
     pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"

     clickhouse stop --max-tries "$max_tries" --do-not-kill && return

-    if [ $check_hang == true ]
+    if [ "$check_hang" == true ]
     then
         # We failed to stop the server with SIGTERM. Maybe it hang, let's collect stacktraces.
         # Add a special status just in case, so it will be possible to find in the CI DB
@@ -165,7 +165,7 @@ function stop()
         sleep 5

         # The server could finally stop while we were terminating gdb, let's recheck if it's still running
-        kill -s 0 $pid || return
+        kill -s 0 "$pid" || return
         echo -e "Possible deadlock on shutdown (see gdb.log)$FAIL" >> /test_output/test_results.tsv
         echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log
         timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$pid" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
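Note: `kill -s 0 "$pid"` delivers no signal at all; it only asks the kernel whether the process still exists and can be signalled, which is why it gates the deadlock report above. A sketch:

    if kill -s 0 "$pid" 2>/dev/null; then
        echo "clickhouse-server ($pid) is still alive after SIGTERM"
    fi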
@@ -176,12 +176,13 @@ function stop()
     fi
 }

-function start()
+function start_server()
 {
     counter=0
+    max_attempt=120
     until clickhouse-client --query "SELECT 1"
     do
-        if [ "$counter" -gt ${1:-120} ]
+        if [ "$counter" -gt "$max_attempt" ]
         then
             echo "Cannot start clickhouse-server"
             rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt ||:
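Note: `start_server` now reads its attempt limit from a local variable instead of `${1:-120}`, since no caller passed an argument. The retry loop in isolation (the half-second pause is an assumption for this sketch; the real function counts bare iterations):

    counter=0
    max_attempt=120
    until clickhouse-client --query "SELECT 1"; do
        if [ "$counter" -gt "$max_attempt" ]; then
            echo "Cannot start clickhouse-server" >&2
            break
        fi
        sleep 0.5
        counter=$((counter + 1))
    done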
@@ -286,9 +287,9 @@ function collect_query_and_trace_logs()

 function collect_core_dumps()
 {
-    find . -type f -maxdepth 1 -name 'core.*' | while read core; do
-        zstd --threads=0 $core
-        mv $core.zst /test_output/
+    find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
+        zstd --threads=0 "$core"
+        mv "$core.zst" /test_output/
     done
 }

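Note: `read -r` keeps backslashes in core-dump names literal, and quoting "$core" survives unusual names. One caveat of the `find | while read` form, kept as-is here: the loop body runs in a subshell, so variables set inside it do not survive (harmless for this function). A sketch:

    n=0
    find . -maxdepth 1 -name 'core.*' | while read -r core; do n=$((n + 1)); done
    echo "$n"    # still 0: the increment happened in the pipeline's subshell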
|
@ -16,7 +16,9 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
|||||||
|
|
||||||
# Stress tests and upgrade check uses similar code that was placed
|
# Stress tests and upgrade check uses similar code that was placed
|
||||||
# in a separate bash library. See tests/ci/stress_tests.lib
|
# in a separate bash library. See tests/ci/stress_tests.lib
|
||||||
|
# shellcheck source=../stateless/attach_gdb.lib
|
||||||
source /attach_gdb.lib
|
source /attach_gdb.lib
|
||||||
|
# shellcheck source=../stateless/stress_tests.lib
|
||||||
source /stress_tests.lib
|
source /stress_tests.lib
|
||||||
|
|
||||||
install_packages package_folder
|
install_packages package_folder
|
||||||
@@ -55,7 +57,7 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &

 config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

-start
+start_server

 setup_logs_replication

@@ -65,7 +67,7 @@ clickhouse-client --query "SHOW TABLES FROM datasets"

 clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"

-stop
+stop_server
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log

 # Randomize cache policies.
@@ -85,7 +87,7 @@ if [ "$cache_policy" = "SLRU" ]; then
     mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
 fi

-start
+start_server

 clickhouse-client --query "SHOW TABLES FROM datasets"
 clickhouse-client --query "SHOW TABLES FROM test"
@@ -188,7 +190,7 @@ clickhouse-client --query "SHOW TABLES FROM test"

 clickhouse-client --query "SYSTEM STOP THREAD FUZZER"

-stop
+stop_server

 # Let's enable S3 storage by default
 export USE_S3_STORAGE_FOR_MERGE_TREE=1
@@ -222,7 +224,7 @@ if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then
     > /etc/clickhouse-server/config.d/enable_async_load_databases.xml
 fi

-start
+start_server

 stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
     && echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
@@ -232,18 +234,18 @@ stress --hung-check --drop-databases --output-folder test_output --skip-func-tes
 rg -Fa "No queries hung" /test_output/test_results.tsv | grep -Fa "OK" \
     || echo -e "Hung check failed, possible deadlock found (see hung_check.log)$FAIL$(head_escaped /test_output/hung_check.log)" >> /test_output/test_results.tsv

-stop
+stop_server
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log

 # NOTE Disable thread fuzzer before server start with data after stress test.
 # In debug build it can take a lot of time.
 unset "${!THREAD_@}"

-start
+start_server

 check_server_start

-stop
+stop_server

 [ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL"
 [ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL"
@@ -272,7 +274,7 @@ clickhouse-local --structure "test String, res String, time Nullable(Float32), d
     (test like '%Signal 9%') DESC,
     (test like '%Fatal message%') DESC,
     rowNumberInAllBlocks()
-    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
+    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo -e "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
 [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv

 # But OOMs in stress test are allowed
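Note: the `echo` to `echo -e` fix matters for the TSV consumers downstream, since bash's builtin echo leaves `\t` literal unless -e is given. A sketch:

    echo "failure\tCannot parse test_results.tsv"       # one column containing a literal backslash-t
    echo -e "failure\tCannot parse test_results.tsv"    # a real tab: two TSV columns, as the CI parser expects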
|
@ -16,10 +16,10 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
|||||||
moreutils \
|
moreutils \
|
||||||
python3-fuzzywuzzy \
|
python3-fuzzywuzzy \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
shellcheck \
|
|
||||||
yamllint \
|
yamllint \
|
||||||
locales \
|
locales \
|
||||||
&& pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
|
&& pip3 install black==23.12.0 boto3 codespell==2.2.1 mypy==1.8.0 PyGithub unidiff pylint==3.1.0 \
|
||||||
|
requests types-requests \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
|
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
|
||||||
&& rm -rf /root/.cache/pip
|
&& rm -rf /root/.cache/pip
|
||||||
@@ -30,6 +30,19 @@ ENV LC_ALL en_US.UTF-8
 # Architecture of the image when BuildKit/buildx is used
 ARG TARGETARCH

+ARG SHELLCHECK_VERSION=0.9.0
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) sarch=x86_64 ;; \
+        arm64) sarch=aarch64 ;; \
+    esac \
+    && curl -L \
+        "https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/shellcheck-v${SHELLCHECK_VERSION}.linux.${sarch}.tar.xz" \
+        | tar xJ --strip=1 -C /tmp \
+    && mv /tmp/shellcheck /usr/bin \
+    && rm -rf /tmp/*
+
 # Get act and actionlint from releases
 RUN arch=${TARGETARCH:-amd64} \
     && case $arch in \
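Note: the shellcheck install above (replacing the apt package dropped in the previous hunk) has to translate Docker's Go-style TARGETARCH names into the uname-style names used in shellcheck's release artifacts. The mapping in isolation:

    arch=${TARGETARCH:-amd64}    # BuildKit sets amd64 or arm64
    case $arch in
        amd64) sarch=x86_64 ;;
        arm64) sarch=aarch64 ;;
    esac
    echo "shellcheck-v0.9.0.linux.${sarch}.tar.xz"    # matches the release asset naming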
@@ -47,5 +60,4 @@ RUN arch=${TARGETARCH:-amd64} \


 COPY run.sh /
-COPY process_style_check_result.py /
 CMD ["/bin/bash", "/run.sh"]
@@ -16,7 +16,9 @@ ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_pre

 # Stress tests and upgrade check uses similar code that was placed
 # in a separate bash library. See tests/ci/stress_tests.lib
+# shellcheck source=../stateless/attach_gdb.lib
 source /attach_gdb.lib
+# shellcheck source=../stateless/stress_tests.lib
 source /stress_tests.lib

 azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
@@ -77,6 +79,18 @@ remove_keeper_config "async_replication" "1"
 # create_if_not_exists feature flag doesn't exist on some older versions
 remove_keeper_config "create_if_not_exists" "[01]"

+#todo: remove these after 24.3 released.
+sudo cat /etc/clickhouse-server/config.d/azure_storage_conf.xml \
+    | sed "s|<object_storage_type>azure|<object_storage_type>azure_blob_storage|" \
+    > /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp /etc/clickhouse-server/config.d/azure_storage_conf.xml
+
+#todo: remove these after 24.3 released.
+sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
+    | sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
+    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
+
 # latest_logs_cache_size_threshold setting doesn't exist on some older versions
 remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"

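Note: judging by the sed direction, these `#todo` blocks are compatibility shims for the upgrade check: the test's configs use the short `object_storage_type` aliases (`azure`, `local`), which the pre-24.3 binary under test does not know, so they are expanded to the long-form names before that binary starts. The cat-sed-mv rewrite pattern on an illustrative path:

    cfg=/etc/clickhouse-server/config.d/storage_conf.xml    # illustrative
    sudo cat "$cfg" \
        | sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
        > "$cfg.tmp"
    sudo mv "$cfg.tmp" "$cfg"    # swap the rewritten config back into place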
@@ -111,6 +125,18 @@ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
     > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
 sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml

+#todo: remove these after 24.3 released.
+sudo cat /etc/clickhouse-server/config.d/azure_storage_conf.xml \
+    | sed "s|<object_storage_type>azure|<object_storage_type>azure_blob_storage|" \
+    > /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp /etc/clickhouse-server/config.d/azure_storage_conf.xml
+
+#todo: remove these after 24.3 released.
+sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
+    | sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
+    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
+
 # async_replication setting doesn't exist on some older versions
 remove_keeper_config "async_replication" "1"

@@ -337,7 +363,7 @@ clickhouse-local --structure "test String, res String, time Nullable(Float32), d
     (test like '%Changed settings%') DESC,
     (test like '%New settings%') DESC,
     rowNumberInAllBlocks()
-    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
+    LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo -e "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
 [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv

 # But OOMs in stress test are allowed
@@ -41,20 +41,11 @@ RUN apt-get update \
     bash \
     bsdmainutils \
     build-essential \
-    clang-${LLVM_VERSION} \
-    clang-tidy-${LLVM_VERSION} \
-    cmake \
     gdb \
     git \
     gperf \
-    libclang-rt-${LLVM_VERSION}-dev \
-    lld-${LLVM_VERSION} \
-    llvm-${LLVM_VERSION} \
-    llvm-${LLVM_VERSION}-dev \
-    libclang-${LLVM_VERSION}-dev \
     moreutils \
     nasm \
-    ninja-build \
     pigz \
     rename \
     software-properties-common \
@@ -63,49 +54,4 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-# This symlink is required by gcc to find the lld linker
-RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
-# for external_symbolizer_path
-RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
-# FIXME: workaround for "The imported target "merge-fdata" references the file" error
-# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
-RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
-
-ARG CCACHE_VERSION=4.6.1
-RUN mkdir /tmp/ccache \
-    && cd /tmp/ccache \
-    && curl -L \
-        -O https://github.com/ccache/ccache/releases/download/v$CCACHE_VERSION/ccache-$CCACHE_VERSION.tar.xz \
-        -O https://github.com/ccache/ccache/releases/download/v$CCACHE_VERSION/ccache-$CCACHE_VERSION.tar.xz.asc \
-    && gpg --recv-keys --keyserver hkps://keyserver.ubuntu.com 5A939A71A46792CF57866A51996DDA075594ADB8 \
-    && gpg --verify ccache-4.6.1.tar.xz.asc \
-    && tar xf ccache-$CCACHE_VERSION.tar.xz \
-    && cd /tmp/ccache/ccache-$CCACHE_VERSION \
-    && cmake -DCMAKE_INSTALL_PREFIX=/usr \
-        -DCMAKE_BUILD_TYPE=None \
-        -DZSTD_FROM_INTERNET=ON \
-        -DREDIS_STORAGE_BACKEND=OFF \
-        -Wno-dev \
-        -B build \
-        -S . \
-    && make VERBOSE=1 -C build \
-    && make install -C build \
-    && cd / \
-    && rm -rf /tmp/ccache
-
-ARG TARGETARCH
-ARG SCCACHE_VERSION=v0.5.4
-ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
-# sccache requires a value for the region. So by default we use The Default Region
-ENV SCCACHE_REGION=us-east-1
-RUN arch=${TARGETARCH:-amd64} \
-    && case $arch in \
-        amd64) rarch=x86_64 ;; \
-        arm64) rarch=aarch64 ;; \
-    esac \
-    && curl -Ls "https://github.com/mozilla/sccache/releases/download/$SCCACHE_VERSION/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl.tar.gz" | \
-        tar xz -C /tmp \
-    && mv "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl/sccache" /usr/bin \
-    && rm "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl" -r
-
 COPY process_functional_tests_result.py /
29 docs/changelogs/v23.3.20.27-lts.md Normal file
@@ -0,0 +1,29 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v23.3.20.27-lts (cc974ba4f81) FIXME as compared to v23.3.19.32-lts (c4d4ca8ec02)
+
+#### Improvement
+* Backported in [#58818](https://github.com/ClickHouse/ClickHouse/issues/58818): Add `SYSTEM JEMALLOC PURGE` for purging unused jemalloc pages, `SYSTEM JEMALLOC [ ENABLE | DISABLE | FLUSH ] PROFILE` for controlling jemalloc profile if the profiler is enabled. Add jemalloc-related 4LW command in Keeper: `jmst` for dumping jemalloc stats, `jmfp`, `jmep`, `jmdp` for controlling jemalloc profile if the profiler is enabled. [#58665](https://github.com/ClickHouse/ClickHouse/pull/58665) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#59877](https://github.com/ClickHouse/ClickHouse/issues/59877): If you want to run initdb scripts every time the ClickHouse container starts, you should initialize the environment variable CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS. [#59808](https://github.com/ClickHouse/ClickHouse/pull/59808) ([Alexander Nikolaev](https://github.com/AlexNik)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix double destroy call on exception throw in addBatchLookupTable8 [#58745](https://github.com/ClickHouse/ClickHouse/pull/58745) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix: LIMIT BY and LIMIT in distributed query [#59153](https://github.com/ClickHouse/ClickHouse/pull/59153) ([Igor Nikonov](https://github.com/devcrafter)).
+* Fix translate() with FixedString input [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix cosineDistance crash with Nullable [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix possible race in ManyAggregatedData dtor. [#58624](https://github.com/ClickHouse/ClickHouse/pull/58624) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Make ZooKeeper actually sequentially consistent [#59735](https://github.com/ClickHouse/ClickHouse/pull/59735) ([Alexander Tokmakov](https://github.com/tavplubix)).
+
39 docs/changelogs/v23.8.10.43-lts.md Normal file
@@ -0,0 +1,39 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v23.8.10.43-lts (a278225bba9) FIXME as compared to v23.8.9.54-lts (192a1d231fa)
+
+#### Improvement
+* Backported in [#58819](https://github.com/ClickHouse/ClickHouse/issues/58819): Add `SYSTEM JEMALLOC PURGE` for purging unused jemalloc pages, `SYSTEM JEMALLOC [ ENABLE | DISABLE | FLUSH ] PROFILE` for controlling jemalloc profile if the profiler is enabled. Add jemalloc-related 4LW command in Keeper: `jmst` for dumping jemalloc stats, `jmfp`, `jmep`, `jmdp` for controlling jemalloc profile if the profiler is enabled. [#58665](https://github.com/ClickHouse/ClickHouse/pull/58665) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#60286](https://github.com/ClickHouse/ClickHouse/issues/60286): Copy S3 file GCP fallback to buffer copy in case GCP returned `Internal Error` with `GATEWAY_TIMEOUT` HTTP error code. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#59879](https://github.com/ClickHouse/ClickHouse/issues/59879): If you want to run initdb scripts every time the ClickHouse container starts, you should initialize the environment variable CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS. [#59808](https://github.com/ClickHouse/ClickHouse/pull/59808) ([Alexander Nikolaev](https://github.com/AlexNik)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Background merges correctly use temporary data storage in the cache [#57275](https://github.com/ClickHouse/ClickHouse/pull/57275) ([vdimir](https://github.com/vdimir)).
+* MergeTree mutations reuse source part index granularity [#57352](https://github.com/ClickHouse/ClickHouse/pull/57352) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fix double destroy call on exception throw in addBatchLookupTable8 [#58745](https://github.com/ClickHouse/ClickHouse/pull/58745) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix JSONExtract function for LowCardinality(Nullable) columns [#58808](https://github.com/ClickHouse/ClickHouse/pull/58808) ([vdimir](https://github.com/vdimir)).
+* Fix: LIMIT BY and LIMIT in distributed query [#59153](https://github.com/ClickHouse/ClickHouse/pull/59153) ([Igor Nikonov](https://github.com/devcrafter)).
+* Fix translate() with FixedString input [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix error "Read beyond last offset" for AsynchronousBoundedReadBuffer [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix query start time on non initial queries [#59662](https://github.com/ClickHouse/ClickHouse/pull/59662) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).
+* rabbitmq: fix having neither acked nor nacked messages [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix cosineDistance crash with Nullable [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix rare race in external sort/aggregation with temporary data in cache [#58013](https://github.com/ClickHouse/ClickHouse/pull/58013) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix possible race in ManyAggregatedData dtor. [#58624](https://github.com/ClickHouse/ClickHouse/pull/58624) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix 02720_row_policy_column_with_dots [#59453](https://github.com/ClickHouse/ClickHouse/pull/59453) ([Duc Canh Le](https://github.com/canhld94)).
+* Pin python dependencies in stateless tests [#59663](https://github.com/ClickHouse/ClickHouse/pull/59663) ([Raúl Marín](https://github.com/Algunenano)).
+* Make ZooKeeper actually sequentially consistent [#59735](https://github.com/ClickHouse/ClickHouse/pull/59735) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Remove broken test while we fix it [#60547](https://github.com/ClickHouse/ClickHouse/pull/60547) ([Raúl Marín](https://github.com/Algunenano)).
+
462
docs/changelogs/v24.2.1.2248-stable.md
Normal file
462
docs/changelogs/v24.2.1.2248-stable.md
Normal file
@ -0,0 +1,462 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2024
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2024 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v24.2.1.2248-stable (891689a4150) FIXME as compared to v24.1.1.2048-stable (5a024dfc093)
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Validate suspicious/experimental types in nested types. Previously we didn't validate such types (except JSON) in nested types like Array/Tuple/Map. [#59385](https://github.com/ClickHouse/ClickHouse/pull/59385) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* The sort clause `ORDER BY ALL` (introduced with v23.12) is replaced by `ORDER BY *`. The previous syntax was too error-prone for tables with a column `all`. [#59450](https://github.com/ClickHouse/ClickHouse/pull/59450) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Rename the setting `extract_kvp_max_pairs_per_row` to `extract_key_value_pairs_max_pairs_per_row`. The bug (unnecessary abbreviation in the setting name) was introduced in https://github.com/ClickHouse/ClickHouse/pull/43606. Fix the documentation of this setting. [#59683](https://github.com/ClickHouse/ClickHouse/pull/59683) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Rename the setting extract_kvp_max_pairs_per_row to extract_key_value_pairs_max_pairs_per_row. The bug (unnecessary abbreviation in the setting name) was introduced in https://github.com/ClickHouse/ClickHouse/pull/43606. Fix the documentation of this setting. [#59960](https://github.com/ClickHouse/ClickHouse/pull/59960) ([jsc0218](https://github.com/jsc0218)).
|
||||||
|
* Add sanity check for number of threads and block sizes. [#60138](https://github.com/ClickHouse/ClickHouse/pull/60138) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Added maximum sequential login failures to the quota. [#54737](https://github.com/ClickHouse/ClickHouse/pull/54737) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* Added new syntax which allows to specify definer user in View/Materialized View. This allows to execute selects/inserts from views without explicit grants for underlying tables. [#54901](https://github.com/ClickHouse/ClickHouse/pull/54901) ([pufit](https://github.com/pufit)).
|
||||||
|
* Backup & Restore support for AzureBlobStorage resolves [#50747](https://github.com/ClickHouse/ClickHouse/issues/50747). [#56988](https://github.com/ClickHouse/ClickHouse/pull/56988) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Implemented automatic conversion of merge tree tables of different kinds to replicated engine. Create empty `convert_to_replicated` file in table's data directory (`/clickhouse/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`) and that table will be converted automatically on next server start. [#57798](https://github.com/ClickHouse/ClickHouse/pull/57798) ([Kirill](https://github.com/kirillgarbar)).
|
||||||
|
* Added table function `mergeTreeIndex`. It represents the contents of index and marks files of `MergeTree` tables. It can be used for introspection. Syntax: `mergeTreeIndex(database, table, [with_marks = true])` where `database.table` is an existing table with `MergeTree` engine. [#58140](https://github.com/ClickHouse/ClickHouse/pull/58140) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Added function `seriesOutliersTukey` to detect outliers in series data using Tukey's fences algorithm. [#58632](https://github.com/ClickHouse/ClickHouse/pull/58632) ([Bhavna Jindal](https://github.com/bhavnajindal)).
|
||||||
|
* The user can now specify the template string directly in the query using `format_schema_rows_template` as an alternative to `format_template_row`. Closes [#31363](https://github.com/ClickHouse/ClickHouse/issues/31363). [#59088](https://github.com/ClickHouse/ClickHouse/pull/59088) ([Shaun Struwig](https://github.com/Blargian)).
|
||||||
|
* Try to detect file format automatically during schema inference if it's unknown in `file/s3/hdfs/url/azureBlobStorage` engines. Closes [#50576](https://github.com/ClickHouse/ClickHouse/issues/50576). [#59092](https://github.com/ClickHouse/ClickHouse/pull/59092) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add function variantType that returns Enum with variant type name for each row. [#59398](https://github.com/ClickHouse/ClickHouse/pull/59398) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Added query `ALTER TABLE table FORGET PARTITION partition` that removes ZooKeeper nodes, related to an empty partition. [#59507](https://github.com/ClickHouse/ClickHouse/pull/59507) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Support JWT credentials file for the NATS table engine. [#59543](https://github.com/ClickHouse/ClickHouse/pull/59543) ([Nickolaj Jepsen](https://github.com/nickolaj-jepsen)).
|
||||||
|
* Provides new aggregate function ‘groupArrayIntersect’. Follows up: [#49862](https://github.com/ClickHouse/ClickHouse/issues/49862). [#59598](https://github.com/ClickHouse/ClickHouse/pull/59598) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Implemented system.dns_cache table, which can be useful for debugging DNS issues. [#59856](https://github.com/ClickHouse/ClickHouse/pull/59856) ([Kirill Nikiforov](https://github.com/allmazz)).
|
||||||
|
* The codec `LZ4HC` will accept a new level 2, which is faster than the previous minimum level 3, at the expense of less compression. In previous versions, `LZ4HC(2)` and less was the same as `LZ4HC(3)`. Author: [Cyan4973](https://github.com/Cyan4973). [#60090](https://github.com/ClickHouse/ClickHouse/pull/60090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Implemented system.dns_cache table, which can be useful for debugging DNS issues. New server setting dns_cache_max_size. [#60257](https://github.com/ClickHouse/ClickHouse/pull/60257) ([Kirill Nikiforov](https://github.com/allmazz)).
|
||||||
|
* Support single-argument version for the merge table function, as `merge(['db_name', ] 'tables_regexp')`. [#60372](https://github.com/ClickHouse/ClickHouse/pull/60372) ([豪肥肥](https://github.com/HowePa)).
|
||||||
|
* Added new syntax which allows to specify definer user in View/Materialized View. This allows to execute selects/inserts from views without explicit grants for underlying tables. [#60439](https://github.com/ClickHouse/ClickHouse/pull/60439) ([pufit](https://github.com/pufit)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section. [#52230](https://github.com/ClickHouse/ClickHouse/pull/52230) ([JackyWoo](https://github.com/JackyWoo)).
|
||||||
|
* Vectorized distance functions used in vector search. [#58866](https://github.com/ClickHouse/ClickHouse/pull/58866) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Continue optimizing branch miss of if function when result type is float*/decimal*/int* , follow up of https://github.com/ClickHouse/ClickHouse/pull/57885. [#59148](https://github.com/ClickHouse/ClickHouse/pull/59148) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Optimize if function when input type is map, speed up by ~10x. [#59413](https://github.com/ClickHouse/ClickHouse/pull/59413) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Improve performance of Int8 type by implementing strict aliasing. [#59485](https://github.com/ClickHouse/ClickHouse/pull/59485) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Optimize performance of sum/avg conditionally for bigint and big decimal types by reducing branch miss. [#59504](https://github.com/ClickHouse/ClickHouse/pull/59504) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Improve performance of SELECTs with active mutations. [#59531](https://github.com/ClickHouse/ClickHouse/pull/59531) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Optimized function `isNotNull` with AVX2. [#59621](https://github.com/ClickHouse/ClickHouse/pull/59621) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Reuse the result of `FunctionFactory::instance().get("isNotNull", context)` and `FunctionFactory::instance().get("assumeNotNull", context)`. Make sure it is called once during the lifetime of `FunctionCoalesce`. [#59627](https://github.com/ClickHouse/ClickHouse/pull/59627) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Improve ASOF JOIN performance for sorted or almost sorted data. [#59731](https://github.com/ClickHouse/ClickHouse/pull/59731) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Primary key will use less amount of memory. [#60049](https://github.com/ClickHouse/ClickHouse/pull/60049) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve memory usage for primary key and some other operations. [#60050](https://github.com/ClickHouse/ClickHouse/pull/60050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The tables' primary keys will be loaded in memory lazily on first access. This is controlled by the new MergeTree setting `primary_key_lazy_load`, which is on by default. This provides several advantages: - it will not be loaded for tables that are not used; - if there is not enough memory, an exception will be thrown on first use instead of at server startup. This provides several disadvantages: - the latency of loading the primary key will be paid on the first query rather than before accepting connections; this theoretically may introduce a thundering-herd problem. This closes [#11188](https://github.com/ClickHouse/ClickHouse/issues/11188). [#60093](https://github.com/ClickHouse/ClickHouse/pull/60093) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Vectorized function `dotProduct` which is useful for vector search. [#60202](https://github.com/ClickHouse/ClickHouse/pull/60202) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* As is shown in Fig 1, the replacement of "&&" with "&" could generate the SIMD code. ![image](https://github.com/ClickHouse/ClickHouse/assets/26588299/a5a72ac4-6dc6-4d52-835a-4f512e55f0b9) Fig 1. Code compiled from '&&' (left) and '&' (right). [#60498](https://github.com/ClickHouse/ClickHouse/pull/60498) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Added support for parameterized view with analyzer to not analyze create parameterized view. Refactor existing parameterized view logic to not analyze create parameterized view. [#54211](https://github.com/ClickHouse/ClickHouse/pull/54211) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Do not consider data part as broken if projection is broken. Closes [#56593](https://github.com/ClickHouse/ClickHouse/issues/56593). [#56864](https://github.com/ClickHouse/ClickHouse/pull/56864) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add short-circuit ability for `dictGetOrDefault` function. Closes [#52098](https://github.com/ClickHouse/ClickHouse/issues/52098). [#57767](https://github.com/ClickHouse/ClickHouse/pull/57767) ([jsc0218](https://github.com/jsc0218)).
|
||||||
|
* Running `ALTER COLUMN MATERIALIZE` on a column with `DEFAULT` or `MATERIALIZED` expression now writes the correct values: The default value for existing parts with default value or the non-default value for existing parts with non-default value. Previously, the default value was written for all existing parts. [#58023](https://github.com/ClickHouse/ClickHouse/pull/58023) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Enabled a backoff logic (e.g. exponential). Will provide an ability for reduced CPU usage, memory usage and log file sizes. [#58036](https://github.com/ClickHouse/ClickHouse/pull/58036) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||||
|
* Add improvement to count InitialQuery. [#58195](https://github.com/ClickHouse/ClickHouse/pull/58195) ([Unalian](https://github.com/Unalian)).
|
||||||
|
* Support negative positional arguments. Closes [#57736](https://github.com/ClickHouse/ClickHouse/issues/57736). [#58292](https://github.com/ClickHouse/ClickHouse/pull/58292) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Implement auto-adjustment for asynchronous insert timeouts. The following settings are introduced: async_insert_poll_timeout_ms, async_insert_use_adaptive_busy_timeout, async_insert_busy_timeout_min_ms, async_insert_busy_timeout_max_ms, async_insert_busy_timeout_increase_rate, async_insert_busy_timeout_decrease_rate. [#58486](https://github.com/ClickHouse/ClickHouse/pull/58486) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||||
|
* Allow to define `volume_priority` in `storage_configuration`. [#58533](https://github.com/ClickHouse/ClickHouse/pull/58533) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Add support for Date32 type in T64 codec. [#58738](https://github.com/ClickHouse/ClickHouse/pull/58738) ([Hongbin Ma](https://github.com/binmahone)).
|
||||||
|
* Support `LEFT JOIN`, `ALL INNER JOIN`, and simple subqueries for parallel replicas (only with analyzer). New setting `parallel_replicas_prefer_local_join` chooses local `JOIN` execution (by default) vs `GLOBAL JOIN`. All tables should exist on every replica from `cluster_for_parallel_replicas`. New settings `min_external_table_block_size_rows` and `min_external_table_block_size_bytes` are used to squash small blocks that are sent for temporary tables (only with analyzer). [#58916](https://github.com/ClickHouse/ClickHouse/pull/58916) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Allow trailing commas in types with several items. [#59119](https://github.com/ClickHouse/ClickHouse/pull/59119) ([Aleksandr Musorin](https://github.com/AVMusorin)).
|
||||||
|
* Allow parallel and distributed processing for `S3Queue` table engine. For distributed processing use setting `s3queue_total_shards_num` (by default `1`). Setting `s3queue_processing_threads_num` previously was not allowed for Ordered processing mode, now it is allowed. Warning: settings `s3queue_processing_threads_num`(processing threads per each shard) and `s3queue_total_shards_num` for ordered mode change how metadata is stored (make the number of `max_processed_file` nodes equal to `s3queue_processing_threads_num * s3queue_total_shards_num`), so they must be the same for all shards and cannot be changed once at least one shard is created. [#59167](https://github.com/ClickHouse/ClickHouse/pull/59167) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Allow concurrent table creation in `DatabaseReplicated` during `recoverLostReplica`. [#59277](https://github.com/ClickHouse/ClickHouse/pull/59277) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* Settings for the Distributed table engine can now be specified in the server configuration file (similar to MergeTree settings), e.g. ``` <distributed> <flush_on_detach>false</flush_on_detach> </distributed> ```. [#59291](https://github.com/ClickHouse/ClickHouse/pull/59291) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Use MergeTree as a default table engine. It makes the usability much better, and closer to ClickHouse Cloud. [#59316](https://github.com/ClickHouse/ClickHouse/pull/59316) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Retry disconnects and expired sessions when reading `system.zookeeper`. This is helpful when reading many rows from `system.zookeeper` table especially in the presence of fault-injected disconnects. [#59388](https://github.com/ClickHouse/ClickHouse/pull/59388) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Do not interpret numbers with leading zeroes as octals when `input_format_values_interpret_expressions=0`. [#59403](https://github.com/ClickHouse/ClickHouse/pull/59403) ([Joanna Hulboj](https://github.com/jh0x)).
|
||||||
|
* At startup and whenever config files are changed, ClickHouse updates the hard memory limits of its total memory tracker. These limits are computed based on various server settings and cgroups limits (on Linux). Previously, setting `/sys/fs/cgroup/memory.max` (for cgroups v2) was hard-coded. As a result, cgroup v2 memory limits configured for nested groups (hierarchies), e.g. `/sys/fs/cgroup/my/nested/group/memory.max` were ignored. This is now fixed. The behavior of v1 memory limits remains unchanged. [#59435](https://github.com/ClickHouse/ClickHouse/pull/59435) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* New profile events added to observe the time spent on calculating PK/projections/secondary indices during `INSERT`-s. [#59436](https://github.com/ClickHouse/ClickHouse/pull/59436) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Allow to define a starting point for S3Queue with Ordered mode at creation using setting `s3queue_last_processed_path`. [#59446](https://github.com/ClickHouse/ClickHouse/pull/59446) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Keeper improvement: cache only a certain amount of logs in-memory controlled by `latest_logs_cache_size_threshold` and `commit_logs_cache_size_threshold`. [#59460](https://github.com/ClickHouse/ClickHouse/pull/59460) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Made comments for system tables also available in `system.tables` in `clickhouse-local`. [#59493](https://github.com/ClickHouse/ClickHouse/pull/59493) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Don't infer floats in exponential notation by default. Add a setting `input_format_try_infer_exponent_floats` that will restore previous behaviour (disabled by default). Closes [#59476](https://github.com/ClickHouse/ClickHouse/issues/59476). [#59500](https://github.com/ClickHouse/ClickHouse/pull/59500) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Allow alter operations to be surrounded by parenthesis. The emission of parentheses can be controlled by the `format_alter_operations_with_parentheses` config. By default in formatted queries the parentheses are emitted as we store the formatted alter operations in some places as metadata (e.g.: mutations). The new syntax clarifies some of the queries where alter operations end in a list. E.g.: `ALTER TABLE x MODIFY TTL date GROUP BY a, b, DROP COLUMN c` cannot be parsed properly with the old syntax. In the new syntax the query `ALTER TABLE x (MODIFY TTL date GROUP BY a, b), (DROP COLUMN c)` is obvious. Older versions are not able to read the new syntax, therefore using the new syntax might cause issues if newer and older version of ClickHouse are mixed in a single cluster. [#59532](https://github.com/ClickHouse/ClickHouse/pull/59532) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* The previous default value equals to 1 MB for `async_insert_max_data_size` appeared to be too small. The new one would be 10 MiB. [#59536](https://github.com/ClickHouse/ClickHouse/pull/59536) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Previously the whole result was accumulated in memory and returned as one big chunk. This change should help to reduce memory consumption when reading many rows from `system.zookeeper`, allow showing intermediate progress (how many rows have been read so far) and avoid hitting connection timeout when result set is big. [#59545](https://github.com/ClickHouse/ClickHouse/pull/59545) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Now dashboard understands both compressed and uncompressed state of URL's #hash (backward compatibility). Continuation of [#59124](https://github.com/ClickHouse/ClickHouse/issues/59124) . [#59548](https://github.com/ClickHouse/ClickHouse/pull/59548) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Bumped Intel QPL (used by codec `DEFLATE_QPL`) from v1.3.1 to v1.4.0 . Also fixed a bug for polling timeout mechanism, as we observed in same cases timeout won't work properly, if timeout happen, IAA and CPU may process buffer concurrently. So far, we'd better make sure IAA codec status is not QPL_STS_BEING_PROCESSED, then fallback to SW codec. [#59551](https://github.com/ClickHouse/ClickHouse/pull/59551) ([jasperzhu](https://github.com/jinjunzh)).
|
||||||
|
* Keeper improvement: reduce size of data node even more. [#59592](https://github.com/ClickHouse/ClickHouse/pull/59592) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Do not show a warning about the server version in ClickHouse Cloud because ClickHouse Cloud handles seamless upgrades automatically. [#59657](https://github.com/ClickHouse/ClickHouse/pull/59657) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* After self-extraction temporary binary is moved instead copying. [#59661](https://github.com/ClickHouse/ClickHouse/pull/59661) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Fix stack unwinding on MacOS. This closes [#53653](https://github.com/ClickHouse/ClickHouse/issues/53653). [#59690](https://github.com/ClickHouse/ClickHouse/pull/59690) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Check for stack overflow in parsers even if the user misconfigured the `max_parser_depth` setting to a very high value. This closes [#59622](https://github.com/ClickHouse/ClickHouse/issues/59622). [#59697](https://github.com/ClickHouse/ClickHouse/pull/59697) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Perform synchronous inserts if dependent MV deduplication is enabled through deduplicate_blocks_in_dependent_materialized_views=1. [#59699](https://github.com/ClickHouse/ClickHouse/pull/59699) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||||
|
* Added settings `split_parts_ranges_into_intersecting_and_non_intersecting_final` and `split_intersecting_parts_ranges_into_layers_final`. This settings are needed to disable optimizations for queries with `FINAL` and needed for debug only. [#59705](https://github.com/ClickHouse/ClickHouse/pull/59705) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Unify xml and sql created named collection behaviour in kafka storage. [#59710](https://github.com/ClickHouse/ClickHouse/pull/59710) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
|
||||||
|
* In case when `merge_max_block_size_bytes` is small enough and tables contain wide rows (strings or tuples) background merges may stuck in an endless loop. This behaviour is fixed. Follow-up for https://github.com/ClickHouse/ClickHouse/pull/59340. [#59812](https://github.com/ClickHouse/ClickHouse/pull/59812) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Allow uuid in replica_path if CREATE TABLE explicitly has it. [#59908](https://github.com/ClickHouse/ClickHouse/pull/59908) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add column `metadata_version` of ReplicatedMergeTree table in `system.tables` system table. [#59942](https://github.com/ClickHouse/ClickHouse/pull/59942) ([Maksim Kita](https://github.com/kitaisreal)).
* Keeper improvement: send only Keeper-related metrics/events for Prometheus. [#59945](https://github.com/ClickHouse/ClickHouse/pull/59945) ([Antonio Andelic](https://github.com/antonio2368)).
* The dashboard will display metrics across different ClickHouse versions even if the structure of system tables has changed after the upgrade. [#59967](https://github.com/ClickHouse/ClickHouse/pull/59967) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow loading AZ info from a file. [#59976](https://github.com/ClickHouse/ClickHouse/pull/59976) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Keeper improvement: add retries on failures for Disk-related operations. [#59980](https://github.com/ClickHouse/ClickHouse/pull/59980) ([Antonio Andelic](https://github.com/antonio2368)).
* Add new config setting `backups.remove_backup_files_after_failure`:
  ```xml
  <clickhouse>
      <backups>
          <remove_backup_files_after_failure>true</remove_backup_files_after_failure>
      </backups>
  </clickhouse>
  ```
  [#60002](https://github.com/ClickHouse/ClickHouse/pull/60002) ([Vitaly Baranov](https://github.com/vitlibar)).
* Use multiple threads to read table metadata from a backup when executing the RESTORE command. [#60040](https://github.com/ClickHouse/ClickHouse/pull/60040) ([Vitaly Baranov](https://github.com/vitlibar)).
* Now if `StorageBuffer` has more than one shard (`num_layers` > 1), background flushes happen simultaneously for all shards in multiple threads. [#60111](https://github.com/ClickHouse/ClickHouse/pull/60111) ([alesapin](https://github.com/alesapin)).
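  A sketch of a multi-shard buffer that now flushes in parallel (destination table `dst` is hypothetical):
  ```sql
  -- 16 buffer layers (shards); with this change they flush concurrently.
  CREATE TABLE buf AS dst
  ENGINE = Buffer(currentDatabase(), dst, 16,
                  10, 100,              -- min_time, max_time (seconds)
                  10000, 1000000,       -- min_rows, max_rows
                  10000000, 100000000); -- min_bytes, max_bytes
  ```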
* Support specifying users for specific S3 settings in config using the `user` key. [#60144](https://github.com/ClickHouse/ClickHouse/pull/60144) ([Antonio Andelic](https://github.com/antonio2368)).
* When copying an S3 file on GCP fails with an `Internal Error` and the `GATEWAY_TIMEOUT` HTTP error code, fall back to a buffer copy. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)).
* Implement a comparison operator for Variant values and proper Field insertion into a Variant column. Don't allow creating a `Variant` type with similar variant types by default (allowed under the setting `allow_suspicious_variant_types`). Closes [#59996](https://github.com/ClickHouse/ClickHouse/issues/59996). Closes [#59850](https://github.com/ClickHouse/ClickHouse/issues/59850). [#60198](https://github.com/ClickHouse/ClickHouse/pull/60198) ([Kruglov Pavel](https://github.com/Avogar)).
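  A sketch of the new guard; the name of the experimental gate (`allow_experimental_variant_type`) is assumed here:
  ```sql
  SET allow_experimental_variant_type = 1;  -- assumed experimental gate
  -- UInt32 and Int64 are "similar" variants, so this fails by default:
  CREATE TABLE t (v Variant(UInt32, Int64)) ENGINE = Memory;
  -- Explicit opt-in:
  SET allow_suspicious_variant_types = 1;
  CREATE TABLE t (v Variant(UInt32, Int64)) ENGINE = Memory;
  ```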
* Short-circuit execution for `ULIDStringToDateTime`. [#60211](https://github.com/ClickHouse/ClickHouse/pull/60211) ([Juan Madurga](https://github.com/jlmadurga)).
* Added `query_id` column for tables `system.backups` and `system.backup_log`. Added error stacktrace to `error` column. [#60220](https://github.com/ClickHouse/ClickHouse/pull/60220) ([Maksim Kita](https://github.com/kitaisreal)).
* Connections through the MySQL port now automatically run with setting `prefer_column_name_to_alias = 1` to support QuickSight out-of-the-box. Also, settings `mysql_map_string_to_text_in_show_columns` and `mysql_map_fixed_string_to_text_in_show_columns` are now enabled by default, likewise affecting only MySQL connections. This increases compatibility with more BI tools. [#60365](https://github.com/ClickHouse/ClickHouse/pull/60365) ([Robert Schulze](https://github.com/rschu1ze)).
* When the output format is Pretty and a block consists of a single numeric value which exceeds one million, a readable number will be printed to the right of the table, e.g.:
  ```
  ┌──────count()─┐
  │ 233765663884 │ -- 233.77 billion
  └──────────────┘
  ```
  [#60379](https://github.com/ClickHouse/ClickHouse/pull/60379) ([rogeryk](https://github.com/rogeryk)).
* Fix a race condition in JavaScript code leading to duplicate charts on top of each other. [#60392](https://github.com/ClickHouse/ClickHouse/pull/60392) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Check for stack overflow in parsers even if the user misconfigured the `max_parser_depth` setting to a very high value. This closes [#59622](https://github.com/ClickHouse/ClickHouse/issues/59622). [#60434](https://github.com/ClickHouse/ClickHouse/pull/60434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Build/Testing/Packaging Improvement

* Added builds and tests with coverage collection with introspection. Continuation of [#56102](https://github.com/ClickHouse/ClickHouse/issues/56102). [#58792](https://github.com/ClickHouse/ClickHouse/pull/58792) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Include `pytest-reportlog` in the integration test CI runner Dockerfile to enable JSON test reports. [#58926](https://github.com/ClickHouse/ClickHouse/pull/58926) ([MyroTk](https://github.com/MyroTk)).
* Update the Rust toolchain in `corrosion-cmake` when the CMake cross-compilation toolchain variable is set. [#59309](https://github.com/ClickHouse/ClickHouse/pull/59309) ([Aris Tritas](https://github.com/aris-aiven)).
* Add some fuzzing to ASTLiterals. [#59383](https://github.com/ClickHouse/ClickHouse/pull/59383) ([Raúl Marín](https://github.com/Algunenano)).
* If you want to run initdb scripts every time the ClickHouse container starts, set the environment variable `CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS`. [#59808](https://github.com/ClickHouse/ClickHouse/pull/59808) ([Alexander Nikolaev](https://github.com/AlexNik)).
* Remove the ability to disable generic ClickHouse components (like server/client/...), but keep some that require extra libraries (like ODBC or Keeper). [#59857](https://github.com/ClickHouse/ClickHouse/pull/59857) ([Azat Khuzhin](https://github.com/azat)).
* The query fuzzer will fuzz SETTINGS inside queries. [#60087](https://github.com/ClickHouse/ClickHouse/pull/60087) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test that validates projections still work after attach partition. [#60415](https://github.com/ClickHouse/ClickHouse/pull/60415) ([Arthur Passos](https://github.com/arthurpassos)).
* Add a test that validates attach partition fails if the structure differs because of a materialized column. [#60418](https://github.com/ClickHouse/ClickHouse/pull/60418) ([Arthur Passos](https://github.com/arthurpassos)).
* Add support for building ClickHouse with clang-19 (master). [#60448](https://github.com/ClickHouse/ClickHouse/pull/60448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the check-whitespaces check. [#60496](https://github.com/ClickHouse/ClickHouse/pull/60496) ([Raúl Marín](https://github.com/Algunenano)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix non-ready set in TTL WHERE. [#57430](https://github.com/ClickHouse/ClickHouse/pull/57430) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix quantilesGK bug [#58216](https://github.com/ClickHouse/ClickHouse/pull/58216) ([李扬](https://github.com/taiyang-li)).
* Disable parallel replicas JOIN with CTE (not analyzer) [#59239](https://github.com/ClickHouse/ClickHouse/pull/59239) ([Raúl Marín](https://github.com/Algunenano)).
* Fix bug with `intDiv` for decimal arguments [#59243](https://github.com/ClickHouse/ClickHouse/pull/59243) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix translate() with FixedString input [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)).
* Fix digest calculation in Keeper [#59439](https://github.com/ClickHouse/ClickHouse/pull/59439) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix stacktraces for binaries without debug symbols [#59444](https://github.com/ClickHouse/ClickHouse/pull/59444) ([Azat Khuzhin](https://github.com/azat)).
* Fix `ASTAlterCommand::formatImpl` in case of column specific settings… [#59445](https://github.com/ClickHouse/ClickHouse/pull/59445) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix `SELECT * FROM [...] ORDER BY ALL` with Analyzer [#59462](https://github.com/ClickHouse/ClickHouse/pull/59462) ([zhongyuankai](https://github.com/zhongyuankai)).
* Fix possible uncaught exception during distributed query cancellation [#59487](https://github.com/ClickHouse/ClickHouse/pull/59487) ([Azat Khuzhin](https://github.com/azat)).
* Make MAX use the same rules as permutation for complex types [#59498](https://github.com/ClickHouse/ClickHouse/pull/59498) ([Raúl Marín](https://github.com/Algunenano)).
* Fix corner case when passing `update_insert_deduplication_token_in_dependent_materialized_views` [#59544](https://github.com/ClickHouse/ClickHouse/pull/59544) ([Jordi Villar](https://github.com/jrdi)).
* Fix incorrect result of arrayElement / map[] on empty value [#59594](https://github.com/ClickHouse/ClickHouse/pull/59594) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash in topK when merging empty states [#59603](https://github.com/ClickHouse/ClickHouse/pull/59603) ([Raúl Marín](https://github.com/Algunenano)).
* Fix distributed table with a constant sharding key [#59606](https://github.com/ClickHouse/ClickHouse/pull/59606) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix_kql_issue_found_by_wingfuzz [#59626](https://github.com/ClickHouse/ClickHouse/pull/59626) ([Yong Wang](https://github.com/kashwy)).
* Fix error "Read beyond last offset" for AsynchronousBoundedReadBuffer [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)).
* Maintain function alias in RewriteSumFunctionWithSumAndCountVisitor [#59658](https://github.com/ClickHouse/ClickHouse/pull/59658) ([Raúl Marín](https://github.com/Algunenano)).
* Fix query start time on non initial queries [#59662](https://github.com/ClickHouse/ClickHouse/pull/59662) ([Raúl Marín](https://github.com/Algunenano)).
* Validate types of arguments for `minmax` skipping index [#59733](https://github.com/ClickHouse/ClickHouse/pull/59733) ([Anton Popov](https://github.com/CurtizJ)).
* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).
* Fix AST fuzzer issue in function `countMatches` [#59752](https://github.com/ClickHouse/ClickHouse/pull/59752) ([Robert Schulze](https://github.com/rschu1ze)).
* rabbitmq: fix having neither acked nor nacked messages [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix StorageURL doing some of the query execution in single thread [#59833](https://github.com/ClickHouse/ClickHouse/pull/59833) ([Michael Kolupaev](https://github.com/al13n321)).
* s3queue: fix uninitialized value [#59897](https://github.com/ClickHouse/ClickHouse/pull/59897) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix parsing of partition expressions surrounded by parens [#59901](https://github.com/ClickHouse/ClickHouse/pull/59901) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix crash in JSONColumnsWithMetadata format over http [#59925](https://github.com/ClickHouse/ClickHouse/pull/59925) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not rewrite sum() to count() if return value differs in analyzer [#59926](https://github.com/ClickHouse/ClickHouse/pull/59926) ([Azat Khuzhin](https://github.com/azat)).
* UniqExactSet read crash fix [#59928](https://github.com/ClickHouse/ClickHouse/pull/59928) ([Maksim Kita](https://github.com/kitaisreal)).
* ReplicatedMergeTree invalid metadata_version fix [#59946](https://github.com/ClickHouse/ClickHouse/pull/59946) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race in `StorageDistributed` [#59987](https://github.com/ClickHouse/ClickHouse/pull/59987) ([Nikita Taranov](https://github.com/nickitat)).
* Run init scripts when option is enabled rather than disabled [#59991](https://github.com/ClickHouse/ClickHouse/pull/59991) ([jktng](https://github.com/jktng)).
* Fix scale conversion for DateTime64 [#60004](https://github.com/ClickHouse/ClickHouse/pull/60004) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix INSERT into SQLite with single quote (by escaping single quotes with a quote instead of backslash) [#60015](https://github.com/ClickHouse/ClickHouse/pull/60015) ([Azat Khuzhin](https://github.com/azat)).
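  A sketch of the behaviour (an attached SQLite table `sqlite_db.tbl` with a String column `s` is hypothetical):
  ```sql
  INSERT INTO sqlite_db.tbl (s) VALUES ('it''s');
  -- The statement forwarded to SQLite now doubles the single quote ('it''s'),
  -- the standard SQL escape, instead of using a backslash ('it\'s'),
  -- which SQLite does not interpret as an escape.
  ```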
* Fix several logical errors in arrayFold [#60022](https://github.com/ClickHouse/ClickHouse/pull/60022) ([Raúl Marín](https://github.com/Algunenano)).
* Fix optimize_uniq_to_count removing the column alias [#60026](https://github.com/ClickHouse/ClickHouse/pull/60026) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible exception from s3queue table on drop [#60036](https://github.com/ClickHouse/ClickHouse/pull/60036) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix formatting of NOT with single literals [#60042](https://github.com/ClickHouse/ClickHouse/pull/60042) ([Raúl Marín](https://github.com/Algunenano)).
* Use max_query_size from context in DDLLogEntry instead of hardcoded 4096 [#60083](https://github.com/ClickHouse/ClickHouse/pull/60083) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix inconsistent formatting of queries [#60095](https://github.com/ClickHouse/ClickHouse/pull/60095) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix inconsistent formatting of explain in subqueries [#60102](https://github.com/ClickHouse/ClickHouse/pull/60102) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix cosineDistance crash with Nullable [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)).
* Allow casting of bools in string representation to true bools [#60160](https://github.com/ClickHouse/ClickHouse/pull/60160) ([Robert Schulze](https://github.com/rschu1ze)).
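  For example:
  ```sql
  -- String representations of booleans now cast to proper Bool values.
  SELECT CAST('true' AS Bool) AS t, CAST('false' AS Bool) AS f;
  ```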
* Fix system.s3queue_log [#60166](https://github.com/ClickHouse/ClickHouse/pull/60166) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix arrayReduce with nullable aggregate function name [#60188](https://github.com/ClickHouse/ClickHouse/pull/60188) ([Raúl Marín](https://github.com/Algunenano)).
* Fix actions execution during preliminary filtering (PK, partition pruning) [#60196](https://github.com/ClickHouse/ClickHouse/pull/60196) ([Azat Khuzhin](https://github.com/azat)).
* Hide sensitive info for s3queue [#60233](https://github.com/ClickHouse/ClickHouse/pull/60233) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Revert "Replace `ORDER BY ALL` by `ORDER BY *`" [#60248](https://github.com/ClickHouse/ClickHouse/pull/60248) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix http exception codes. [#60252](https://github.com/ClickHouse/ClickHouse/pull/60252) ([Austin Kothig](https://github.com/kothiga)).
* s3queue: fix bug (also fixes flaky test_storage_s3_queue/test.py::test_shards_distributed) [#60282](https://github.com/ClickHouse/ClickHouse/pull/60282) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix use-of-uninitialized-value and invalid result in hashing functions with IPv6 [#60359](https://github.com/ClickHouse/ClickHouse/pull/60359) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix OptimizeDateOrDateTimeConverterWithPreimageVisitor with null arguments [#60453](https://github.com/ClickHouse/ClickHouse/pull/60453) ([Raúl Marín](https://github.com/Algunenano)).
* Merging [#59674](https://github.com/ClickHouse/ClickHouse/issues/59674). [#60470](https://github.com/ClickHouse/ClickHouse/pull/60470) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Correctly check keys in s3Cluster [#60477](https://github.com/ClickHouse/ClickHouse/pull/60477) ([Antonio Andelic](https://github.com/antonio2368)).

#### CI Fix or Improvement (changelog entry is not required)

* ... [#60457](https://github.com/ClickHouse/ClickHouse/pull/60457) ([Max K.](https://github.com/maxknv)).
* ... [#60512](https://github.com/ClickHouse/ClickHouse/pull/60512) ([Max K.](https://github.com/maxknv)).
* The ARM and AMD docker build jobs used similar job names and thus overwrote each other's job reports; aarch64 and amd64 suffixes were added to fix this. [#60554](https://github.com/ClickHouse/ClickHouse/pull/60554) ([Max K.](https://github.com/maxknv)).
* ... [#60557](https://github.com/ClickHouse/ClickHouse/pull/60557) ([Max K.](https://github.com/maxknv)).
* BUG: a build job could report a success cache record for a failed build. Added a check relying on the job report failing. [#60587](https://github.com/ClickHouse/ClickHouse/pull/60587) ([Max K.](https://github.com/maxknv)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Revert "Add new aggregation function groupArraySorted()""'. [#59003](https://github.com/ClickHouse/ClickHouse/pull/59003) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Update libxml2 version to address some bogus security issues"'. [#59479](https://github.com/ClickHouse/ClickHouse/pull/59479) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Poco Logger small refactoring"'. [#59509](https://github.com/ClickHouse/ClickHouse/pull/59509) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Revert "Poco Logger small refactoring""'. [#59564](https://github.com/ClickHouse/ClickHouse/pull/59564) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "MergeTree FINAL optimization diagnostics and settings"'. [#59702](https://github.com/ClickHouse/ClickHouse/pull/59702) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Use `MergeTree` as a default table engine"'. [#59711](https://github.com/ClickHouse/ClickHouse/pull/59711) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Rename a setting"'. [#59754](https://github.com/ClickHouse/ClickHouse/pull/59754) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Less error prone interface of read buffers"'. [#59911](https://github.com/ClickHouse/ClickHouse/pull/59911) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* NO CL ENTRY: 'Revert "Update version_date.tsv and changelogs after v24.1.4.19-stable"'. [#59973](https://github.com/ClickHouse/ClickHouse/pull/59973) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* NO CL ENTRY: 'Revert "ReplicatedMergeTree invalid metadata_version fix"'. [#60058](https://github.com/ClickHouse/ClickHouse/pull/60058) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Revert "ReplicatedMergeTree invalid metadata_version fix""'. [#60078](https://github.com/ClickHouse/ClickHouse/pull/60078) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Implement system.dns_cache table"'. [#60085](https://github.com/ClickHouse/ClickHouse/pull/60085) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Restriction for the access key id for s3."'. [#60181](https://github.com/ClickHouse/ClickHouse/pull/60181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Do not retry queries if container is down in integration tests"'. [#60215](https://github.com/ClickHouse/ClickHouse/pull/60215) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "Check stack size in Parser"'. [#60216](https://github.com/ClickHouse/ClickHouse/pull/60216) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "Support resource request canceling"'. [#60253](https://github.com/ClickHouse/ClickHouse/pull/60253) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Add definers for views"'. [#60350](https://github.com/ClickHouse/ClickHouse/pull/60350) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Update build-osx.md'. [#60380](https://github.com/ClickHouse/ClickHouse/pull/60380) ([rogeryk](https://github.com/rogeryk)).
* NO CL ENTRY: 'Revert "Fix: IAST::clone() for RENAME"'. [#60398](https://github.com/ClickHouse/ClickHouse/pull/60398) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "Add table function `mergeTreeIndex`"'. [#60428](https://github.com/ClickHouse/ClickHouse/pull/60428) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Userspace page cache"'. [#60550](https://github.com/ClickHouse/ClickHouse/pull/60550) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Analyzer: compute ALIAS columns right after reading"'. [#60570](https://github.com/ClickHouse/ClickHouse/pull/60570) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Analyzer: support aliases and distributed JOINs in StorageMerge [#50894](https://github.com/ClickHouse/ClickHouse/pull/50894) ([Dmitry Novik](https://github.com/novikd)).
* Userspace page cache [#53770](https://github.com/ClickHouse/ClickHouse/pull/53770) ([Michael Kolupaev](https://github.com/al13n321)).
* Simplify optimize-push-to-prewhere from query plan [#58554](https://github.com/ClickHouse/ClickHouse/pull/58554) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Create ch/chc/chl symlinks by cmake as well (for develop mode) [#58609](https://github.com/ClickHouse/ClickHouse/pull/58609) ([Azat Khuzhin](https://github.com/azat)).
* CI: ci cache. step 1 [#58664](https://github.com/ClickHouse/ClickHouse/pull/58664) ([Max K.](https://github.com/maxknv)).
* Enable building JIT with UBSAN [#58952](https://github.com/ClickHouse/ClickHouse/pull/58952) ([Raúl Marín](https://github.com/Algunenano)).
* Support resource request canceling [#59032](https://github.com/ClickHouse/ClickHouse/pull/59032) ([Sergei Trifonov](https://github.com/serxa)).
* Analyzer: Do not resolve remote table id on initiator [#59073](https://github.com/ClickHouse/ClickHouse/pull/59073) ([Dmitry Novik](https://github.com/novikd)).
* Analyzer: Add cast for ConstantNode from constant folding [#59121](https://github.com/ClickHouse/ClickHouse/pull/59121) ([Dmitry Novik](https://github.com/novikd)).
* Fix the default value of `async_insert_max_data_size` in EN document [#59161](https://github.com/ClickHouse/ClickHouse/pull/59161) ([Alex Cheng](https://github.com/Alex-Cheng)).
* CI: Add ARM integration tests [#59241](https://github.com/ClickHouse/ClickHouse/pull/59241) ([Max K.](https://github.com/maxknv)).
* Fix getting filename from read buffer wrappers [#59298](https://github.com/ClickHouse/ClickHouse/pull/59298) ([Kruglov Pavel](https://github.com/Avogar)).
* Update AWS SDK to 1.11.234 [#59299](https://github.com/ClickHouse/ClickHouse/pull/59299) ([Nikita Taranov](https://github.com/nickitat)).
* Split `ISlotControl` from `ConcurrencyControl` [#59313](https://github.com/ClickHouse/ClickHouse/pull/59313) ([Sergei Trifonov](https://github.com/serxa)).
* Some small fixes for docker images [#59337](https://github.com/ClickHouse/ClickHouse/pull/59337) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* CI: bugfix-validate, integration, functional test scripts updates [#59348](https://github.com/ClickHouse/ClickHouse/pull/59348) ([Max K.](https://github.com/maxknv)).
* MaterializedMySQL: Fix gtid_after_attach_test to retry on detach [#59370](https://github.com/ClickHouse/ClickHouse/pull/59370) ([Val Doroshchuk](https://github.com/valbok)).
* Poco Logger small refactoring [#59375](https://github.com/ClickHouse/ClickHouse/pull/59375) ([Maksim Kita](https://github.com/kitaisreal)).
* Add sanity checks for function return types [#59379](https://github.com/ClickHouse/ClickHouse/pull/59379) ([Raúl Marín](https://github.com/Algunenano)).
* Cleanup connection pool surroundings [#59380](https://github.com/ClickHouse/ClickHouse/pull/59380) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix ARRAY JOIN with subcolumns [#59382](https://github.com/ClickHouse/ClickHouse/pull/59382) ([vdimir](https://github.com/vdimir)).
* Update curl submodule to be version 8.50 to address the irrelevant CVE-2023-46218 and CVE-2023-49219, which we don't care about at all. [#59384](https://github.com/ClickHouse/ClickHouse/pull/59384) ([josh-hildred](https://github.com/josh-hildred)).
* Update libxml2 version to address some bogus security issues [#59386](https://github.com/ClickHouse/ClickHouse/pull/59386) ([josh-hildred](https://github.com/josh-hildred)).
* Update version after release [#59393](https://github.com/ClickHouse/ClickHouse/pull/59393) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Job names [#59395](https://github.com/ClickHouse/ClickHouse/pull/59395) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* CI: fix status and report for docker server jobs [#59396](https://github.com/ClickHouse/ClickHouse/pull/59396) ([Max K.](https://github.com/maxknv)).
* Update version_date.tsv and changelogs after v24.1.1.2048-stable [#59397](https://github.com/ClickHouse/ClickHouse/pull/59397) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Forward declaration for PeekableReadBuffer [#59399](https://github.com/ClickHouse/ClickHouse/pull/59399) ([Azat Khuzhin](https://github.com/azat)).
* Progress bar: use FQDN to differentiate metrics from different hosts [#59404](https://github.com/ClickHouse/ClickHouse/pull/59404) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix test test_stop_other_host_during_backup [#59432](https://github.com/ClickHouse/ClickHouse/pull/59432) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update run.sh [#59433](https://github.com/ClickHouse/ClickHouse/pull/59433) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Post a failure status if the CI can not be run [#59440](https://github.com/ClickHouse/ClickHouse/pull/59440) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Safer Rust (catch panic with catch_unwind()) [#59447](https://github.com/ClickHouse/ClickHouse/pull/59447) ([Azat Khuzhin](https://github.com/azat)).
* More parallel insert-select pipeline [#59448](https://github.com/ClickHouse/ClickHouse/pull/59448) ([Nikita Taranov](https://github.com/nickitat)).
* CLion says these headers are unused [#59451](https://github.com/ClickHouse/ClickHouse/pull/59451) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix 02720_row_policy_column_with_dots [#59453](https://github.com/ClickHouse/ClickHouse/pull/59453) ([Duc Canh Le](https://github.com/canhld94)).
* Fix problem detected by UBSAN [#59461](https://github.com/ClickHouse/ClickHouse/pull/59461) ([Raúl Marín](https://github.com/Algunenano)).
* Analyzer: Fix denny_crane [#59483](https://github.com/ClickHouse/ClickHouse/pull/59483) ([vdimir](https://github.com/vdimir)).
* Fix `00191_aggregating_merge_tree_and_final` [#59494](https://github.com/ClickHouse/ClickHouse/pull/59494) ([Nikita Taranov](https://github.com/nickitat)).
* Avoid running all checks when `aspell-dict.txt` was changed [#59496](https://github.com/ClickHouse/ClickHouse/pull/59496) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Fixes for binary.html [#59499](https://github.com/ClickHouse/ClickHouse/pull/59499) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Parallel replicas: better initial replicas failover (2) [#59501](https://github.com/ClickHouse/ClickHouse/pull/59501) ([Igor Nikonov](https://github.com/devcrafter)).
* Update version_date.tsv and changelogs after v24.1.2.5-stable [#59510](https://github.com/ClickHouse/ClickHouse/pull/59510) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.12.3.40-stable [#59511](https://github.com/ClickHouse/ClickHouse/pull/59511) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.11.5.29-stable [#59515](https://github.com/ClickHouse/ClickHouse/pull/59515) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update memory tracker periodically with cgroup memory usage [#59516](https://github.com/ClickHouse/ClickHouse/pull/59516) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove a scary message if an error is retryable [#59517](https://github.com/ClickHouse/ClickHouse/pull/59517) ([alesapin](https://github.com/alesapin)).
* Update the peter-evans/create-pull-request action to v6 [#59520](https://github.com/ClickHouse/ClickHouse/pull/59520) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix usage of StatusType [#59527](https://github.com/ClickHouse/ClickHouse/pull/59527) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Analyzer fix test_select_access_rights/test_main.py::test_select_count [#59528](https://github.com/ClickHouse/ClickHouse/pull/59528) ([vdimir](https://github.com/vdimir)).
* GRPCServer: do not call value() on empty optional query_info [#59533](https://github.com/ClickHouse/ClickHouse/pull/59533) ([Sema Checherinda](https://github.com/CheSema)).
* Use ConnectionPoolPtr instead of raw pointer [#59534](https://github.com/ClickHouse/ClickHouse/pull/59534) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix data race with `AggregatedDataVariants` [#59537](https://github.com/ClickHouse/ClickHouse/pull/59537) ([Nikita Taranov](https://github.com/nickitat)).
* Refactoring of dashboard state encoding [#59554](https://github.com/ClickHouse/ClickHouse/pull/59554) ([Sergei Trifonov](https://github.com/serxa)).
* CI: ci_cache, enable await [#59555](https://github.com/ClickHouse/ClickHouse/pull/59555) ([Max K.](https://github.com/maxknv)).
* Bump libssh to 0.9.8 [#59563](https://github.com/ClickHouse/ClickHouse/pull/59563) ([Robert Schulze](https://github.com/rschu1ze)).
* MultiVersion use mutex [#59565](https://github.com/ClickHouse/ClickHouse/pull/59565) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix aws submodule reference [#59566](https://github.com/ClickHouse/ClickHouse/pull/59566) ([Raúl Marín](https://github.com/Algunenano)).
* Add missed #include <bit> and <exception> [#59567](https://github.com/ClickHouse/ClickHouse/pull/59567) ([Mikhnenko Sasha](https://github.com/4JustMe4)).
* CI: nightly job to update latest docker tag only [#59586](https://github.com/ClickHouse/ClickHouse/pull/59586) ([Max K.](https://github.com/maxknv)).
* Analyzer: compute ALIAS columns right after reading [#59595](https://github.com/ClickHouse/ClickHouse/pull/59595) ([vdimir](https://github.com/vdimir)).
* Add another sanity check for function return types [#59605](https://github.com/ClickHouse/ClickHouse/pull/59605) ([Raúl Marín](https://github.com/Algunenano)).
* Update README.md [#59610](https://github.com/ClickHouse/ClickHouse/pull/59610) ([Tyler Hannan](https://github.com/tylerhannan)).
* Updated a list of trusted contributors [#59616](https://github.com/ClickHouse/ClickHouse/pull/59616) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* CI: fix ast fuzzer job report (slack bot issue) [#59629](https://github.com/ClickHouse/ClickHouse/pull/59629) ([Max K.](https://github.com/maxknv)).
* MergeTree FINAL optimization diagnostics and settings [#59650](https://github.com/ClickHouse/ClickHouse/pull/59650) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix default path when path is not specified in config [#59654](https://github.com/ClickHouse/ClickHouse/pull/59654) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow up for [#59277](https://github.com/ClickHouse/ClickHouse/issues/59277) [#59659](https://github.com/ClickHouse/ClickHouse/pull/59659) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Pin python dependencies in stateless tests [#59663](https://github.com/ClickHouse/ClickHouse/pull/59663) ([Raúl Marín](https://github.com/Algunenano)).
* Unquote FLAG_LATEST to fix issue with empty argument [#59672](https://github.com/ClickHouse/ClickHouse/pull/59672) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Temporarily remove a feature that doesn't work [#59688](https://github.com/ClickHouse/ClickHouse/pull/59688) ([Alexander Tokmakov](https://github.com/tavplubix)).
* ConnectionEstablisher: remove unused is_finished [#59706](https://github.com/ClickHouse/ClickHouse/pull/59706) ([Igor Nikonov](https://github.com/devcrafter)).
* Add test for increase-always autoscaling lambda [#59709](https://github.com/ClickHouse/ClickHouse/pull/59709) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove SourceWithKeyCondition from ReadFromStorageStep [#59720](https://github.com/ClickHouse/ClickHouse/pull/59720) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Make ZooKeeper actually sequentially consistent [#59735](https://github.com/ClickHouse/ClickHouse/pull/59735) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add assertions around FixedString code [#59737](https://github.com/ClickHouse/ClickHouse/pull/59737) ([Raúl Marín](https://github.com/Algunenano)).
* Fix skipping unused shards with analyzer [#59741](https://github.com/ClickHouse/ClickHouse/pull/59741) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix DB type check - now it'll refuse to create in Replicated databases [#59743](https://github.com/ClickHouse/ClickHouse/pull/59743) ([Michael Kolupaev](https://github.com/al13n321)).
* Analyzer: Fix test_replicating_constants/test.py::test_different_versions [#59750](https://github.com/ClickHouse/ClickHouse/pull/59750) ([Dmitry Novik](https://github.com/novikd)).
* Fix dashboard params default values [#59753](https://github.com/ClickHouse/ClickHouse/pull/59753) ([Sergei Trifonov](https://github.com/serxa)).
* Fix logical optimizer with LowCardinality in new analyzer [#59766](https://github.com/ClickHouse/ClickHouse/pull/59766) ([Antonio Andelic](https://github.com/antonio2368)).
* Update libuv [#59773](https://github.com/ClickHouse/ClickHouse/pull/59773) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Followup [#50894](https://github.com/ClickHouse/ClickHouse/issues/50894) [#59774](https://github.com/ClickHouse/ClickHouse/pull/59774) ([Dmitry Novik](https://github.com/novikd)).
* CI: ci test await [#59778](https://github.com/ClickHouse/ClickHouse/pull/59778) ([Max K.](https://github.com/maxknv)).
* Better logging for adaptive async timeouts [#59781](https://github.com/ClickHouse/ClickHouse/pull/59781) ([Julia Kartseva](https://github.com/jkartseva)).
* Fix broken youtube embedding in ne-tormozit.md [#59782](https://github.com/ClickHouse/ClickHouse/pull/59782) ([Shaun Struwig](https://github.com/Blargian)).
* Hide URL/S3 'headers' argument in SHOW CREATE [#59787](https://github.com/ClickHouse/ClickHouse/pull/59787) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix special build reports in release branches [#59797](https://github.com/ClickHouse/ClickHouse/pull/59797) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* CI: do not reuse builds on release branches [#59798](https://github.com/ClickHouse/ClickHouse/pull/59798) ([Max K.](https://github.com/maxknv)).
* Update version_date.tsv and changelogs after v24.1.3.31-stable [#59799](https://github.com/ClickHouse/ClickHouse/pull/59799) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.12.4.15-stable [#59800](https://github.com/ClickHouse/ClickHouse/pull/59800) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Analyzer: fix test_access_for_functions/test.py::test_access_rights_for_function [#59801](https://github.com/ClickHouse/ClickHouse/pull/59801) ([Dmitry Novik](https://github.com/novikd)).
* Analyzer: Fix test_wrong_db_or_table_name/test.py::test_wrong_table_name [#59806](https://github.com/ClickHouse/ClickHouse/pull/59806) ([Dmitry Novik](https://github.com/novikd)).
* CI: await tune ups [#59807](https://github.com/ClickHouse/ClickHouse/pull/59807) ([Max K.](https://github.com/maxknv)).
* Enforce tests with enabled analyzer in CI [#59814](https://github.com/ClickHouse/ClickHouse/pull/59814) ([Dmitry Novik](https://github.com/novikd)).
* Handle different timestamp related aspects of zip-files [#59815](https://github.com/ClickHouse/ClickHouse/pull/59815) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix settings history azure_max_single_part_copy_size [#59819](https://github.com/ClickHouse/ClickHouse/pull/59819) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Updated a list of trusted contributors [#59844](https://github.com/ClickHouse/ClickHouse/pull/59844) ([Maksim Kita](https://github.com/kitaisreal)).
* Add check for recursiveRemoveLowCardinality() [#59845](https://github.com/ClickHouse/ClickHouse/pull/59845) ([Vitaly Baranov](https://github.com/vitlibar)).
* Better warning for disabled kernel.task_delayacct [#59846](https://github.com/ClickHouse/ClickHouse/pull/59846) ([Azat Khuzhin](https://github.com/azat)).
* Reintroduce 02590_interserver_mode_client_info_initial_query_start_time [#59851](https://github.com/ClickHouse/ClickHouse/pull/59851) ([Azat Khuzhin](https://github.com/azat)).
* Respect CMAKE_OSX_DEPLOYMENT_TARGET for Rust targets [#59852](https://github.com/ClickHouse/ClickHouse/pull/59852) ([Azat Khuzhin](https://github.com/azat)).
* Do not reinitialize ZooKeeperWithFaultInjection on each chunk [#59854](https://github.com/ClickHouse/ClickHouse/pull/59854) ([Alexander Gololobov](https://github.com/davenger)).
* Fix: check if std::function is set before calling it [#59858](https://github.com/ClickHouse/ClickHouse/pull/59858) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix long shutdown of FileLog storage [#59873](https://github.com/ClickHouse/ClickHouse/pull/59873) ([Azat Khuzhin](https://github.com/azat)).
* tests: fix 02322_sql_insert_format flakiness [#59874](https://github.com/ClickHouse/ClickHouse/pull/59874) ([Azat Khuzhin](https://github.com/azat)).
* Follow up for [#58554](https://github.com/ClickHouse/ClickHouse/issues/58554). Cleanup. [#59889](https://github.com/ClickHouse/ClickHouse/pull/59889) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* CI: Fix job failures due to jepsen artifacts [#59890](https://github.com/ClickHouse/ClickHouse/pull/59890) ([Max K.](https://github.com/maxknv)).
* Add test 02988_join_using_prewhere_pushdown [#59892](https://github.com/ClickHouse/ClickHouse/pull/59892) ([vdimir](https://github.com/vdimir)).
* Do not pull mutations if pulling replication log had been stopped [#59895](https://github.com/ClickHouse/ClickHouse/pull/59895) ([Azat Khuzhin](https://github.com/azat)).
* Fix `02982_comments_in_system_tables` [#59896](https://github.com/ClickHouse/ClickHouse/pull/59896) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Refactor Squashing for inserts. [#59899](https://github.com/ClickHouse/ClickHouse/pull/59899) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Do not rebuild a lambda package if it is updated [#59902](https://github.com/ClickHouse/ClickHouse/pull/59902) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix self-extracting: macOS doesn't allow running a renamed executable - copy instead [#59906](https://github.com/ClickHouse/ClickHouse/pull/59906) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update tests with indexHint for analyzer. [#59907](https://github.com/ClickHouse/ClickHouse/pull/59907) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Petite cleanup around macros and ReplicatedMergeTree [#59909](https://github.com/ClickHouse/ClickHouse/pull/59909) ([Azat Khuzhin](https://github.com/azat)).
* Fix: absence of closing record in query_log for failed insert over http [#59910](https://github.com/ClickHouse/ClickHouse/pull/59910) ([Igor Nikonov](https://github.com/devcrafter)).
* Decrease logging level for http retriable errors to Warning (and fix 00157_cache_dictionary flakiness) [#59920](https://github.com/ClickHouse/ClickHouse/pull/59920) ([Azat Khuzhin](https://github.com/azat)).
* Remove `test_distributed_backward_compatability` [#59921](https://github.com/ClickHouse/ClickHouse/pull/59921) ([Dmitry Novik](https://github.com/novikd)).
* Commands node args should add rvalue to push_back to reduce object copy cost [#59922](https://github.com/ClickHouse/ClickHouse/pull/59922) ([xuzifu666](https://github.com/xuzifu666)).
* tests: fix 02981_vertical_merges_memory_usage flakiness [#59923](https://github.com/ClickHouse/ClickHouse/pull/59923) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer: Update broken integration tests list [#59924](https://github.com/ClickHouse/ClickHouse/pull/59924) ([Dmitry Novik](https://github.com/novikd)).
* CI: integration tests to mysql80 [#59939](https://github.com/ClickHouse/ClickHouse/pull/59939) ([Max K.](https://github.com/maxknv)).
* Register StorageMergeTree exception message fix [#59941](https://github.com/ClickHouse/ClickHouse/pull/59941) ([Maksim Kita](https://github.com/kitaisreal)).
* Replace lambdas with pointers to members to simplify stacks [#59944](https://github.com/ClickHouse/ClickHouse/pull/59944) ([Alexander Gololobov](https://github.com/davenger)).
* Analyzer: Fix test_user_defined_object_persistence [#59948](https://github.com/ClickHouse/ClickHouse/pull/59948) ([Dmitry Novik](https://github.com/novikd)).
* Analyzer: Fix test_mutations_with_merge_tree [#59951](https://github.com/ClickHouse/ClickHouse/pull/59951) ([Dmitry Novik](https://github.com/novikd)).
* Cleanups [#59964](https://github.com/ClickHouse/ClickHouse/pull/59964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v24.1.4.19-stable [#59966](https://github.com/ClickHouse/ClickHouse/pull/59966) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Less conflicts [#59968](https://github.com/ClickHouse/ClickHouse/pull/59968) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* We don't have external dictionaries from Aerospike [#59969](https://github.com/ClickHouse/ClickHouse/pull/59969) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix max num to warn message [#59972](https://github.com/ClickHouse/ClickHouse/pull/59972) ([Jordi Villar](https://github.com/jrdi)).
* Analyzer: Fix test_settings_profile [#59975](https://github.com/ClickHouse/ClickHouse/pull/59975) ([Dmitry Novik](https://github.com/novikd)).
* Update version_date.tsv and changelogs after v24.1.4.20-stable [#59978](https://github.com/ClickHouse/ClickHouse/pull/59978) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Analyzer: Fix test_storage_rabbitmq [#59981](https://github.com/ClickHouse/ClickHouse/pull/59981) ([Dmitry Novik](https://github.com/novikd)).
* Analyzer: Fix test_shard_level_const_function [#59983](https://github.com/ClickHouse/ClickHouse/pull/59983) ([Dmitry Novik](https://github.com/novikd)).
* Add newlines to SettingsChangesHistory to maybe have less conflicts [#59984](https://github.com/ClickHouse/ClickHouse/pull/59984) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove context from comparison functions. [#59985](https://github.com/ClickHouse/ClickHouse/pull/59985) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update version_date.tsv and changelogs after v24.1.5.6-stable [#59993](https://github.com/ClickHouse/ClickHouse/pull/59993) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix mark release ready [#59994](https://github.com/ClickHouse/ClickHouse/pull/59994) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert "Insert synchronously if dependent MV deduplication is enabled" [#59998](https://github.com/ClickHouse/ClickHouse/pull/59998) ([Julia Kartseva](https://github.com/jkartseva)).
* Fix obviously wrong (but non significant) error in dictionaries [#60005](https://github.com/ClickHouse/ClickHouse/pull/60005) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Inhibit randomization in some tests [#60009](https://github.com/ClickHouse/ClickHouse/pull/60009) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The code should not be complex [#60010](https://github.com/ClickHouse/ClickHouse/pull/60010) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Exclude test run from a slow build [#60011](https://github.com/ClickHouse/ClickHouse/pull/60011) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix broken lambdas formatting [#60012](https://github.com/ClickHouse/ClickHouse/pull/60012) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Verify formatting consistency on the server-side [#60013](https://github.com/ClickHouse/ClickHouse/pull/60013) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Analyzer: Fix test_sql_user_defined_functions_on_cluster [#60019](https://github.com/ClickHouse/ClickHouse/pull/60019) ([Dmitry Novik](https://github.com/novikd)).
* Fix 02981_vertical_merges_memory_usage with SharedMergeTree [#60028](https://github.com/ClickHouse/ClickHouse/pull/60028) ([Raúl Marín](https://github.com/Algunenano)).
* Fix 01656_test_query_log_factories_info with analyzer. [#60037](https://github.com/ClickHouse/ClickHouse/pull/60037) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Ability to detect undead ZooKeeper sessions [#60044](https://github.com/ClickHouse/ClickHouse/pull/60044) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable tests with coverage [#60047](https://github.com/ClickHouse/ClickHouse/pull/60047) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Print CPU flags at startup [#60075](https://github.com/ClickHouse/ClickHouse/pull/60075) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Cleanup: less confusion between config priority and balancing priority in connection pools [#60077](https://github.com/ClickHouse/ClickHouse/pull/60077) ([Igor Nikonov](https://github.com/devcrafter)).
* Temporary table already exists exception message fix [#60080](https://github.com/ClickHouse/ClickHouse/pull/60080) ([Maksim Kita](https://github.com/kitaisreal)).
* Refactor prewhere and primary key optimization [#60082](https://github.com/ClickHouse/ClickHouse/pull/60082) ([Amos Bird](https://github.com/amosbird)).
* Bump curl to version 4.6.0 [#60084](https://github.com/ClickHouse/ClickHouse/pull/60084) ([josh-hildred](https://github.com/josh-hildred)).
* Check wrong abbreviations [#60086](https://github.com/ClickHouse/ClickHouse/pull/60086) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove the check for formatting consistency from the Fuzzer [#60088](https://github.com/ClickHouse/ClickHouse/pull/60088) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Avoid overflow in settings [#60089](https://github.com/ClickHouse/ClickHouse/pull/60089) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* A small preparation for better handling of primary key in memory [#60092](https://github.com/ClickHouse/ClickHouse/pull/60092) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Move threadPoolCallbackRunner to the "Common" folder [#60097](https://github.com/ClickHouse/ClickHouse/pull/60097) ([Vitaly Baranov](https://github.com/vitlibar)).
* Speed up the CI [#60106](https://github.com/ClickHouse/ClickHouse/pull/60106) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Insignificant changes [#60108](https://github.com/ClickHouse/ClickHouse/pull/60108) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not retry queries if container is down in integration tests [#60109](https://github.com/ClickHouse/ClickHouse/pull/60109) ([Azat Khuzhin](https://github.com/azat)).
* Better check for inconsistent formatting [#60110](https://github.com/ClickHouse/ClickHouse/pull/60110) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Skip printing meaningless log [#60123](https://github.com/ClickHouse/ClickHouse/pull/60123) ([conic](https://github.com/conicl)).
* Implement TODO [#60124](https://github.com/ClickHouse/ClickHouse/pull/60124) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad log message [#60125](https://github.com/ClickHouse/ClickHouse/pull/60125) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in `IMergeTreeDataPart` [#60139](https://github.com/ClickHouse/ClickHouse/pull/60139) ([Antonio Andelic](https://github.com/antonio2368)).
* Add new setting to changes history [#60141](https://github.com/ClickHouse/ClickHouse/pull/60141) ([Antonio Andelic](https://github.com/antonio2368)).
* Analyzer: fix row level filters with PREWHERE + additional filters [#60142](https://github.com/ClickHouse/ClickHouse/pull/60142) ([vdimir](https://github.com/vdimir)).
* Tests: query log for inserts over http [#60143](https://github.com/ClickHouse/ClickHouse/pull/60143) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix build in master [#60151](https://github.com/ClickHouse/ClickHouse/pull/60151) ([Raúl Marín](https://github.com/Algunenano)).
* Add setting history check to stateless tests [#60154](https://github.com/ClickHouse/ClickHouse/pull/60154) ([Raúl Marín](https://github.com/Algunenano)).
* Mini cleanup of CPUID.h [#60155](https://github.com/ClickHouse/ClickHouse/pull/60155) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix: custom key failover test flakiness [#60158](https://github.com/ClickHouse/ClickHouse/pull/60158) ([Igor Nikonov](https://github.com/devcrafter)).
* Skip sanity checks on secondary CREATE query [#60159](https://github.com/ClickHouse/ClickHouse/pull/60159) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove extensively aggressive check [#60162](https://github.com/ClickHouse/ClickHouse/pull/60162) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong message during compilation [#60178](https://github.com/ClickHouse/ClickHouse/pull/60178) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#44318](https://github.com/ClickHouse/ClickHouse/issues/44318) [#60179](https://github.com/ClickHouse/ClickHouse/pull/60179) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add test for 59437 [#60191](https://github.com/ClickHouse/ClickHouse/pull/60191) ([Raúl Marín](https://github.com/Algunenano)).
* CI: hot fix for gh statuses [#60201](https://github.com/ClickHouse/ClickHouse/pull/60201) ([Max K.](https://github.com/maxknv)).
* Limit libarchive format to what we use [#60203](https://github.com/ClickHouse/ClickHouse/pull/60203) ([San](https://github.com/santrancisco)).
* Fix bucket region discovery [#60204](https://github.com/ClickHouse/ClickHouse/pull/60204) ([Nikita Taranov](https://github.com/nickitat)).
* Fix `test_backup_restore_s3/test.py::test_user_specific_auth` [#60210](https://github.com/ClickHouse/ClickHouse/pull/60210) ([Antonio Andelic](https://github.com/antonio2368)).
* CI: combine analyzer, s3, dbreplicated into one job [#60224](https://github.com/ClickHouse/ClickHouse/pull/60224) ([Max K.](https://github.com/maxknv)).
* Slightly better Keeper loading from snapshot [#60226](https://github.com/ClickHouse/ClickHouse/pull/60226) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: IAST::clone() for RENAME [#60227](https://github.com/ClickHouse/ClickHouse/pull/60227) ([Igor Nikonov](https://github.com/devcrafter)).
* Treat 2+ in allow_experimental_parallel_reading_from_replicas as 2 [#60228](https://github.com/ClickHouse/ClickHouse/pull/60228) ([Raúl Marín](https://github.com/Algunenano)).
* CI: random job pick support [#60229](https://github.com/ClickHouse/ClickHouse/pull/60229) ([Max K.](https://github.com/maxknv)).
* Fix analyzer - hide arguments for secret functions [#60230](https://github.com/ClickHouse/ClickHouse/pull/60230) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backups delete suspicious file [#60231](https://github.com/ClickHouse/ClickHouse/pull/60231) ([Maksim Kita](https://github.com/kitaisreal)).
* CI: random sanitizer for parallel repl in PR wf [#60234](https://github.com/ClickHouse/ClickHouse/pull/60234) ([Max K.](https://github.com/maxknv)).
* CI: use aarch runner for runconfig job [#60236](https://github.com/ClickHouse/ClickHouse/pull/60236) ([Max K.](https://github.com/maxknv)).
* Add test for 60232 [#60244](https://github.com/ClickHouse/ClickHouse/pull/60244) ([Raúl Marín](https://github.com/Algunenano)).
* Make cloud sync required [#60245](https://github.com/ClickHouse/ClickHouse/pull/60245) ([Raúl Marín](https://github.com/Algunenano)).
* Tests from [#60094](https://github.com/ClickHouse/ClickHouse/issues/60094) [#60256](https://github.com/ClickHouse/ClickHouse/pull/60256) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove bad check in Keeper [#60266](https://github.com/ClickHouse/ClickHouse/pull/60266) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix integration `test_backup_restore_s3` [#60269](https://github.com/ClickHouse/ClickHouse/pull/60269) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Ignore valid 'No such key errors' in stress tests [#60270](https://github.com/ClickHouse/ClickHouse/pull/60270) ([Raúl Marín](https://github.com/Algunenano)).
* Stress test: Include the first sanitizer block message in the report [#60283](https://github.com/ClickHouse/ClickHouse/pull/60283) ([Raúl Marín](https://github.com/Algunenano)).
* Update analyzer_tech_debt.txt [#60303](https://github.com/ClickHouse/ClickHouse/pull/60303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Minor fixes for hashed dictionary [#60310](https://github.com/ClickHouse/ClickHouse/pull/60310) ([vdimir](https://github.com/vdimir)).
* Install tailscale during AMI build and set it up on runners [#60316](https://github.com/ClickHouse/ClickHouse/pull/60316) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* CI: remove Integration tests asan and release from PR wf [#60327](https://github.com/ClickHouse/ClickHouse/pull/60327) ([Max K.](https://github.com/maxknv)).
* Fix - analyzer related - "executable" function subquery arguments. [#60339](https://github.com/ClickHouse/ClickHouse/pull/60339) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update settings.md to correct the description for setting `max_concurrent_queries_for_user` [#60343](https://github.com/ClickHouse/ClickHouse/pull/60343) ([Alex Cheng](https://github.com/Alex-Cheng)).
* Fix rapidjson submodule [#60346](https://github.com/ClickHouse/ClickHouse/pull/60346) ([Raúl Marín](https://github.com/Algunenano)).
* Validate experimental and suspicious types inside nested types under a setting [#60353](https://github.com/ClickHouse/ClickHouse/pull/60353) ([Kruglov Pavel](https://github.com/Avogar)).
* Update 01158_zookeeper_log_long.sql [#60357](https://github.com/ClickHouse/ClickHouse/pull/60357) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add missed #include <mutex> [#60358](https://github.com/ClickHouse/ClickHouse/pull/60358) ([Mikhnenko Sasha](https://github.com/4JustMe4)).
* Follow up [#60082](https://github.com/ClickHouse/ClickHouse/issues/60082) [#60360](https://github.com/ClickHouse/ClickHouse/pull/60360) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove ALTER LIVE VIEW [#60370](https://github.com/ClickHouse/ClickHouse/pull/60370) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Detect io_uring in tests [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)).
* Expose fatal.log separately for fuzzer [#60374](https://github.com/ClickHouse/ClickHouse/pull/60374) ([Azat Khuzhin](https://github.com/azat)).
* Minor changes for dashboard [#60387](https://github.com/ClickHouse/ClickHouse/pull/60387) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove unused method [#60388](https://github.com/ClickHouse/ClickHouse/pull/60388) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow to map UI handlers to different paths [#60389](https://github.com/ClickHouse/ClickHouse/pull/60389) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove old tags from integration tests [#60407](https://github.com/ClickHouse/ClickHouse/pull/60407) ([Raúl Marín](https://github.com/Algunenano)).
* Update `liburing` to 2.5 [#60409](https://github.com/ClickHouse/ClickHouse/pull/60409) ([Nikita Taranov](https://github.com/nickitat)).
* Fix undefined-behavior in case of too big max_execution_time setting [#60419](https://github.com/ClickHouse/ClickHouse/pull/60419) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix wrong log message in Fuzzer [#60425](https://github.com/ClickHouse/ClickHouse/pull/60425) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix unrestricted reads from keeper [#60429](https://github.com/ClickHouse/ClickHouse/pull/60429) ([Raúl Marín](https://github.com/Algunenano)).
* Split update_mergeable_check into two functions to force trigger the status [#60431](https://github.com/ClickHouse/ClickHouse/pull/60431) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert "Revert "Add table function `mergeTreeIndex`"" [#60435](https://github.com/ClickHouse/ClickHouse/pull/60435) ([Anton Popov](https://github.com/CurtizJ)).
* Revert "Merge pull request [#56864](https://github.com/ClickHouse/ClickHouse/issues/56864) from ClickHouse/broken-projections-better-handling" [#60436](https://github.com/ClickHouse/ClickHouse/pull/60436) ([Nikita Taranov](https://github.com/nickitat)).
* Keeper: fix moving changelog files between disks [#60442](https://github.com/ClickHouse/ClickHouse/pull/60442) ([Antonio Andelic](https://github.com/antonio2368)).
* Replace deprecated distutils by vendored packaging [#60444](https://github.com/ClickHouse/ClickHouse/pull/60444) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Do not fail the build if ci-logs is not healthy [#60445](https://github.com/ClickHouse/ClickHouse/pull/60445) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Move setting `enable_order_by_all` out of the experimental setting section [#60449](https://github.com/ClickHouse/ClickHouse/pull/60449) ([Robert Schulze](https://github.com/rschu1ze)).
* Minor: Replace `boost::algorithm::starts_with()` by `std::string::starts_with()` [#60450](https://github.com/ClickHouse/ClickHouse/pull/60450) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Minor: Replace boost::algorithm::ends_with() by std::string::ends_with() [#60454](https://github.com/ClickHouse/ClickHouse/pull/60454) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* CI: remove input params for job scripts [#60455](https://github.com/ClickHouse/ClickHouse/pull/60455) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Fix: 02496_remove_redundant_sorting_analyzer [#60456](https://github.com/ClickHouse/ClickHouse/pull/60456) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* PR template fix to include ci fix category [#60461](https://github.com/ClickHouse/ClickHouse/pull/60461) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Reduce iterations in 01383_log_broken_table [#60465](https://github.com/ClickHouse/ClickHouse/pull/60465) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Merge [#57434](https://github.com/ClickHouse/ClickHouse/issues/57434) [#60466](https://github.com/ClickHouse/ClickHouse/pull/60466) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix bad test: looks like an obvious race condition, but I didn't check in detail. [#60471](https://github.com/ClickHouse/ClickHouse/pull/60471) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Make test slower [#60472](https://github.com/ClickHouse/ClickHouse/pull/60472) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix cgroups v1 rss parsing in CgroupsMemoryUsageObserver [#60481](https://github.com/ClickHouse/ClickHouse/pull/60481) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* CI: fix pr check status to not fail mergeable check [#60483](https://github.com/ClickHouse/ClickHouse/pull/60483) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Report respects skipped builds [#60488](https://github.com/ClickHouse/ClickHouse/pull/60488) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* CI: quick style fix [#60490](https://github.com/ClickHouse/ClickHouse/pull/60490) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Decrease logging level for http retriable errors to Info [#60508](https://github.com/ClickHouse/ClickHouse/pull/60508) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Remove broken test while we fix it [#60547](https://github.com/ClickHouse/ClickHouse/pull/60547) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
|
@@ -14,20 +14,6 @@ Supported platforms:
 - PowerPC 64 LE (experimental)
 - RISC-V 64 (experimental)
 
-## Building in docker
-
-We use the docker image `clickhouse/binary-builder` for our CI builds. It contains everything necessary to build the binary and packages. There is a script `docker/packager/packager` to ease the image usage:
-
-```bash
-# define a directory for the output artifacts
-output_dir="build_results"
-# a simplest build
-./docker/packager/packager --package-type=binary --output-dir "$output_dir"
-# build debian packages
-./docker/packager/packager --package-type=deb --output-dir "$output_dir"
-# by default, debian packages use thin LTO, so we can override it to speed up the build
-CMAKE_FLAGS='-DENABLE_THINLTO=' ./docker/packager/packager --package-type=deb --output-dir "./$(git rev-parse --show-cdup)/build_results"
-```
-
 ## Building on Ubuntu
 
 The following tutorial is based on Ubuntu Linux.
@@ -37,6 +23,7 @@ The minimum recommended Ubuntu version for development is 22.04 LTS.
 ### Install Prerequisites {#install-prerequisites}
 
 ``` bash
+sudo apt-get update
 sudo apt-get install git cmake ccache python3 ninja-build nasm yasm gawk lsb-release wget software-properties-common gnupg
 ```
 
@@ -57,7 +44,7 @@ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
 
 For other Linux distributions - check the availability of LLVM's [prebuilt packages](https://releases.llvm.org/download.html).
 
-As of August 2023, clang-16 or higher will work.
+As of March 2024, clang-17 or higher will work.
 GCC as a compiler is not supported.
 To build with a specific Clang version:
 
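The commands themselves are elided from this hunk; as a minimal sketch of what pinning the compiler typically looks like (assuming clang-17, matching the version mentioned above):

```bash
export CC=clang-17 CXX=clang++-17
```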
@@ -133,3 +120,17 @@ mkdir build
 cmake -S . -B build
 cmake --build build
 ```
+
+## Building in docker
+
+We use the docker image `clickhouse/binary-builder` for our CI builds. It contains everything necessary to build the binary and packages. There is a script `docker/packager/packager` to ease the image usage:
+
+```bash
+# define a directory for the output artifacts
+output_dir="build_results"
+# a simplest build
+./docker/packager/packager --package-type=binary --output-dir "$output_dir"
+# build debian packages
+./docker/packager/packager --package-type=deb --output-dir "$output_dir"
+# by default, debian packages use thin LTO, so we can override it to speed up the build
+CMAKE_FLAGS='-DENABLE_THINLTO=' ./docker/packager/packager --package-type=deb --output-dir "./$(git rev-parse --show-cdup)/build_results"
+```
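The packager script can also select the compiler; a hedged sketch (the `--compiler` flag and its accepted values are assumptions here, check `./docker/packager/packager --help`):

```bash
# build a plain binary with an explicitly chosen compiler
./docker/packager/packager --package-type=binary --compiler=clang-17 --output-dir "$output_dir"
```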
@@ -7,6 +7,7 @@ title: Formats for Input and Output Data
 
 ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read a dictionary. A format supported for output can be used to arrange the
 results of a `SELECT`, and to perform `INSERT`s into a file-backed table.
+All format names are case insensitive.
 
 The supported formats are:
 
@@ -549,6 +549,48 @@
 └───────┴─────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```
 
+##### input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects
+
+Enabling this setting allows using the String type for ambiguous paths during named tuple inference from JSON objects (when `input_format_json_try_infer_named_tuples_from_objects` is enabled) instead of throwing an exception.
+It allows reading JSON objects as named Tuples even if there are ambiguous paths.
+
+Disabled by default.
+
+**Examples**
+
+With disabled setting:
+
+```sql
+SET input_format_json_try_infer_named_tuples_from_objects = 1;
+SET input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects = 0;
+DESC format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : "Hello"}}}');
+```
+
+Result:
+
+```text
+Code: 636. DB::Exception: The table structure cannot be extracted from a JSONEachRow format file. Error:
+Code: 117. DB::Exception: JSON objects have ambiguous data: in some objects path 'a' has type 'Int64' and in some - 'Tuple(b String)'. You can enable setting input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects to use String type for path 'a'. (INCORRECT_DATA) (version 24.3.1.1).
+You can specify the structure manually. (CANNOT_EXTRACT_TABLE_STRUCTURE)
+```
+
+With enabled setting:
+
+```sql
+SET input_format_json_try_infer_named_tuples_from_objects = 1;
+SET input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects = 1;
+DESC format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : "Hello"}}}');
+SELECT * FROM format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : "Hello"}}}');
+```
+
+Result:
+
+```text
+┌─name─┬─type──────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
+│ obj  │ Tuple(a Nullable(String))     │              │                    │         │                  │                │
+└──────┴───────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
+┌─obj─────────────────┐
+│ ('42')              │
+│ ('{"b" : "Hello"}') │
+└─────────────────────┘
+```
+
 ##### input_format_json_read_objects_as_strings
 
 Enabling this setting allows reading nested JSON objects as strings.
@@ -1554,6 +1596,28 @@ DESC format(JSONEachRow, $$
 └──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```
 
+#### input_format_try_infer_exponent_floats
+
+If enabled, ClickHouse will try to infer floats in exponential form for text formats (except JSON where numbers in exponential form are always inferred).
+
+Disabled by default.
+
+**Example**
+
+```sql
+SET input_format_try_infer_exponent_floats = 1;
+DESC format(CSV,
+$$1.1E10
+2.3e-12
+42E00
+$$)
+```
+
+```response
+┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
+│ c1   │ Nullable(Float64) │              │                    │         │                  │                │
+└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
+```
+
 ## Self describing formats {#self-describing-formats}
 
 Self-describing formats contain information about the structure of the data in the data itself,
|
|||||||
|
|
||||||
Allow to use String type for JSON keys that contain only `Null`/`{}`/`[]` in data sample during schema inference.
|
Allow to use String type for JSON keys that contain only `Null`/`{}`/`[]` in data sample during schema inference.
|
||||||
In JSON formats any value can be read as String, and we can avoid errors like `Cannot determine type for column 'column_name' by first 25000 rows of data, most likely this column contains only Nulls or empty Arrays/Maps` during schema inference
|
In JSON formats any value can be read as String, and we can avoid errors like `Cannot determine type for column 'column_name' by first 25000 rows of data, most likely this column contains only Nulls or empty Arrays/Maps` during schema inference
|
||||||
by using String type for keys with unknown types.
|
by using String type for keys with unknown types.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
@@ -891,7 +891,7 @@ Default value: `,`.
 
 If it is set to true, allow strings in single quotes.
 
-Enabled by default.
+Disabled by default.
 
 ### format_csv_allow_double_quotes {#format_csv_allow_double_quotes}
 
@@ -1605,7 +1605,7 @@ possible values:
 - `1` — Enabled. Pretty formats will use ANSI escape sequences except for `NoEscapes` formats.
 - `auto` - Enabled if `stdout` is a terminal except for `NoEscapes` formats.
 
 Default value is `auto`.
 
 ### output_format_pretty_grid_charset {#output_format_pretty_grid_charset}
 
@@ -3954,6 +3954,7 @@ Possible values:
 - `none` — Is similar to throw, but distributed DDL query returns no result set.
 - `null_status_on_timeout` — Returns `NULL` as execution status in some rows of result set instead of throwing `TIMEOUT_EXCEEDED` if query is not finished on the corresponding hosts.
 - `never_throw` — Do not throw `TIMEOUT_EXCEEDED` and do not rethrow exceptions if query has failed on some hosts.
+- `none_only_active` — similar to `none`, but doesn't wait for inactive replicas of the `Replicated` database. Note: with this mode it's impossible to figure out that the query was not executed on some replica and will be executed in background.
 - `null_status_on_timeout_only_active` — similar to `null_status_on_timeout`, but doesn't wait for inactive replicas of the `Replicated` database
 - `throw_only_active` — similar to `throw`, but doesn't wait for inactive replicas of the `Replicated` database
 
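A usage sketch for the newly added mode (assuming it is applied per session like the other values):

```sql
-- don't wait for inactive replicas of a Replicated database when running ON CLUSTER DDL
SET distributed_ddl_output_mode = 'none_only_active';
```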
@@ -275,6 +275,16 @@ Cache profile events:
 
 - `CachedWriteBufferCacheWriteBytes`, `CachedWriteBufferCacheWriteMicroseconds`
 
+## Using in-memory cache (userspace page cache) {#userspace-page-cache}
+
+The File Cache described above stores cached data in local files. Alternatively, object-store-based disks can be configured to use "Userspace Page Cache", which is RAM-only. Userspace page cache is recommended only if file cache can't be used for some reason, e.g. if the machine doesn't have a local disk at all. Note that file cache effectively uses RAM for caching too, since the OS caches contents of local files.
+
+To enable userspace page cache for disks that don't use file cache, use setting `use_page_cache_for_disks_without_file_cache`.
+
+By default, on Linux, the userspace page cache will use all available memory, similar to the OS page cache. In tools like `top` and `ps`, the clickhouse server process will typically show resident set size near 100% of the machine's RAM - this is normal, and most of this memory is actually reclaimable by the OS on memory pressure (`MADV_FREE`). This behavior can be disabled with server setting `page_cache_use_madv_free = 0`, making the userspace page cache just use a fixed amount of memory `page_cache_size` with no special interaction with the OS. On Mac OS, `page_cache_use_madv_free` is always disabled as it doesn't have lazy `MADV_FREE`.
+
+Unfortunately, `page_cache_use_madv_free` makes it difficult to tell if the server is close to running out of memory, since the RSS metric becomes useless. Async metric `UnreclaimableRSS` shows the amount of physical memory used by the server, excluding the memory reclaimable by the OS: `select value from system.asynchronous_metrics where metric = 'UnreclaimableRSS'`. Use it for monitoring instead of RSS. This metric is only available if `page_cache_use_madv_free` is enabled.
+
 ## Storing Data on Web Server {#storing-data-on-webserver}
 
 There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`.
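A usage sketch for the page-cache section above (assuming the setting can be applied at the session level, as the paragraph suggests):

```sql
-- enable the userspace page cache for disks without a file cache,
-- then check how much physical memory is genuinely unreclaimable
SET use_page_cache_for_disks_without_file_cache = 1;
SELECT value FROM system.asynchronous_metrics WHERE metric = 'UnreclaimableRSS';
```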
@@ -201,12 +201,12 @@ Arguments:
 
 - `-S`, `--structure` — table structure for input data.
 - `--input-format` — input format, `TSV` by default.
-- `-f`, `--file` — path to data, `stdin` by default.
+- `-F`, `--file` — path to data, `stdin` by default.
 - `-q`, `--query` — queries to execute with `;` as delimiter. `--query` can be specified multiple times, e.g. `--query "SELECT 1" --query "SELECT 2"`. Cannot be used simultaneously with `--queries-file`.
 - `--queries-file` - file path with queries to execute. `--queries-file` can be specified multiple times, e.g. `--queries-file queries1.sql --queries-file queries2.sql`. Cannot be used simultaneously with `--query`.
 - `--multiquery, -n` – If specified, multiple queries separated by semicolons can be listed after the `--query` option. For convenience, it is also possible to omit `--query` and pass the queries directly after `--multiquery`.
 - `-N`, `--table` — table name where to put output data, `table` by default.
-- `--format`, `--output-format` — output format, `TSV` by default.
+- `-f`, `--format`, `--output-format` — output format, `TSV` by default.
 - `-d`, `--database` — default database, `_local` by default.
 - `--stacktrace` — whether to dump debug output in case of exception.
 - `--echo` — print query before execution.
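A sketch of the renamed flags in action (post-change semantics: `-F` is the input file and `-f` the output format; input stays `TSV` by default):

```bash
echo -e "1\t2" | clickhouse-local -S "a UInt32, b UInt32" -f CSV -q "SELECT a + b FROM table"
# prints: 3
```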
@@ -14,8 +14,6 @@
 
 - `N` – The number of elements to return.
 
-If the parameter is omitted, default value is the size of input.
-
 - `column` – The value (Integer, String, Float and other Generic types).
 
 **Example**
@@ -36,13 +34,12 @@
 Gets all the String implementations of all numbers in column:
 
 ``` sql
-SELECT groupArraySorted(str) FROM (SELECT toString(number) as str FROM numbers(5));
-
+SELECT groupArraySorted(5)(str) FROM (SELECT toString(number) as str FROM numbers(5));
 ```
 
 ``` text
-┌─groupArraySorted(str)────────┐
+┌─groupArraySorted(5)(str)─┐
 │ ['0','1','2','3','4'] │
-└──────────────────────────────┘
+└──────────────────────────┘
 ```
 
@@ -394,8 +394,7 @@ Result:
 
 ## toYear
 
-Converts a date or date with time to the year number (AD) as `UInt16` value.
-
+Returns the year component (AD) of a date or date with time.
 
 **Syntax**
 
@@ -431,7 +430,7 @@ Result:
 
 ## toQuarter
 
-Converts a date or date with time to the quarter number (1-4) as `UInt8` value.
+Returns the quarter (1-4) of a date or date with time.
 
 **Syntax**
 
@@ -465,10 +464,9 @@ Result:
 └──────────────────────────────────────────────┘
 ```
 
-
 ## toMonth
 
-Converts a date or date with time to the month number (1-12) as `UInt8` value.
+Returns the month component (1-12) of a date or date with time.
 
 **Syntax**
 
@@ -504,7 +502,7 @@ Result:
 
 ## toDayOfYear
 
-Converts a date or date with time to the number of the day of the year (1-366) as `UInt16` value.
+Returns the number of the day within the year (1-366) of a date or date with time.
 
 **Syntax**
 
@@ -540,7 +538,7 @@ Result:
 
 ## toDayOfMonth
 
-Converts a date or date with time to the number of the day in the month (1-31) as `UInt8` value.
+Returns the number of the day within the month (1-31) of a date or date with time.
 
 **Syntax**
 
@@ -576,7 +574,7 @@ Result:
 
 ## toDayOfWeek
 
-Converts a date or date with time to the number of the day in the week as `UInt8` value.
+Returns the number of the day within the week of a date or date with time.
 
 The two-argument form of `toDayOfWeek()` enables you to specify whether the week starts on Monday or Sunday, and whether the return value should be in the range from 0 to 6 or 1 to 7. If the mode argument is omitted, the default mode is 0. The time zone of the date can be specified as the third argument.
 
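For illustration, the two-argument form with the default and a non-default mode (2023-04-21 is a Friday):

```sql
SELECT
    toDayOfWeek(toDateTime('2023-04-21 10:20:30')),   -- mode 0: Monday-Sunday as 1-7, returns 5
    toDayOfWeek(toDateTime('2023-04-21 10:20:30'), 1) -- mode 1: Monday-Sunday as 0-6, returns 4
```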
@@ -627,7 +625,7 @@ Result:
 
 ## toHour
 
-Converts a date with time to the number of the hour in 24-hour time (0-23) as `UInt8` value.
+Returns the hour component (0-23) of a date with time.
 
 Assumes that if clocks are moved ahead, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always exactly when it occurs - it depends on the timezone).
 
@@ -641,7 +639,7 @@ Alias: `HOUR`
 
 **Arguments**
 
-- `value` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
 
 **Returned value**
 
@@ -665,7 +663,7 @@ Result:
 
 ## toMinute
 
-Converts a date with time to the number of the minute of the hour (0-59) as `UInt8` value.
+Returns the minute component (0-59) of a date with time.
 
 **Syntax**
 
@@ -677,7 +675,7 @@ Alias: `MINUTE`
 
 **Arguments**
 
-- `value` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
 
 **Returned value**
 
@@ -701,7 +699,7 @@ Result:
 
 ## toSecond
 
-Converts a date with time to the second in the minute (0-59) as `UInt8` value. Leap seconds are not considered.
+Returns the second component (0-59) of a date with time. Leap seconds are not considered.
 
 **Syntax**
 
@@ -713,7 +711,7 @@ Alias: `SECOND`
 
 **Arguments**
 
-- `value` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+- `value` - a [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
 
 **Returned value**
 
@@ -735,6 +733,40 @@ Result:
 └─────────────────────────────────────────────┘
 ```
 
+## toMillisecond
+
+Returns the millisecond component (0-999) of a date with time.
+
+**Syntax**
+
+```sql
+toMillisecond(value)
+```
+
+Alias: `MILLISECOND`
+
+**Arguments**
+
+- `value` - [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
+
+**Returned value**
+
+- The millisecond in the minute (0 - 999) of the given date/time
+
+Type: `UInt16`
+
+**Example**
+
+```sql
+SELECT toMillisecond(toDateTime64('2023-04-21 10:20:30.456', 3))
+```
+
+Result:
+
+```response
+┌──toMillisecond(toDateTime64('2023-04-21 10:20:30.456', 3))─┐
+│                                                        456 │
+└────────────────────────────────────────────────────────────┘
+```
+
 ## toUnixTimestamp
 
 Converts a string, a date or a date with time to the [Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time) in `UInt32` representation.
 
@@ -272,10 +272,16 @@ ALTER TABLE table_name MODIFY COLUMN column_name RESET SETTING max_compress_bloc
 
 ## MATERIALIZE COLUMN
 
-Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
-It is used if it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly on `SELECT` executing turns out to be expensive.
+Materializes a column with a `DEFAULT` or `MATERIALIZED` value expression.
+This statement can be used to rewrite existing column data after a `DEFAULT` or `MATERIALIZED` expression has been added or updated (which only updates the metadata but does not change existing data).
 Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
 
+For columns with a new or updated `MATERIALIZED` value expression, all existing rows are rewritten.
+
+For columns with a new or updated `DEFAULT` value expression, the behavior depends on the ClickHouse version:
+- In ClickHouse < v24.2, all existing rows are rewritten.
+- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was calculated, ClickHouse changes it to the new or updated `DEFAULT` value expression.
+
 Syntax:
 
 ```sql
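-- A hedged usage sketch (not part of the original syntax block; the table and
-- column names here are hypothetical): add a MATERIALIZED column, then rewrite
-- the existing rows with its expression.
ALTER TABLE hits ADD COLUMN url_domain String MATERIALIZED domain(url);
ALTER TABLE hits MATERIALIZE COLUMN url_domain;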
|
@ -202,6 +202,13 @@ Hierarchy of privileges:
|
|||||||
- `S3`
|
- `S3`
|
||||||
- [dictGet](#grant-dictget)
|
- [dictGet](#grant-dictget)
|
||||||
- [displaySecretsInShowAndSelect](#grant-display-secrets)
|
- [displaySecretsInShowAndSelect](#grant-display-secrets)
|
||||||
|
- [NAMED COLLECTION ADMIN](#grant-named-collection-admin)
|
||||||
|
- `CREATE NAMED COLLECTION`
|
||||||
|
- `DROP NAMED COLLECTION`
|
||||||
|
- `ALTER NAMED COLLECTION`
|
||||||
|
- `SHOW NAMED COLLECTIONS`
|
||||||
|
- `SHOW NAMED COLLECTIONS SECRETS`
|
||||||
|
- `NAMED COLLECTION`
|
||||||
|
|
||||||
Examples of how this hierarchy is treated:
|
Examples of how this hierarchy is treated:
|
||||||
|
|
||||||
@@ -498,6 +505,25 @@ and
 [`format_display_secrets_in_show_and_select` format setting](../../operations/settings/formats#format_display_secrets_in_show_and_select)
 are turned on.
 
+### NAMED COLLECTION ADMIN
+
+Allows a certain operation on a specified named collection. Before version 23.7 it was called NAMED COLLECTION CONTROL; after 23.7, NAMED COLLECTION ADMIN was added and NAMED COLLECTION CONTROL is preserved as an alias.
+
+- `NAMED COLLECTION ADMIN`. Level: `NAMED_COLLECTION`. Aliases: `NAMED COLLECTION CONTROL`
+- `CREATE NAMED COLLECTION`. Level: `NAMED_COLLECTION`
+- `DROP NAMED COLLECTION`. Level: `NAMED_COLLECTION`
+- `ALTER NAMED COLLECTION`. Level: `NAMED_COLLECTION`
+- `SHOW NAMED COLLECTIONS`. Level: `NAMED_COLLECTION`. Aliases: `SHOW NAMED COLLECTIONS`
+- `SHOW NAMED COLLECTIONS SECRETS`. Level: `NAMED_COLLECTION`. Aliases: `SHOW NAMED COLLECTIONS SECRETS`
+- `NAMED COLLECTION`. Level: `NAMED_COLLECTION`. Aliases: `NAMED COLLECTION USAGE, USE NAMED COLLECTION`
+
+Unlike all other grants (CREATE, DROP, ALTER, SHOW), the NAMED COLLECTION grant was added only in 23.7, while all the others were added earlier, in 22.12.
+
+**Examples**
+
+Assuming a named collection is called abc, we grant the privilege CREATE NAMED COLLECTION to user john:
+- `GRANT CREATE NAMED COLLECTION ON abc TO john`
+
 ### ALL
 
 Grants all the privileges on regulated entity to a user account or a role.
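A usage sketch for the named-collection grants above (the collection and user names are hypothetical):

```sql
-- let john fully manage the single named collection abc
GRANT NAMED COLLECTION ADMIN ON abc TO john;
```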
@@ -5,7 +5,12 @@ sidebar_label: Window Functions
 title: Window Functions
 ---
 
-ClickHouse supports the standard grammar for defining windows and window functions. The following features are currently supported:
+Window functions let you perform calculations across a set of rows that are related to the current row.
+Some of the calculations that you can do are similar to those that can be done with an aggregate function, but a window function doesn't cause rows to be grouped into a single output - the individual rows are still returned.
+
+## Standard Window Functions
+
+ClickHouse supports the standard grammar for defining windows and window functions. The table below indicates whether a feature is currently supported.
 
 | Feature | Support or workaround |
 |---------|-----------------------|
@@ -25,6 +30,8 @@ ClickHouse supports the standard grammar for defining windows and window functio
 
 ## ClickHouse-specific Window Functions
 
+There are also the following window functions that are specific to ClickHouse:
+
 ### nonNegativeDerivative(metric_column, timestamp_column[, INTERVAL X UNITS])
 
 Finds non-negative derivative for given `metric_column` by `timestamp_column`.
@@ -33,40 +40,6 @@ The computed value is the following for each row:
 - `0` for 1st row,
 - ${metric_i - metric_{i-1} \over timestamp_i - timestamp_{i-1}} * interval$ for the $i$-th row.
 
-## References
-
-### GitHub Issues
-
-The roadmap for the initial support of window functions is [in this issue](https://github.com/ClickHouse/ClickHouse/issues/18097).
-
-All GitHub issues related to window functions have the [comp-window-functions](https://github.com/ClickHouse/ClickHouse/labels/comp-window-functions) tag.
-
-### Tests
-
-These tests contain the examples of the currently supported grammar:
-
-https://github.com/ClickHouse/ClickHouse/blob/master/tests/performance/window_functions.xml
-
-https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/01591_window_functions.sql
-
-### Postgres Docs
-
-https://www.postgresql.org/docs/current/sql-select.html#SQL-WINDOW
-
-https://www.postgresql.org/docs/devel/sql-expressions.html#SYNTAX-WINDOW-FUNCTIONS
-
-https://www.postgresql.org/docs/devel/functions-window.html
-
-https://www.postgresql.org/docs/devel/tutorial-window.html
-
-### MySQL Docs
-
-https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html
-
-https://dev.mysql.com/doc/refman/8.0/en/window-functions-usage.html
-
-https://dev.mysql.com/doc/refman/8.0/en/window-functions-frames.html
-
 ## Syntax
 
 ```text
@@ -80,20 +53,7 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
 - `PARTITION BY` - defines how to break a resultset into groups.
 - `ORDER BY` - defines how to order rows inside the group during calculation aggregate_function.
 - `ROWS or RANGE` - defines bounds of a frame, aggregate_function is calculated within a frame.
-- `WINDOW` - allows to reuse a window definition with multiple expressions.
-
-### Functions
-
-These functions can be used only as a window function.
-
-- `row_number()` - Number the current row within its partition starting from 1.
-- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
-- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
-- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
-- `rank()` - Rank the current row within its partition with gaps.
-- `dense_rank()` - Rank the current row within its partition without gaps.
-- `lagInFrame(x)` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
-- `leadInFrame(x)` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.
+- `WINDOW` - allows multiple expressions to use the same window definition.
 
 ```text
 PARTITION
@@ -112,8 +72,23 @@ These functions can be used only as a window function.
 └─────────────────┘ <--- UNBOUNDED FOLLOWING (END of the PARTITION)
 ```
 
+### Functions
+
+These functions can be used only as a window function.
+
+- `row_number()` - Number the current row within its partition starting from 1.
+- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
+- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
+- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
+- `rank()` - Rank the current row within its partition with gaps.
+- `dense_rank()` - Rank the current row within its partition without gaps.
+- `lagInFrame(x)` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
+- `leadInFrame(x)` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.
+
 ## Examples
 
+Let's have a look at some examples of how window functions can be used.
+
 ```sql
 CREATE TABLE wf_partition
 (
@@ -589,6 +564,41 @@ ORDER BY
 └──────────────┴─────────────────────┴───────┴─────────────────────────┘
 ```
 
+## References
+
+### GitHub Issues
+
+The roadmap for the initial support of window functions is [in this issue](https://github.com/ClickHouse/ClickHouse/issues/18097).
+
+All GitHub issues related to window functions have the [comp-window-functions](https://github.com/ClickHouse/ClickHouse/labels/comp-window-functions) tag.
+
+### Tests
+
+These tests contain the examples of the currently supported grammar:
+
+https://github.com/ClickHouse/ClickHouse/blob/master/tests/performance/window_functions.xml
+
+https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/01591_window_functions.sql
+
+### Postgres Docs
+
+https://www.postgresql.org/docs/current/sql-select.html#SQL-WINDOW
+
+https://www.postgresql.org/docs/devel/sql-expressions.html#SYNTAX-WINDOW-FUNCTIONS
+
+https://www.postgresql.org/docs/devel/functions-window.html
+
+https://www.postgresql.org/docs/devel/tutorial-window.html
+
+### MySQL Docs
+
+https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html
+
+https://dev.mysql.com/doc/refman/8.0/en/window-functions-usage.html
+
+https://dev.mysql.com/doc/refman/8.0/en/window-functions-frames.html
+
 ## Related Content
 
 - Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
@@ -38,6 +38,7 @@ ClickHouse Keeper can be used as a drop-in replacement
 - `dead_session_check_period_ms` — how often ClickHouse Keeper checks for dead sessions and removes them, in milliseconds (default: 500).
 - `election_timeout_lower_bound_ms` — the time after which a follower may start a leader election if it has not received a heartbeat from the leader (default: 1000).
 - `election_timeout_upper_bound_ms` — the time after which a follower must start a leader election if it has not received a heartbeat from the leader (default: 2000).
+- `leadership_expiry_ms` — if the leader does not receive responses from enough followers within this interval, it voluntarily gives up leadership. When set to 0, it is automatically set to 20 times `heart_beat_interval_ms`; when set below 0, the leader does not give up leadership (default: 0).
 - `force_sync` — call `fsync` on each write to the coordination log (default: true).
 - `four_letter_word_white_list` — the list of allowed 4-letter-word commands (default: "conf,cons,crst,envi,ruok,srst,srvr,stat,wchc,wchs,dirs,mntr,isro").
 - `fresh_log_gap` — the minimal lag behind the leader, in log records, at which a follower considers itself up to date (default: 200).
@@ -209,6 +210,7 @@ dead_session_check_period_ms=500
 heart_beat_interval_ms=500
 election_timeout_lower_bound_ms=1000
 election_timeout_upper_bound_ms=2000
+leadership_expiry_ms=0
 reserved_log_items=1000000000000000
 snapshot_distance=10000
 auto_forwarding=true
@@ -1,4 +1,4 @@
 ---
 slug: /zh/getting-started/example-datasets/opensky
 sidebar_label: Air traffic data
 description: The data in this dataset is derived from the full OpenSky dataset, cleaned as necessary to show the development of air traffic during the COVID-19 pandemic.
@@ -53,12 +53,12 @@ CREATE TABLE opensky
 ls -1 flightlist_*.csv.gz | xargs -P100 -I{} bash -c 'gzip -c -d "{}" | clickhouse-client --date_time_input_format best_effort --query "INSERT INTO opensky FORMAT CSVWithNames"'
 ```
 
 - Here we pass the file list (`ls -1 flightlist_*.csv.gz`) to `xargs` for parallel processing. `xargs -P100` specifies up to 100 parallel workers, but since we only have 30 files, the number of workers will be only 30.
 - For each file, `xargs` runs a script via `bash -c`. The script uses `{}` as a placeholder for the file name, which `xargs` then substitutes into the command (with `-I{}`).
 - The script decompresses the file (`gzip -c -d "{}"`) to standard output (the `-c` flag) and redirects the output to `clickhouse-client`.
-- We also ask to parse [DateTime](../../sql-reference/data-types/datetime.md) fields with the extended parser ([--date_time_input_format best_effort](../../operations/settings/ settings.md#settings-date_time_input_format)) to recognize ISO-8601 format with timezone offsets.
+- We also ask to parse [DateTime](/docs/zh/sql-reference/data-types/datetime.md) fields with the extended parser ([--date_time_input_format best_effort](/docs/zh/operations/settings/settings.md#settings-date_time_input_format)) to recognize ISO-8601 format with timezone offsets.
 
-Finally, `clickhouse-client` reads the input data in the [CSVWithNames](../../interfaces/formats.md#csvwithnames) format and performs the insert.
+Finally, `clickhouse-client` reads the input data in the [CSVWithNames](/docs/zh/interfaces/formats.md#csvwithnames) format and performs the insert.
 
 The parallel import takes 24 seconds.
 
@@ -45,6 +45,7 @@ ClickHouse Keeper can be used as a standalone replacement for ZooKeeper or as an internal part of ClickHouse
 - `heart_beat_interval_ms` — how often the ClickHouse Keeper leader sends heartbeats, in milliseconds (default: 500).
 - `election_timeout_lower_bound_ms` — if a follower does not receive a heartbeat from the leader within this interval, it can start a leader election (default: 1000).
 - `election_timeout_upper_bound_ms` — if a follower does not receive a heartbeat from the leader within this interval, it must start a leader election (default: 2000).
+- `leadership_expiry_ms` — if the leader does not receive enough follower replies within this interval, it voluntarily gives up leadership. When set to 0, it is automatically set to 20 times `heart_beat_interval_ms`; when set below 0, the leader does not give up leadership (default: 0).
 - `rotate_log_storage_interval` — how many log records to store in a single file (default: 100000).
 - `reserved_log_items` — how many coordination log records to keep before compaction (default: 100000).
 - `snapshot_distance` — how often ClickHouse Keeper creates new snapshots, in the number of log records (default: 100000).
|
|||||||
heart_beat_interval_ms=500
|
heart_beat_interval_ms=500
|
||||||
election_timeout_lower_bound_ms=1000
|
election_timeout_lower_bound_ms=1000
|
||||||
election_timeout_upper_bound_ms=2000
|
election_timeout_upper_bound_ms=2000
|
||||||
|
leadership_expiry_ms=0
|
||||||
reserved_log_items=1000000000000000
|
reserved_log_items=1000000000000000
|
||||||
snapshot_distance=10000
|
snapshot_distance=10000
|
||||||
auto_forwarding=true
|
auto_forwarding=true
|
||||||
|
@ -649,11 +649,22 @@ log_query_threads=1
|
|||||||
|
|
||||||
## max_query_size {#settings-max_query_size}
|
## max_query_size {#settings-max_query_size}
|
||||||
|
|
||||||
查询的最大部分,可以被带到RAM用于使用SQL解析器进行解析。
|
SQL 解析器解析的查询字符串的最大字节数。 INSERT 查询的 VALUES 子句中的数据由单独的流解析器(消耗 O(1) RAM)处理,并且不受此限制的影响。
|
||||||
插入查询还包含由单独的流解析器(消耗O(1)RAM)处理的插入数据,这些数据不包含在此限制中。
|
|
||||||
|
|
||||||
默认值:256KiB。
|
默认值:256KiB。
|
||||||
|
|
||||||
|
|
||||||
|
## max_parser_depth {#max_parser_depth}
|
||||||
|
|
||||||
|
限制递归下降解析器中的最大递归深度。允许控制堆栈大小。
|
||||||
|
|
||||||
|
可能的值:
|
||||||
|
|
||||||
|
- 正整数。
|
||||||
|
- 0 — 递归深度不受限制。
|
||||||
|
|
||||||
|
默认值:1000。
|
||||||
|
|
||||||
## interactive_delay {#interactive-delay}
|
## interactive_delay {#interactive-delay}
|
||||||
|
|
||||||
以微秒为单位的间隔,用于检查请求执行是否已被取消并发送进度。
|
以微秒为单位的间隔,用于检查请求执行是否已被取消并发送进度。
|
||||||
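A usage sketch for `max_query_size` and `max_parser_depth` above (the values are for illustration only):

```sql
SET max_query_size = 262144;   -- 256 KiB, the default
SET max_parser_depth = 10000;  -- allow deeper nesting, at the cost of stack usage
```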
@@ -1064,6 +1075,28 @@ ClickHouse raises an exception
 
 Default value: 0.
 
+## optimize_functions_to_subcolumns {#optimize_functions_to_subcolumns}
+
+Enables or disables an optimization that transforms some functions into reading subcolumns. This reduces the amount of data to read.
+
+These functions can be transformed:
+
+- [length](../../sql-reference/functions/array-functions.md/#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
+- [empty](../../sql-reference/functions/array-functions.md/#empty函数) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
+- [notEmpty](../../sql-reference/functions/array-functions.md/#notempty函数) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
+- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
+- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
+- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
+- [mapKeys](../../sql-reference/functions/tuple-map-functions.mdx/#mapkeys) to read the [keys](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn.
+- [mapValues](../../sql-reference/functions/tuple-map-functions.mdx/#mapvalues) to read the [values](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn.
+
+Possible values:
+
+- 0 — Optimization disabled.
+- 1 — Optimization enabled.
+
+Default value: `0`.
+
 ## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}
 
 - Type: seconds
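A usage sketch for `optimize_functions_to_subcolumns` above (the table name is hypothetical):

```sql
SET optimize_functions_to_subcolumns = 1;
-- with the optimization on, length() can be answered from the arr.size0 subcolumn
SELECT length(arr) FROM t_arr;
```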
@@ -1,7 +1,7 @@
 ---
 slug: /zh/sql-reference/data-types/array
 ---
-# 阵列(T) {#data-type-array}
+# 数组(T) {#data-type-array}
 
 An array of `T`-type elements.
 
@@ -66,3 +66,27 @@ SELECT array(1, 'a')
 Received exception from server (version 1.1.54388):
 Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
 ```
+
+## Array size {#array-size}
+
+The size of an array can be found with the `size0` subcolumn, without reading the whole column. For multidimensional arrays you can use `sizeN-1`, where `N` is the desired dimension.
+
+**Example**
+
+Query:
+
+```sql
+CREATE TABLE t_arr (`arr` Array(Array(Array(UInt32)))) ENGINE = MergeTree ORDER BY tuple();
+
+INSERT INTO t_arr VALUES ([[[12, 13, 0, 1],[12]]]);
+
+SELECT arr.size0, arr.size1, arr.size2 FROM t_arr;
+```
+
+Result:
+
+``` text
+┌─arr.size0─┬─arr.size1─┬─arr.size2─┐
+│         1 │ [2]       │ [[4,1]]   │
+└───────────┴───────────┴───────────┘
+```
@@ -20,6 +20,33 @@ slug: /zh/sql-reference/data-types/nullable

The entries in the mask file allow ClickHouse to distinguish between «NULL» and the default value of the corresponding data type for each table row. Because of the additional file, a «Nullable» column consumes more storage space than an ordinary one.

+## null subcolumn {#finding-null}
+
+`NULL` values in a column can be found with the `null` subcolumn, without reading the whole column. It returns `1` if the corresponding value is `NULL` and `0` otherwise.
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE nullable (`n` Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();
+
+INSERT INTO nullable VALUES (1) (NULL) (2) (NULL);
+
+SELECT n.null FROM nullable;
+```
+
+Result:
+
+``` text
+┌─n.null─┐
+│      0 │
+│      1 │
+│      0 │
+│      1 │
+└────────┘
+```

## Usage example {#yong-fa-shi-li}

``` sql
@@ -567,10 +567,6 @@ public:
}

-#ifndef __clang__
-#pragma GCC optimize("-fno-var-tracking-assignments")
-#endif

int mainEntryClickHouseBenchmark(int argc, char ** argv)
{
    using namespace DB;
@@ -51,10 +51,6 @@
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Formats/registerFormats.h>

-#ifndef __clang__
-#pragma GCC optimize("-fno-var-tracking-assignments")
-#endif

namespace fs = std::filesystem;
using namespace std::literals;
@@ -259,7 +259,7 @@ ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std
    res.is_remote = 1;
    for (const auto & replica : replicas)
    {
-        if (isLocalAddress(DNSResolver::instance().resolveHost(replica.host_name)))
+        if (isLocalAddress(DNSResolver::instance().resolveHostAllInOriginOrder(replica.host_name).front()))
        {
            res.is_remote = 0;
            break;
@@ -102,7 +102,7 @@ struct TaskStateWithOwner
        return TaskStateWithOwner(state, owner).toString();
    }

-    String toString()
+    String toString() const
    {
        WriteBufferFromOwnString wb;
        wb << static_cast<UInt32>(state) << "\n" << escape << owner;
@@ -180,7 +180,7 @@ public:
        auto logger = getLogger("ClusterCopier");
        if (rsp.error == Coordination::Error::ZOK)
        {
-            switch (rsp.type)
+            switch (rsp.type) /// NOLINT(bugprone-switch-missing-default-case)
            {
                case Coordination::CREATED:
                    LOG_DEBUG(logger, "CleanStateClock change: CREATED, at {}", rsp.path);
@@ -841,7 +841,7 @@ void LocalServer::addOptions(OptionsDescription & options_description)

        /// If structure argument is omitted then initial query is not generated
        ("structure,S", po::value<std::string>(), "structure of the initial table (list of column and type names)")
-        ("file,f", po::value<std::string>(), "path to file with data of the initial table (stdin if not specified)")
+        ("file,F", po::value<std::string>(), "path to file with data of the initial table (stdin if not specified)")

        ("input-format", po::value<std::string>(), "input format of the initial table data")
        ("output-format", po::value<std::string>(), "default output format")
@@ -40,7 +40,6 @@ public:

    explicit ConnectionHolder(const String & connection_string_)
        : pool(nullptr)
-        , connection()
        , connection_string(connection_string_)
    {
        updateConnection();
@@ -143,7 +142,7 @@ public:
    {
        std::lock_guard lock(mutex);

-        if (!factory.count(connection_string))
+        if (!factory.contains(connection_string))
            factory.emplace(std::make_pair(connection_string, std::make_shared<nanodbc::Pool>(pool_size)));

        auto & pool = factory[connection_string];
@@ -184,7 +184,7 @@ static bool jemallocOptionEnabled(const char *name)
    return value;
}
#else
-static bool jemallocOptionEnabled(const char *) { return 0; }
+static bool jemallocOptionEnabled(const char *) { return false; }
#endif

int mainEntryClickHouseServer(int argc, char ** argv)
|
|||||||
}
|
}
|
||||||
global_context->setMarkCache(mark_cache_policy, mark_cache_size, mark_cache_size_ratio);
|
global_context->setMarkCache(mark_cache_policy, mark_cache_size, mark_cache_size_ratio);
|
||||||
|
|
||||||
|
size_t page_cache_size = server_settings.page_cache_size;
|
||||||
|
if (page_cache_size != 0)
|
||||||
|
global_context->setPageCache(
|
||||||
|
server_settings.page_cache_chunk_size, server_settings.page_cache_mmap_size,
|
||||||
|
page_cache_size, server_settings.page_cache_use_madv_free,
|
||||||
|
server_settings.page_cache_use_transparent_huge_pages);
|
||||||
|
|
||||||
String index_uncompressed_cache_policy = server_settings.index_uncompressed_cache_policy;
|
String index_uncompressed_cache_policy = server_settings.index_uncompressed_cache_policy;
|
||||||
size_t index_uncompressed_cache_size = server_settings.index_uncompressed_cache_size;
|
size_t index_uncompressed_cache_size = server_settings.index_uncompressed_cache_size;
|
||||||
double index_uncompressed_cache_size_ratio = server_settings.index_uncompressed_cache_size_ratio;
|
double index_uncompressed_cache_size_ratio = server_settings.index_uncompressed_cache_size_ratio;
|
||||||
@ -1874,7 +1881,6 @@ try
|
|||||||
{
|
{
|
||||||
total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
|
total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -1889,10 +1895,6 @@ try
        " when two different stack unwinding methods will interfere with each other.");
#endif

-#if !defined(__x86_64__)
-    LOG_INFO(log, "Query Profiler and TraceCollector is only tested on x86_64. It also known to not work under qemu-user.");
-#endif

    if (!hasPHDRCache())
        LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created"
            " (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe).");
@@ -1,6 +1,4 @@
-# vim: ft=config
-
-[BASIC]
+[tool.pylint.BASIC]
max-module-lines=2000
# due to SQL
max-line-length=200
@@ -9,11 +7,13 @@ max-branches=50
max-nested-blocks=10
max-statements=200

-[FORMAT]
-ignore-long-lines = (# )?<?https?://\S+>?$
+[tool.pylint.FORMAT]
+#ignore-long-lines = (# )?<?https?://\S+>?$

-[MESSAGES CONTROL]
-disable = missing-docstring,
+[tool.pylint.'MESSAGES CONTROL']
+# pytest.mark.parametrize is not callable (not-callable)
+disable = '''
+    missing-docstring,
    too-few-public-methods,
    invalid-name,
    too-many-arguments,
@@ -26,18 +26,15 @@ disable = missing-docstring,
    wildcard-import,
    unused-wildcard-import,
    singleton-comparison,
-    # pytest.mark.parametrize is not callable (not-callable)
    not-callable,
-    # https://github.com/PyCQA/pylint/issues/3882
-    # [Python 3.9] Value 'Optional' is unsubscriptable (unsubscriptable-object) (also Union)
-    unsubscriptable-object,
-    # Drop them one day:
    redefined-outer-name,
    broad-except,
    bare-except,
    no-else-return,
    global-statement
+'''

-[SIMILARITIES]
+[tool.pylint.SIMILARITIES]
# due to SQL
min-similarity-lines=1000
9
rust/Cargo.lock
generated
@@ -6,6 +6,7 @@ version = 3
name = "_ch_rust_prql"
version = "0.1.0"
dependencies = [
+ "anstream",
 "prqlc",
 "serde_json",
]
@@ -698,9 +699,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"

[[package]]
name = "miniz_oxide"
-version = "0.7.1"
+version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7"
dependencies = [
 "adler",
]
@@ -751,9 +752,9 @@ dependencies = [

[[package]]
name = "object"
-version = "0.32.1"
+version = "0.32.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0"
+checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
dependencies = [
 "memchr",
]
@@ -4,6 +4,7 @@ name = "_ch_rust_prql"
version = "0.1.0"

[dependencies]
+anstream = {version = "0.6.12"}
prqlc = {version = "0.11.3", default-features = false}
serde_json = "1.0"
@@ -14,39 +14,41 @@ fn set_output(result: String, out: *mut *mut u8, out_size: *mut u64) {
    *out_ptr = CString::new(result).unwrap().into_raw() as *mut u8;
}

+/// Converts a PRQL query from a raw C string to SQL, returning an error code if the conversion fails.
pub unsafe extern "C" fn prql_to_sql_impl(
    query: *const u8,
    size: u64,
    out: *mut *mut u8,
    out_size: *mut u64,
) -> i64 {
-    let query_vec = unsafe { slice::from_raw_parts(query, size.try_into().unwrap()) }.to_vec();
-    let maybe_prql_query = String::from_utf8(query_vec);
-    if maybe_prql_query.is_err() {
+    let query_vec = slice::from_raw_parts(query, size.try_into().unwrap()).to_vec();
+    let Ok(query_str) = String::from_utf8(query_vec) else {
        set_output(
-            String::from("The PRQL query must be UTF-8 encoded!"),
+            "The PRQL query must be UTF-8 encoded!".to_string(),
            out,
            out_size,
        );
        return 1;
-    }
-    let prql_query = maybe_prql_query.unwrap();
-    let opts = &Options {
+    };
+    let opts = Options {
        format: true,
        target: Target::Sql(Some(Dialect::ClickHouse)),
        signature_comment: false,
        color: false,
    };
-    let (is_err, res) = match prqlc::compile(&prql_query, &opts) {
-        Ok(sql_str) => (false, sql_str),
-        Err(err) => (true, err.to_string()),
-    };
-
-    set_output(res, out, out_size);
-
-    match is_err {
-        true => 1,
-        false => 0,
+    if let Ok(sql_str) = prqlc::compile(&query_str, &opts) {
+        // NOTE: Over at PRQL we're considering to un-deprecate & re-enable the
+        // `color: false` option. If that happens, we can remove the `strip_str`
+        // here, which strips color codes from the output.
+        use anstream::adapter::strip_str;
+        let sql_str = strip_str(&sql_str).to_string();
+        set_output(sql_str, out, out_size);
+        0
+    } else {
+        set_output("PRQL compilation failed!".to_string(), out, out_size);
+        1
    }
}
@@ -57,17 +59,50 @@ pub unsafe extern "C" fn prql_to_sql(
    out: *mut *mut u8,
    out_size: *mut u64,
) -> i64 {
-    let ret = panic::catch_unwind(|| {
-        return prql_to_sql_impl(query, size, out, out_size);
-    });
-    return match ret {
-        // NOTE: using cxxbridge we can return proper Result<> type.
-        Err(_err) => 1,
-        Ok(res) => res,
-    }
+    // NOTE: using cxxbridge we can return proper Result<> type.
+    panic::catch_unwind(|| prql_to_sql_impl(query, size, out, out_size)).unwrap_or_else(|_| {
+        set_output("prqlc panicked".to_string(), out, out_size);
+        1
+    })
}

#[no_mangle]
pub unsafe extern "C" fn prql_free_pointer(ptr_to_free: *mut u8) {
    std::mem::drop(CString::from_raw(ptr_to_free as *mut c_char));
}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::ffi::{CStr, CString};
+
+    /// A test helper to offer a rust interface to the C bindings
+    fn run_compile(query: &str) -> (String, i64) {
+        let query_cstr = CString::new(query).unwrap();
+        let query_ptr = query_cstr.as_ptr() as *const u8;
+        let query_size = query_cstr.to_bytes_with_nul().len() as u64 - 1; // Excluding the null terminator
+
+        let mut out: *mut u8 = std::ptr::null_mut();
+        let mut out_size = 0_u64;
+
+        unsafe {
+            let success = prql_to_sql(query_ptr, query_size, &mut out, &mut out_size);
+            let output = CStr::from_ptr(out as *const i8)
+                .to_str()
+                .unwrap()
+                .to_string();
+            prql_free_pointer(out);
+            (output, success)
+        }
+    }
+
+    #[test]
+    fn test_prql_to_sql() {
+        assert!(run_compile("from x").0.contains("SELECT"));
+        assert!(run_compile("asdf").1 == 1);
+        // In prqlc 0.11.3, this is a panic, so that allows us to test that the
+        // panic is caught. When we upgrade prqlc, it won't be a panic any
+        // longer.
+        assert!(run_compile("x -> y").1 == 1);
+    }
+}
@@ -1,7 +1,7 @@
-use skim::prelude::*;
-use term::terminfo::TermInfo;
 use cxx::{CxxString, CxxVector};
+use skim::prelude::*;
 use std::panic;
+use term::terminfo::TermInfo;

#[cxx::bridge]
mod ffi {
@@ -16,7 +16,7 @@ struct Item {
}
impl Item {
    fn new(text: String) -> Self {
-        return Self{
+        Self {
            // Text that will be printed by skim, and will be used for matching.
            //
            // Text that will be shown should not contains new lines since in this case skim may
@@ -24,16 +24,16 @@ impl Item {
            text_no_newlines: text.replace("\n", " "),
            // This will be used when the match had been selected.
            orig_text: text,
-        };
+        }
    }
}
impl SkimItem for Item {
    fn text(&self) -> Cow<str> {
-        return Cow::Borrowed(&self.text_no_newlines);
+        Cow::Borrowed(&self.text_no_newlines)
    }

    fn output(&self) -> Cow<str> {
-        return Cow::Borrowed(&self.orig_text);
+        Cow::Borrowed(&self.orig_text)
    }
}
@@ -88,14 +88,11 @@ fn skim_impl(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String,
    if output.selected_items.is_empty() {
        return Err("No items had been selected".to_string());
    }
-    return Ok(output.selected_items[0].output().to_string());
+    Ok(output.selected_items[0].output().to_string())
}

fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String, String> {
-    let ret = panic::catch_unwind(|| {
-        return skim_impl(prefix, words);
-    });
-    return match ret {
+    match panic::catch_unwind(|| skim_impl(prefix, words)) {
        Err(err) => {
            let e = if let Some(s) = err.downcast_ref::<String>() {
                format!("{}", s)
@@ -105,7 +102,7 @@ fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String, Stri
                format!("Unknown panic type: {:?}", err.type_id())
            };
            Err(format!("Rust panic: {:?}", e))
-        },
+        }
        Ok(res) => res,
    }
}
@@ -163,6 +163,7 @@ enum class AccessType
    M(SYSTEM_DROP_FILESYSTEM_CACHE, "SYSTEM DROP FILESYSTEM CACHE, DROP FILESYSTEM CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
    M(SYSTEM_DROP_DISTRIBUTED_CACHE, "SYSTEM DROP DISTRIBUTED CACHE, DROP DISTRIBUTED CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
    M(SYSTEM_SYNC_FILESYSTEM_CACHE, "SYSTEM REPAIR FILESYSTEM CACHE, REPAIR FILESYSTEM CACHE, SYNC FILESYSTEM CACHE", GLOBAL, SYSTEM) \
+    M(SYSTEM_DROP_PAGE_CACHE, "SYSTEM DROP PAGE CACHE, DROP PAGE CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
    M(SYSTEM_DROP_SCHEMA_CACHE, "SYSTEM DROP SCHEMA CACHE, DROP SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
    M(SYSTEM_DROP_FORMAT_SCHEMA_CACHE, "SYSTEM DROP FORMAT SCHEMA CACHE, DROP FORMAT SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
    M(SYSTEM_DROP_S3_CLIENT_CACHE, "SYSTEM DROP S3 CLIENT, DROP S3 CLIENT CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
@@ -55,7 +55,7 @@ namespace
{
    IPAddress addr_v6 = toIPv6(address);

-    auto host_addresses = DNSResolver::instance().resolveHostAll(host);
+    auto host_addresses = DNSResolver::instance().resolveHostAllInOriginOrder(host);

    for (const auto & addr : host_addresses)
    {
@@ -49,71 +49,135 @@ String QuotaTypeInfo::valueToStringWithName(QuotaValue value) const

const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type)
{
-    static constexpr auto make_info = [](const char * raw_name_, UInt64 output_denominator_)
+    static constexpr auto make_info = [](const char * raw_name_, String current_usage_description_, String max_allowed_usage_description_, UInt64 output_denominator_)
    {
        String init_name = raw_name_;
        boost::to_lower(init_name);
        String init_keyword = raw_name_;
        boost::replace_all(init_keyword, "_", " ");
        bool init_output_as_float = (output_denominator_ != 1);
-        return QuotaTypeInfo{raw_name_, std::move(init_name), std::move(init_keyword), init_output_as_float, output_denominator_};
+        return QuotaTypeInfo
+        {
+            .raw_name = raw_name_,
+            .name = std::move(init_name),
+            .keyword = std::move(init_keyword),
+            .current_usage_description = std::move(current_usage_description_),
+            .max_allowed_usage_description = std::move(max_allowed_usage_description_),
+            .output_as_float = init_output_as_float,
+            .output_denominator = output_denominator_
+        };
    };

    switch (type)
    {
        case QuotaType::QUERIES:
        {
-            static const auto info = make_info("QUERIES", 1);
+            static const auto info = make_info(
+                "QUERIES",
+                "The current number of executed queries.",
+                "The maximum allowed number of queries of all types allowed to be executed.",
+                1
+            );
            return info;
        }
        case QuotaType::QUERY_SELECTS:
        {
-            static const auto info = make_info("QUERY_SELECTS", 1);
+            static const auto info = make_info(
+                "QUERY_SELECTS",
+                "The current number of executed SELECT queries.",
+                "The maximum allowed number of SELECT queries allowed to be executed.",
+                1
+            );
            return info;
        }
        case QuotaType::QUERY_INSERTS:
        {
-            static const auto info = make_info("QUERY_INSERTS", 1);
+            static const auto info = make_info(
+                "QUERY_INSERTS",
+                "The current number of executed INSERT queries.",
+                "The maximum allowed number of INSERT queries allowed to be executed.",
+                1
+            );
            return info;
        }
        case QuotaType::ERRORS:
        {
-            static const auto info = make_info("ERRORS", 1);
+            static const auto info = make_info(
+                "ERRORS",
+                "The current number of queries resulted in an error.",
+                "The maximum number of queries resulted in an error allowed within the specified period of time.",
+                1
+            );
            return info;
        }
        case QuotaType::RESULT_ROWS:
        {
-            static const auto info = make_info("RESULT_ROWS", 1);
+            static const auto info = make_info(
+                "RESULT_ROWS",
+                "The current total number of rows in the result set of all queries within the current period of time.",
+                "The maximum total number of rows in the result set of all queries allowed within the specified period of time.",
+                1
+            );
            return info;
        }
        case QuotaType::RESULT_BYTES:
        {
-            static const auto info = make_info("RESULT_BYTES", 1);
+            static const auto info = make_info(
+                "RESULT_BYTES",
+                "The current total number of bytes in the result set of all queries within the current period of time.",
+                "The maximum total number of bytes in the result set of all queries allowed within the specified period of time.",
+                1
+            );
            return info;
        }
        case QuotaType::READ_ROWS:
        {
-            static const auto info = make_info("READ_ROWS", 1);
+            static const auto info = make_info(
+                "READ_ROWS",
+                "The current total number of rows read during execution of all queries within the current period of time.",
+                "The maximum number of rows to read during execution of all queries allowed within the specified period of time.",
+                1
+            );
            return info;
        }
        case QuotaType::READ_BYTES:
        {
-            static const auto info = make_info("READ_BYTES", 1);
+            static const auto info = make_info(
+                "READ_BYTES",
+                "The current total number of bytes read during execution of all queries within the current period of time.",
+                "The maximum number of bytes to read during execution of all queries allowed within the specified period of time.",
+                1
+            );
            return info;
        }
        case QuotaType::EXECUTION_TIME:
        {
-            static const auto info = make_info("EXECUTION_TIME", 1000000000 /* execution_time is stored in nanoseconds */);
+            static const auto info = make_info(
+                "EXECUTION_TIME",
+                "The current total amount of time (in nanoseconds) spent to execute queries within the current period of time",
+                "The maximum amount of time (in nanoseconds) allowed for all queries to execute within the specified period of time",
+                1000000000 /* execution_time is stored in nanoseconds */
+            );
            return info;
        }
        case QuotaType::WRITTEN_BYTES:
        {
-            static const auto info = make_info("WRITTEN_BYTES", 1);
+            static const auto info = make_info(
+                "WRITTEN_BYTES",
+                "The current total number of bytes written during execution of all queries within the current period of time.",
+                "The maximum number of bytes to be written during execution of all queries allowed within the specified period of time.",
+                1
+            );
            return info;
        }
        case QuotaType::FAILED_SEQUENTIAL_AUTHENTICATIONS:
        {
-            static const auto info = make_info("FAILED_SEQUENTIAL_AUTHENTICATIONS", 1);
+            static const auto info = make_info(
+                "FAILED_SEQUENTIAL_AUTHENTICATIONS",
+                "The current number of consecutive authentication failures within the current period of time.",
+                "The maximum number of consecutive authentication failures allowed within the specified period of time.",
+                1
+            );
            return info;
        }
        case QuotaType::MAX: break;
@@ -33,6 +33,8 @@ struct QuotaTypeInfo
    const char * const raw_name = "";
    const String name; /// Lowercased with underscores, e.g. "result_rows".
    const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS".
+    const String current_usage_description;
+    const String max_allowed_usage_description;
    const bool output_as_float = false;
    const UInt64 output_denominator = 1;
    String valueToString(QuotaValue value) const;
@@ -33,7 +33,7 @@ String toString(RowPolicyFilterType type)

const RowPolicyFilterTypeInfo & RowPolicyFilterTypeInfo::get(RowPolicyFilterType type_)
{
-    static constexpr auto make_info = [](const char * raw_name_)
+    static constexpr auto make_info = [](const char * raw_name_, const String & comment_)
    {
        String init_name = raw_name_;
        boost::to_lower(init_name);
@@ -41,14 +41,17 @@ const RowPolicyFilterTypeInfo & RowPolicyFilterTypeInfo::get(RowPolicyFilterType
        String init_command = init_name.substr(0, underscore_pos);
        boost::to_upper(init_command);
        bool init_is_check = (std::string_view{init_name}.substr(underscore_pos + 1) == "check");
-        return RowPolicyFilterTypeInfo{raw_name_, std::move(init_name), std::move(init_command), init_is_check};
+        return RowPolicyFilterTypeInfo{raw_name_, std::move(init_name), std::move(init_command), comment_, init_is_check};
    };

    switch (type_)
    {
        case RowPolicyFilterType::SELECT_FILTER:
        {
-            static const auto info = make_info("SELECT_FILTER");
+            static const auto info = make_info(
+                "SELECT_FILTER",
+                "Expression which is used for filtering in SELECT queries."
+            );
            return info;
        }
#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet.
@@ -52,6 +52,7 @@ struct RowPolicyFilterTypeInfo
    const char * const raw_name;
    const String name; /// Lowercased with underscores, e.g. "select_filter".
    const String command; /// Uppercased without last word, e.g. "SELECT".
+    const String description;
    const bool is_check; /// E.g. false for SELECT_FILTER.
    static const RowPolicyFilterTypeInfo & get(RowPolicyFilterType type);
};
@@ -1,5 +1,7 @@
#include <Analyzer/ConstantNode.h>

+#include <Analyzer/FunctionNode.h>
+
#include <Common/assert_cast.h>
#include <Common/FieldVisitorToString.h>
#include <Common/SipHash.h>
@@ -38,52 +40,9 @@ ConstantNode::ConstantNode(Field value_)
    : ConstantNode(value_, applyVisitor(FieldToDataType(), value_))
{}

-void ConstantNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const
-{
-    buffer << std::string(indent, ' ') << "CONSTANT id: " << format_state.getNodeId(this);
-
-    if (hasAlias())
-        buffer << ", alias: " << getAlias();
-
-    buffer << ", constant_value: " << constant_value->getValue().dump();
-    buffer << ", constant_value_type: " << constant_value->getType()->getName();
-
-    if (getSourceExpression())
-    {
-        buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION" << '\n';
-        getSourceExpression()->dumpTreeImpl(buffer, format_state, indent + 4);
-    }
-}
-
-bool ConstantNode::isEqualImpl(const IQueryTreeNode & rhs) const
-{
-    const auto & rhs_typed = assert_cast<const ConstantNode &>(rhs);
-    return *constant_value == *rhs_typed.constant_value && value_string == rhs_typed.value_string;
-}
-
-void ConstantNode::updateTreeHashImpl(HashState & hash_state) const
-{
-    auto type_name = constant_value->getType()->getName();
-    hash_state.update(type_name.size());
-    hash_state.update(type_name);
-
-    hash_state.update(value_string.size());
-    hash_state.update(value_string);
-}
-
-QueryTreeNodePtr ConstantNode::cloneImpl() const
-{
-    return std::make_shared<ConstantNode>(constant_value, source_expression);
-}
-
-ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
+bool ConstantNode::requiresCastCall() const
{
    const auto & constant_value_literal = constant_value->getValue();
-    auto constant_value_ast = std::make_shared<ASTLiteral>(constant_value_literal);
-
-    if (!options.add_cast_for_constants)
-        return constant_value_ast;
-
    bool need_to_add_cast_function = false;
    auto constant_value_literal_type = constant_value_literal.getType();
    WhichDataType constant_value_type(constant_value->getType());
@@ -131,7 +90,72 @@ ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
    // Add cast if constant was created as a result of constant folding.
    // Constant folding may lead to type transformation and literal on shard
    // may have a different type.
-    if (need_to_add_cast_function || source_expression != nullptr)
+    return need_to_add_cast_function || source_expression != nullptr;
+}
+
+bool ConstantNode::receivedFromInitiatorServer() const
+{
+    if (!hasSourceExpression())
+        return false;
+
+    auto * cast_function = getSourceExpression()->as<FunctionNode>();
+    if (!cast_function || cast_function->getFunctionName() != "_CAST")
+        return false;
+    return true;
+}
+
+void ConstantNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const
+{
+    buffer << std::string(indent, ' ') << "CONSTANT id: " << format_state.getNodeId(this);
+
+    if (hasAlias())
+        buffer << ", alias: " << getAlias();
+
+    buffer << ", constant_value: ";
+    if (mask_id)
+        buffer << "[HIDDEN id: " << mask_id << "]";
+    else
+        buffer << constant_value->getValue().dump();
+
+    buffer << ", constant_value_type: " << constant_value->getType()->getName();
+
+    if (!mask_id && getSourceExpression())
+    {
+        buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION" << '\n';
+        getSourceExpression()->dumpTreeImpl(buffer, format_state, indent + 4);
+    }
+}
+
+bool ConstantNode::isEqualImpl(const IQueryTreeNode & rhs) const
+{
+    const auto & rhs_typed = assert_cast<const ConstantNode &>(rhs);
+    return *constant_value == *rhs_typed.constant_value && value_string == rhs_typed.value_string;
+}
+
+void ConstantNode::updateTreeHashImpl(HashState & hash_state) const
+{
+    auto type_name = constant_value->getType()->getName();
+    hash_state.update(type_name.size());
+    hash_state.update(type_name);
+
+    hash_state.update(value_string.size());
+    hash_state.update(value_string);
+}
+
+QueryTreeNodePtr ConstantNode::cloneImpl() const
+{
+    return std::make_shared<ConstantNode>(constant_value, source_expression);
+}
+
+ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
+{
+    const auto & constant_value_literal = constant_value->getValue();
+    auto constant_value_ast = std::make_shared<ASTLiteral>(constant_value_literal);
+
+    if (!options.add_cast_for_constants)
+        return constant_value_ast;
+
+    if (requiresCastCall())
    {
        auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value->getType()->getName());
        return makeASTFunction("_CAST", std::move(constant_value_ast), std::move(constant_type_name_ast));
@@ -75,6 +75,17 @@ public:
        return constant_value->getType();
    }

+    /// Check if conversion to AST requires wrapping with _CAST function.
+    bool requiresCastCall() const;
+
+    /// Check if constant is a result of _CAST function constant folding.
+    bool receivedFromInitiatorServer() const;
+
+    void setMaskId(size_t id)
+    {
+        mask_id = id;
+    }
+
    void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;

protected:
@@ -90,6 +101,7 @@ private:
    ConstantValuePtr constant_value;
    String value_string;
    QueryTreeNodePtr source_expression;
+    size_t mask_id = 0;

    static constexpr size_t children_size = 0;
};
372
src/Analyzer/FunctionSecretArgumentsFinderTreeNode.h
Normal file
372
src/Analyzer/FunctionSecretArgumentsFinderTreeNode.h
Normal file
@ -0,0 +1,372 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <Parsers/FunctionSecretArgumentsFinder.h>
|
||||||
|
#include <Analyzer/ConstantNode.h>
|
||||||
|
#include <Analyzer/FunctionNode.h>
|
||||||
|
#include <Analyzer/IQueryTreeNode.h>
|
||||||
|
#include <Analyzer/IdentifierNode.h>
|
||||||
|
#include <Analyzer/ListNode.h>
|
||||||
|
#include <Common/KnownObjectNames.h>
|
||||||
|
#include <Core/QualifiedTableName.h>
|
||||||
|
|
||||||
|
#include <boost/algorithm/string/predicate.hpp>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
/// Finds arguments of a specified function which should not be displayed for most users for security reasons.
|
||||||
|
/// That involves passwords and secret keys.
|
||||||
|
class FunctionSecretArgumentsFinderTreeNode
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
explicit FunctionSecretArgumentsFinderTreeNode(const FunctionNode & function_) : function(function_), arguments(function.getArguments())
|
||||||
|
{
|
||||||
|
if (arguments.getNodes().empty())
|
||||||
|
return;
|
||||||
|
|
||||||
|
findFunctionSecretArguments();
|
||||||
|
}
|
||||||
|
|
||||||
|
struct Result
|
||||||
|
{
|
||||||
|
/// Result constructed by default means no arguments will be hidden.
|
||||||
|
size_t start = static_cast<size_t>(-1);
|
||||||
|
size_t count = 0; /// Mostly it's either 0 or 1. There are only a few cases where `count` can be greater than 1 (e.g. see `encrypt`).
|
||||||
|
/// In all known cases secret arguments are consecutive
|
||||||
|
bool are_named = false; /// Arguments like `password = 'password'` are considered as named arguments.
|
||||||
|
/// E.g. "headers" in `url('..', headers('foo' = '[HIDDEN]'))`
|
||||||
|
std::vector<std::string> nested_maps;
|
||||||
|
|
||||||
|
bool hasSecrets() const
|
||||||
|
{
|
||||||
|
return count != 0 || !nested_maps.empty();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
FunctionSecretArgumentsFinder::Result getResult() const { return result; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
const FunctionNode & function;
|
||||||
|
const ListNode & arguments;
|
||||||
|
FunctionSecretArgumentsFinder::Result result;
|
||||||
|
|
||||||
|
void markSecretArgument(size_t index, bool argument_is_named = false)
|
||||||
|
{
|
||||||
|
if (index >= arguments.getNodes().size())
|
||||||
|
return;
|
||||||
|
if (!result.count)
|
||||||
|
{
|
||||||
|
result.start = index;
|
||||||
|
result.are_named = argument_is_named;
|
||||||
|
}
|
||||||
|
chassert(index >= result.start); /// We always check arguments consecutively
|
||||||
|
result.count = index + 1 - result.start;
|
||||||
|
if (!argument_is_named)
|
||||||
|
result.are_named = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void findFunctionSecretArguments()
|
||||||
|
{
|
||||||
|
const auto & name = function.getFunctionName();
|
||||||
|
|
||||||
|
if ((name == "mysql") || (name == "postgresql") || (name == "mongodb"))
|
||||||
|
{
|
||||||
|
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
|
||||||
|
/// postgresql('host:port', 'database', 'table', 'user', 'password', ...)
|
||||||
|
/// mongodb('host:port', 'database', 'collection', 'user', 'password', ...)
|
||||||
|
findMySQLFunctionSecretArguments();
|
||||||
|
}
|
||||||
|
else if ((name == "s3") || (name == "cosn") || (name == "oss") ||
|
||||||
|
(name == "deltaLake") || (name == "hudi") || (name == "iceberg"))
|
||||||
|
{
|
||||||
|
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
|
||||||
|
findS3FunctionSecretArguments(/* is_cluster_function= */ false);
|
||||||
|
}
|
||||||
|
else if (name == "s3Cluster")
|
||||||
|
{
|
||||||
|
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', ...)
|
||||||
|
findS3FunctionSecretArguments(/* is_cluster_function= */ true);
|
||||||
|
}
|
||||||
|
else if ((name == "remote") || (name == "remoteSecure"))
|
||||||
|
{
|
||||||
|
/// remote('addresses_expr', 'db', 'table', 'user', 'password', ...)
|
||||||
|
findRemoteFunctionSecretArguments();
|
||||||
|
}
|
||||||
|
else if ((name == "encrypt") || (name == "decrypt") ||
|
||||||
|
(name == "aes_encrypt_mysql") || (name == "aes_decrypt_mysql") ||
|
||||||
|
(name == "tryDecrypt"))
|
||||||
|
{
|
||||||
|
/// encrypt('mode', 'plaintext', 'key' [, iv, aad])
|
||||||
|
findEncryptionFunctionSecretArguments();
|
||||||
|
}
|
||||||
|
else if (name == "url")
|
||||||
|
{
|
||||||
|
findURLSecretArguments();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void findMySQLFunctionSecretArguments()
|
||||||
|
{
|
||||||
|
if (isNamedCollectionName(0))
|
||||||
|
{
|
||||||
|
/// mysql(named_collection, ..., password = 'password', ...)
|
||||||
|
findSecretNamedArgument("password", 1);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
|
||||||
|
markSecretArgument(4);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the number of arguments excluding "headers" and "extra_credentials" (which should
|
||||||
|
/// always be at the end). Marks "headers" as secret, if found.
|
||||||
|
size_t excludeS3OrURLNestedMaps()
|
||||||
|
{
|
||||||
|
const auto & nodes = arguments.getNodes();
|
||||||
|
size_t count = nodes.size();
|
||||||
|
while (count > 0)
|
||||||
|
{
|
||||||
|
const FunctionNode * f = nodes.at(count - 1)->as<FunctionNode>();
|
||||||
|
if (!f)
|
||||||
|
break;
|
||||||
|
if (f->getFunctionName() == "headers")
|
||||||
|
result.nested_maps.push_back(f->getFunctionName());
|
||||||
|
else if (f->getFunctionName() != "extra_credentials")
|
||||||
|
break;
|
||||||
|
count -= 1;
|
||||||
|
}
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
|
||||||
|
void findS3FunctionSecretArguments(bool is_cluster_function)
|
||||||
|
{
|
||||||
|
/// s3Cluster('cluster_name', 'url', ...) has 'url' as its second argument.
|
||||||
|
size_t url_arg_idx = is_cluster_function ? 1 : 0;
|
||||||
|
|
||||||
|
if (!is_cluster_function && isNamedCollectionName(0))
|
||||||
|
{
|
||||||
|
/// s3(named_collection, ..., secret_access_key = 'secret_access_key', ...)
|
||||||
|
findSecretNamedArgument("secret_access_key", 1);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// We should check other arguments first because we don't need to do any replacement in case of
|
||||||
|
/// s3('url', NOSIGN, 'format' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
|
||||||
|
/// s3('url', 'format', 'structure' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
|
||||||
|
size_t count = excludeS3OrURLNestedMaps();
|
||||||
|
if ((url_arg_idx + 3 <= count) && (count <= url_arg_idx + 4))
|
||||||
|
{
|
||||||
|
String second_arg;
|
||||||
|
if (tryGetStringFromArgument(url_arg_idx + 1, &second_arg))
|
||||||
|
{
|
||||||
|
if (boost::iequals(second_arg, "NOSIGN"))
|
||||||
|
return; /// The argument after 'url' is "NOSIGN".
|
||||||
|
|
||||||
|
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
|
||||||
|
return; /// The argument after 'url' is a format: s3('url', 'format', ...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// We're going to replace 'aws_secret_access_key' with '[HIDDEN]' for the following signatures:
|
||||||
|
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
|
||||||
|
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
|
||||||
|
if (url_arg_idx + 2 < count)
|
||||||
|
markSecretArgument(url_arg_idx + 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
void findURLSecretArguments()
|
||||||
|
{
|
||||||
|
if (!isNamedCollectionName(0))
|
||||||
|
excludeS3OrURLNestedMaps();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool tryGetStringFromArgument(size_t arg_idx, String * res, bool allow_identifier = true) const
|
||||||
|
{
|
||||||
|
if (arg_idx >= arguments.getNodes().size())
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return tryGetStringFromArgument(arguments.getNodes()[arg_idx], res, allow_identifier);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool tryGetStringFromArgument(const QueryTreeNodePtr argument, String * res, bool allow_identifier = true)
|
||||||
|
{
|
||||||
|
if (const auto * literal = argument->as<ConstantNode>())
|
||||||
|
{
|
||||||
|
if (literal->getValue().getType() != Field::Types::String)
|
||||||
|
return false;
|
||||||
|
if (res)
|
||||||
|
*res = literal->getValue().safeGet<String>();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (allow_identifier)
|
||||||
|
{
|
||||||
|
if (const auto * id = argument->as<IdentifierNode>())
|
||||||
|
{
|
||||||
|
if (res)
|
||||||
|
*res = id->getIdentifier().getFullName();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void findRemoteFunctionSecretArguments()
|
||||||
|
{
|
||||||
|
if (isNamedCollectionName(0))
|
||||||
|
{
|
||||||
|
/// remote(named_collection, ..., password = 'password', ...)
|
||||||
|
findSecretNamedArgument("password", 1);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// We're going to replace 'password' with '[HIDDEN'] for the following signatures:
|
||||||
|
/// remote('addresses_expr', db.table, 'user' [, 'password'] [, sharding_key])
|
||||||
|
/// remote('addresses_expr', 'db', 'table', 'user' [, 'password'] [, sharding_key])
|
||||||
|
/// remote('addresses_expr', table_function(), 'user' [, 'password'] [, sharding_key])
|
||||||
|
|
||||||
|
/// But we should check the number of arguments first because we don't need to do any replacements in case of
|
||||||
|
/// remote('addresses_expr', db.table)
|
||||||
|
if (arguments.getNodes().size() < 3)
|
||||||
|
return;
|
||||||
|
|
||||||
|
size_t arg_num = 1;
|
||||||
|
|
||||||
|
/// Skip 1 or 2 arguments with table_function() or db.table or 'db', 'table'.
|
||||||
|
const auto * table_function = arguments.getNodes()[arg_num]->as<FunctionNode>();
|
||||||
|
if (table_function && KnownTableFunctionNames::instance().exists(table_function->getFunctionName()))
|
||||||
|
{
|
||||||
|
++arg_num;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
std::optional<String> database;
|
||||||
|
std::optional<QualifiedTableName> qualified_table_name;
|
||||||
|
if (!tryGetDatabaseNameOrQualifiedTableName(arg_num, database, qualified_table_name))
|
||||||
|
{
|
||||||
|
/// We couldn't evaluate the argument so we don't know whether it is 'db.table' or just 'db'.
|
||||||
|
/// Hence we can't figure out whether we should skip one argument 'user' or two arguments 'table', 'user'
|
||||||
|
/// before the argument 'password'. So it's safer to wipe two arguments just in case.
|
||||||
|
/// The last argument can be also a `sharding_key`, so we need to check that argument is a literal string
|
||||||
|
/// before wiping it (because the `password` argument is always a literal string).
|
||||||
|
if (tryGetStringFromArgument(arg_num + 2, nullptr, /* allow_identifier= */ false))
|
||||||
|
{
|
||||||
|
/// Wipe either `password` or `user`.
|
||||||
|
markSecretArgument(arg_num + 2);
|
||||||
|
}
|
||||||
|
if (tryGetStringFromArgument(arg_num + 3, nullptr, /* allow_identifier= */ false))
|
||||||
|
{
|
||||||
|
/// Wipe either `password` or `sharding_key`.
|
||||||
|
markSecretArgument(arg_num + 3);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Skip the current argument (which is either a database name or a qualified table name).
|
||||||
|
++arg_num;
|
||||||
|
if (database)
|
||||||
|
{
|
||||||
|
            /// Skip the 'table' argument if the previous argument was a database name.
            ++arg_num;
        }
    }

    /// Skip username.
    ++arg_num;

    /// Do our replacement:
    /// remote('addresses_expr', db.table, 'user', 'password', ...) -> remote('addresses_expr', db.table, 'user', '[HIDDEN]', ...)
    /// The last argument can also be a `sharding_key`, so we need to check that the argument is a literal string
    /// before wiping it (because the `password` argument is always a literal string).
    bool can_be_password = tryGetStringFromArgument(arg_num, nullptr, /* allow_identifier= */ false);
    if (can_be_password)
        markSecretArgument(arg_num);
}

/// Tries to get either a database name or a qualified table name from an argument.
/// An empty string is also allowed (it means the default database).
/// The function is used by findRemoteFunctionSecretArguments() to determine how many arguments to skip before a password.
bool tryGetDatabaseNameOrQualifiedTableName(
    size_t arg_idx,
    std::optional<String> & res_database,
    std::optional<QualifiedTableName> & res_qualified_table_name) const
{
    res_database.reset();
    res_qualified_table_name.reset();

    String str;
    if (!tryGetStringFromArgument(arg_idx, &str, /* allow_identifier= */ true))
        return false;

    if (str.empty())
    {
        res_database = "";
        return true;
    }

    auto qualified_table_name = QualifiedTableName::tryParseFromString(str);
    if (!qualified_table_name)
        return false;

    if (qualified_table_name->database.empty())
        res_database = std::move(qualified_table_name->table);
    else
        res_qualified_table_name = std::move(qualified_table_name);
    return true;
}

void findEncryptionFunctionSecretArguments()
{
    if (arguments.getNodes().empty())
        return;

    /// We replace all arguments after 'mode' with '[HIDDEN]':
    /// encrypt('mode', 'plaintext', 'key' [, iv, aad]) -> encrypt('mode', '[HIDDEN]')
    result.start = 1;
    result.count = arguments.getNodes().size() - 1;
}


/// Checks whether the specified argument can be the name of a named collection.
bool isNamedCollectionName(size_t arg_idx) const
{
    if (arguments.getNodes().size() <= arg_idx)
        return false;

    const auto * identifier = arguments.getNodes()[arg_idx]->as<IdentifierNode>();
    return identifier != nullptr;
}

/// Looks for a secret argument with a specified name. This function looks for arguments in the format `key=value` where the key is specified.
void findSecretNamedArgument(const std::string_view & key, size_t start = 0)
{
    for (size_t i = start; i < arguments.getNodes().size(); ++i)
    {
        const auto & argument = arguments.getNodes()[i];
        const auto * equals_func = argument->as<FunctionNode>();
        if (!equals_func || (equals_func->getFunctionName() != "equals"))
            continue;

        const auto * expr_list = equals_func->getArguments().as<ListNode>();
        if (!expr_list)
            continue;

        const auto & equal_args = expr_list->getNodes();
        if (equal_args.size() != 2)
            continue;

        String found_key;
        if (!tryGetStringFromArgument(equal_args[0], &found_key))
            continue;

        if (found_key == key)
            markSecretArgument(i, /* argument_is_named= */ true);
    }
}
};

}
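Aside: the findSecretNamedArgument scan above boils down to walking the argument list, picking out `key=value` pairs, and marking the positions whose key matches. A minimal standalone sketch of that idea, using plain strings instead of the analyzer's FunctionNode/ListNode machinery (all names below are hypothetical):

#include <iostream>
#include <string>
#include <string_view>
#include <vector>

/// Return the positions of arguments whose "key=value" key matches `key`;
/// the caller would replace those values with '[HIDDEN]'.
std::vector<size_t> findSecretNamedArguments(std::string_view key, const std::vector<std::string> & args)
{
    std::vector<size_t> secret_positions;
    for (size_t i = 0; i < args.size(); ++i)
    {
        auto pos = args[i].find('=');
        if (pos == std::string::npos)
            continue; /// Not a key=value pair, like the `equals` check above.
        if (std::string_view(args[i]).substr(0, pos) == key)
            secret_positions.push_back(i);
    }
    return secret_positions;
}

int main()
{
    for (size_t i : findSecretNamedArguments("password", {"host=localhost", "password=secret"}))
        std::cout << "argument #" << i << " -> [HIDDEN]\n";
}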
@ -94,7 +94,8 @@ public:
        if (!func_node || func_node->getArguments().getNodes().size() != 1)
            return;

        const auto * column_id = func_node->getArguments().getNodes()[0]->as<ColumnNode>();
        const auto & argument_node = func_node->getArguments().getNodes()[0];
        const auto * column_id = argument_node->as<ColumnNode>();
        if (!column_id)
            return;

@ -119,7 +120,7 @@ public:
        if (!preimage_range)
            return;

        const auto new_node = generateOptimizedDateFilter(comparator, *column_id, *preimage_range);
        const auto new_node = generateOptimizedDateFilter(comparator, argument_node, *preimage_range);

        if (!new_node)
            return;
@ -128,20 +129,22 @@ public:
    }

private:
    QueryTreeNodePtr
    generateOptimizedDateFilter(const String & comparator, const ColumnNode & column_node, const std::pair<Field, Field> & range) const
    QueryTreeNodePtr generateOptimizedDateFilter(
        const String & comparator, const QueryTreeNodePtr & column_node, const std::pair<Field, Field> & range) const
    {
        const DateLUTImpl & date_lut = DateLUT::instance("UTC");

        String start_date_or_date_time;
        String end_date_or_date_time;

        if (isDateOrDate32(column_node.getColumnType().get()))
        const auto & column_node_typed = column_node->as<ColumnNode &>();
        const auto & column_type = column_node_typed.getColumnType().get();
        if (isDateOrDate32(column_type))
        {
            start_date_or_date_time = date_lut.dateToString(range.first.get<DateLUTImpl::Time>());
            end_date_or_date_time = date_lut.dateToString(range.second.get<DateLUTImpl::Time>());
        }
        else if (isDateTime(column_node.getColumnType().get()) || isDateTime64(column_node.getColumnType().get()))
        else if (isDateTime(column_type) || isDateTime64(column_type))
        {
            start_date_or_date_time = date_lut.timeToString(range.first.get<DateLUTImpl::Time>());
            end_date_or_date_time = date_lut.timeToString(range.second.get<DateLUTImpl::Time>());
@ -151,69 +154,29 @@ private:

        if (comparator == "equals")
        {
            const auto lhs = std::make_shared<FunctionNode>("greaterOrEquals");
            lhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
            lhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
            resolveOrdinaryFunctionNode(*lhs, lhs->getFunctionName());

            const auto rhs = std::make_shared<FunctionNode>("less");
            rhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
            rhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
            resolveOrdinaryFunctionNode(*rhs, rhs->getFunctionName());

            const auto new_date_filter = std::make_shared<FunctionNode>("and");
            new_date_filter->getArguments().getNodes() = {lhs, rhs};
            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

            return new_date_filter;
            return createFunctionNode(
                "and",
                createFunctionNode("greaterOrEquals", column_node, std::make_shared<ConstantNode>(start_date_or_date_time)),
                createFunctionNode("less", column_node, std::make_shared<ConstantNode>(end_date_or_date_time)));
        }
        else if (comparator == "notEquals")
        {
            const auto lhs = std::make_shared<FunctionNode>("less");
            lhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
            lhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
            resolveOrdinaryFunctionNode(*lhs, lhs->getFunctionName());

            const auto rhs = std::make_shared<FunctionNode>("greaterOrEquals");
            rhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
            rhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
            resolveOrdinaryFunctionNode(*rhs, rhs->getFunctionName());

            const auto new_date_filter = std::make_shared<FunctionNode>("or");
            new_date_filter->getArguments().getNodes() = {lhs, rhs};
            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

            return new_date_filter;
            return createFunctionNode(
                "or",
                createFunctionNode("less", column_node, std::make_shared<ConstantNode>(start_date_or_date_time)),
                createFunctionNode("greaterOrEquals", column_node, std::make_shared<ConstantNode>(end_date_or_date_time)));
        }
        else if (comparator == "greater")
        {
            const auto new_date_filter = std::make_shared<FunctionNode>("greaterOrEquals");
            new_date_filter->getArguments().getNodes().push_back(
                std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

            return new_date_filter;
            return createFunctionNode("greaterOrEquals", column_node, std::make_shared<ConstantNode>(end_date_or_date_time));
        }
        else if (comparator == "lessOrEquals")
        {
            const auto new_date_filter = std::make_shared<FunctionNode>("less");
            new_date_filter->getArguments().getNodes().push_back(
                std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

            return new_date_filter;
            return createFunctionNode("less", column_node, std::make_shared<ConstantNode>(end_date_or_date_time));
        }
        else if (comparator == "less" || comparator == "greaterOrEquals")
        {
            const auto new_date_filter = std::make_shared<FunctionNode>(comparator);
            new_date_filter->getArguments().getNodes().push_back(
                std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

            return new_date_filter;
            return createFunctionNode(comparator, column_node, std::make_shared<ConstantNode>(start_date_or_date_time));
        }
        else [[unlikely]]
        {
@ -224,10 +187,17 @@ private:
        }
    }

    void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
    template <typename... Args>
    QueryTreeNodePtr createFunctionNode(const String & function_name, Args &&... args) const
    {
        auto function = FunctionFactory::instance().get(function_name, getContext());
        function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
        const auto function_node = std::make_shared<FunctionNode>(function_name);
        auto & new_arguments = function_node->getArguments().getNodes();
        new_arguments.reserve(sizeof...(args));
        (new_arguments.push_back(std::forward<Args>(args)), ...);
        function_node->resolveAsFunction(function->build(function_node->getArgumentColumns()));

        return function_node;
    }
};

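Aside: the refactoring above collapses four lines of FunctionNode boilerplate per comparison branch into one variadic createFunctionNode call. The core trick is a C++17 fold expression that pushes each forwarded argument into the child list; a self-contained sketch of the same pattern, with a hypothetical Node type standing in for the query tree:

#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Node
{
    std::string name;
    std::vector<std::shared_ptr<Node>> children;
};

/// Build a node and append every forwarded argument as a child,
/// the same fold used by createFunctionNode above.
template <typename... Args>
std::shared_ptr<Node> createNode(std::string name, Args &&... args)
{
    auto node = std::make_shared<Node>();
    node->name = std::move(name);
    node->children.reserve(sizeof...(args));
    (node->children.push_back(std::forward<Args>(args)), ...);
    return node;
}

int main()
{
    auto filter = createNode("and", createNode("greaterOrEquals"), createNode("less"));
    std::cout << filter->name << " has " << filter->children.size() << " children\n"; /// prints: and has 2 children
}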
@ -3,6 +3,7 @@
#include <Common/checkStackSize.h>
#include <Common/NamePrompter.h>
#include <Common/ProfileEvents.h>
#include <Analyzer/FunctionSecretArgumentsFinderTreeNode.h>

#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
@ -706,7 +707,10 @@ struct IdentifierResolveScope
    {
        subquery_depth = parent_scope->subquery_depth;
        context = parent_scope->context;
        projection_mask_map = parent_scope->projection_mask_map;
    }
    else
        projection_mask_map = std::make_shared<std::map<IQueryTreeNode::Hash, size_t>>();

    if (auto * union_node = scope_node->as<UnionNode>())
    {
@ -718,6 +722,11 @@ struct IdentifierResolveScope
        group_by_use_nulls = context->getSettingsRef().group_by_use_nulls &&
            (query_node->isGroupByWithGroupingSets() || query_node->isGroupByWithRollup() || query_node->isGroupByWithCube());
    }

    if (context)
        join_use_nulls = context->getSettingsRef().join_use_nulls;
    else if (parent_scope)
        join_use_nulls = parent_scope->join_use_nulls;
}

QueryTreeNodePtr scope_node;
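Aside: the constructor change above follows a share-or-allocate pattern: a child scope reuses its parent's projection_mask_map, and only the root scope allocates one, so every scope in a query sees the same mask numbering. A minimal sketch of the pattern (hypothetical Scope type, string keys instead of tree hashes):

#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Scope
{
    std::shared_ptr<std::map<std::string, size_t>> projection_mask_map;

    explicit Scope(const Scope * parent)
    {
        if (parent)
            projection_mask_map = parent->projection_mask_map; /// child shares the parent's map
        else
            projection_mask_map = std::make_shared<std::map<std::string, size_t>>(); /// only the root allocates
    }
};

int main()
{
    Scope root(nullptr);
    Scope child(&root);
    (*child.projection_mask_map)["secret"] = 1;
    std::cout << root.projection_mask_map->size() << '\n'; /// prints 1: both scopes see the same map
}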
@ -772,6 +781,8 @@ struct IdentifierResolveScope

    /// Apply nullability to aggregation keys
    bool group_by_use_nulls = false;
    /// Join returns NULLs instead of default values
    bool join_use_nulls = false;

    /// JOINs count
    size_t joins_count = 0;
@ -784,6 +795,9 @@ struct IdentifierResolveScope
      */
    QueryTreeNodePtr expression_join_tree_node;

    /// Node hash to mask id map
    std::shared_ptr<std::map<IQueryTreeNode::Hash, size_t>> projection_mask_map;

    [[maybe_unused]] const IdentifierResolveScope * getNearestQueryScope() const
    {
        const IdentifierResolveScope * scope_to_check = this;
@ -1068,6 +1082,8 @@ private:
class QueryAnalyzer
{
public:
    explicit QueryAnalyzer(bool only_analyze_) : only_analyze(only_analyze_) {}

    void resolve(QueryTreeNodePtr & node, const QueryTreeNodePtr & table_expression, ContextPtr context)
    {
        IdentifierResolveScope scope(node, nullptr /*parent_scope*/);
@ -1430,6 +1446,7 @@ private:
    /// Global scalar subquery to scalar value map
    std::unordered_map<QueryTreeNodePtrWithHash, Block> scalar_subquery_to_scalar_value;

    const bool only_analyze;
};

/// Utility functions implementation
@ -1977,80 +1994,96 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
    auto interpreter = std::make_unique<InterpreterSelectQueryAnalyzer>(node->toAST(), subquery_context, subquery_context->getViewSource(), options);

    auto io = interpreter->execute();

    PullingAsyncPipelineExecutor executor(io.pipeline);
    io.pipeline.setProgressCallback(context->getProgressCallback());
    io.pipeline.setProcessListElement(context->getProcessListElement());

    Block block;

    while (block.rows() == 0 && executor.pull(block))
    {
    }

    if (block.rows() == 0)
    {
        auto types = interpreter->getSampleBlock().getDataTypes();
        if (types.size() != 1)
            types = {std::make_shared<DataTypeTuple>(types)};

        auto & type = types[0];
        if (!type->isNullable())
        {
            if (!type->canBeInsideNullable())
                throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY,
                    "Scalar subquery returned empty result of type {} which cannot be Nullable",
                    type->getName());

            type = makeNullable(type);
        }

        auto scalar_column = type->createColumn();
        scalar_column->insert(Null());
        scalar_block.insert({std::move(scalar_column), type, "null"});
    }
    else
    {
        if (block.rows() != 1)
            throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row");

        Block tmp_block;
        while (tmp_block.rows() == 0 && executor.pull(tmp_block))
        {
        }

        if (tmp_block.rows() != 0)
            throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row");

        block = materializeBlock(block);
        size_t columns = block.columns();

        if (columns == 1)
        {
            auto & column = block.getByPosition(0);
            /// Here we wrap the type into Nullable if we can.
            /// It is needed because if the subquery returns no rows, its result will be Null.
            /// In case of many columns, do not check it because a tuple can't be nullable.
            if (!column.type->isNullable() && column.type->canBeInsideNullable())
            {
                column.type = makeNullable(column.type);
                column.column = makeNullable(column.column);
            }

            scalar_block = block;
        }
        else
        {
            /** Make unique column names for tuple.
              *
              * Example: SELECT (SELECT 2 AS x, x)
              */
            makeUniqueColumnNamesInBlock(block);

            scalar_block.insert({
                ColumnTuple::create(block.getColumns()),
                std::make_shared<DataTypeTuple>(block.getDataTypes(), block.getNames()),
                "tuple"});
        }
    }
    if (only_analyze)
    {
        /// If the query is only analyzed, then the constants are not correct.
        scalar_block = interpreter->getSampleBlock();
        for (auto & column : scalar_block)
        {
            if (column.column->empty())
            {
                auto mut_col = column.column->cloneEmpty();
                mut_col->insertDefault();
                column.column = std::move(mut_col);
            }
        }
    }
    else
    {
        Block block;

        while (block.rows() == 0 && executor.pull(block))
        {
        }

        if (block.rows() == 0)
        {
            auto types = interpreter->getSampleBlock().getDataTypes();
            if (types.size() != 1)
                types = {std::make_shared<DataTypeTuple>(types)};

            auto & type = types[0];
            if (!type->isNullable())
            {
                if (!type->canBeInsideNullable())
                    throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY,
                        "Scalar subquery returned empty result of type {} which cannot be Nullable",
                        type->getName());

                type = makeNullable(type);
            }

            auto scalar_column = type->createColumn();
            scalar_column->insert(Null());
            scalar_block.insert({std::move(scalar_column), type, "null"});
        }
        else
        {
            if (block.rows() != 1)
                throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row");

            Block tmp_block;
            while (tmp_block.rows() == 0 && executor.pull(tmp_block))
            {
            }

            if (tmp_block.rows() != 0)
                throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row");

            block = materializeBlock(block);
            size_t columns = block.columns();

            if (columns == 1)
            {
                auto & column = block.getByPosition(0);
                /// Here we wrap the type into Nullable if we can.
                /// It is needed because if the subquery returns no rows, its result will be Null.
                /// In case of many columns, do not check it because a tuple can't be nullable.
                if (!column.type->isNullable() && column.type->canBeInsideNullable())
                {
                    column.type = makeNullable(column.type);
                    column.column = makeNullable(column.column);
                }

                scalar_block = block;
            }
            else
            {
                /** Make unique column names for tuple.
                  *
                  * Example: SELECT (SELECT 2 AS x, x)
                  */
                makeUniqueColumnNamesInBlock(block);

                scalar_block.insert({
                    ColumnTuple::create(block.getColumns()),
                    std::make_shared<DataTypeTuple>(block.getDataTypes(), block.getNames()),
                    "tuple"});
            }
        }
    }

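Aside: both branches above enforce the same contract: a scalar subquery yields exactly one value, zero rows become NULL (hence the makeNullable wrapping), and more than one row is an error. A minimal sketch of that contract over a plain vector (hypothetical helper, not the analyzer's API):

#include <iostream>
#include <optional>
#include <stdexcept>
#include <vector>

/// Zero rows -> NULL (nullopt), one row -> the value, more -> error.
std::optional<int> scalarFromRows(const std::vector<int> & rows)
{
    if (rows.empty())
        return std::nullopt; /// mirrors `type = makeNullable(type); scalar_column->insert(Null())`
    if (rows.size() > 1)
        throw std::runtime_error("Scalar subquery returned more than one row");
    return rows.front();
}

int main()
{
    std::cout << scalarFromRows({42}).value() << '\n';   /// 42
    std::cout << scalarFromRows({}).has_value() << '\n'; /// 0, i.e. NULL
}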
@ -3286,7 +3319,6 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromJoin(const IdentifierLoo
    QueryTreeNodePtr resolved_identifier;

    JoinKind join_kind = from_join_node.getKind();
    bool join_use_nulls = scope.context->getSettingsRef().join_use_nulls;

    /// If columns from the left or right table are missing Object(Nullable('json')) subcolumns, they will be replaced
    /// with ConstantNode(NULL), which can't be cast to ColumnNode, so we resolve it here.
@ -3451,7 +3483,7 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromJoin(const IdentifierLoo
    if (join_node_in_resolve_process || !resolved_identifier)
        return resolved_identifier;

    if (join_use_nulls)
    if (scope.join_use_nulls)
    {
        resolved_identifier = resolved_identifier->clone();
        convertJoinedColumnTypeToNullIfNeeded(resolved_identifier, join_kind, resolved_side);
@ -4439,7 +4471,7 @@ ProjectionNames QueryAnalyzer::resolveMatcher(QueryTreeNodePtr & matcher_node, I
    else
        matched_expression_nodes_with_names = resolveUnqualifiedMatcher(matcher_node, scope);

    if (scope.context->getSettingsRef().join_use_nulls)
    if (scope.join_use_nulls)
    {
        /** If we are resolving a matcher that came from the result of a JOIN and `join_use_nulls` is set,
          * we need to convert joined column type to Nullable.
@ -5124,22 +5156,31 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
    }

    /// Resolve function arguments

    bool allow_table_expressions = is_special_function_in;
    auto arguments_projection_names = resolveExpressionNodeList(function_node_ptr->getArgumentsNode(),
        scope,
        true /*allow_lambda_expression*/,
        allow_table_expressions /*allow_table_expression*/);

    if (function_node_ptr->toAST()->hasSecretParts())
    {
        for (auto & argument : arguments_projection_names)
        {
            SipHash hash;
            hash.update(argument);
            argument = getHexUIntLowercase(hash.get128());
        }
    }
    /// Mask arguments if needed
    if (!scope.context->getSettingsRef().format_display_secrets_in_show_and_select)
    {
        if (FunctionSecretArgumentsFinder::Result secret_arguments = FunctionSecretArgumentsFinderTreeNode(*function_node_ptr).getResult(); secret_arguments.count)
        {
            auto & argument_nodes = function_node_ptr->getArgumentsNode()->as<ListNode &>().getNodes();

            for (size_t n = secret_arguments.start; n < secret_arguments.start + secret_arguments.count; ++n)
            {
                if (auto * constant = argument_nodes[n]->as<ConstantNode>())
                {
                    auto mask = scope.projection_mask_map->insert({constant->getTreeHash(), scope.projection_mask_map->size() + 1}).first->second;
                    constant->setMaskId(mask);
                    arguments_projection_names[n] = "[HIDDEN id: " + std::to_string(mask) + "]";
                }
            }
        }
    }

    auto & function_node = *function_node_ptr;

    /// Replace right IN function argument if it is a table or table function with a subquery that reads ordinary columns
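Aside: the masking above hands out one id per distinct secret constant (keyed by tree hash), so the same secret reused twice gets the same "[HIDDEN id: N]" placeholder. The insert-or-lookup idiom in a standalone sketch, with string keys standing in for getTreeHash():

#include <iostream>
#include <map>
#include <string>

int main()
{
    std::map<std::string, size_t> mask_ids; /// keyed by the constant's tree hash in the real code
    for (std::string secret : {"pass1", "pass2", "pass1"})
    {
        /// insert() returns the existing entry if the key is already present.
        size_t id = mask_ids.insert({secret, mask_ids.size() + 1}).first->second;
        std::cout << "[HIDDEN id: " << id << "]\n"; /// prints ids 1, 2, 1
    }
}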
@ -6651,7 +6692,6 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
    if (column_default && column_default->kind == ColumnDefaultKind::Alias)
    {
        auto alias_expression = buildQueryTree(column_default->expression, scope.context);
        alias_expression = buildCastFunction(alias_expression, column_name_and_type.type, scope.context, false /*resolve*/);
        auto column_node = std::make_shared<ColumnNode>(column_name_and_type, std::move(alias_expression), table_expression_node);
        column_name_to_column_node.emplace(column_name_and_type.name, column_node);
        alias_columns_to_resolve.emplace_back(column_name_and_type.name, column_node);
@ -6684,7 +6724,9 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
            alias_column_resolve_scope,
            false /*allow_lambda_expression*/,
            false /*allow_table_expression*/);
        auto & resolved_expression = alias_column_to_resolve->getExpression();
        if (!resolved_expression->getResultType()->equals(*alias_column_to_resolve->getResultType()))
            resolved_expression = buildCastFunction(resolved_expression, alias_column_to_resolve->getResultType(), scope.context, true);
        column_name_to_column_node = std::move(alias_column_resolve_scope.column_name_to_column_node);
        column_name_to_column_node[alias_column_to_resolve_name] = alias_column_to_resolve;
    }
@ -7558,8 +7600,22 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
    }

    if (query_node_typed.getPrewhere())
    {
        /** An expression in PREWHERE with JOIN should not be modified by join_use_nulls.
          * Example: SELECT * FROM t1 JOIN t2 USING (id) PREWHERE a = 1
          * Column `a` should be resolved from the table and should not change its type to Nullable.
          */
        bool join_use_nulls = scope.join_use_nulls;
        bool use_identifier_lookup_to_result_cache = scope.use_identifier_lookup_to_result_cache;
        scope.join_use_nulls = false;
        scope.use_identifier_lookup_to_result_cache = false;

        resolveExpressionNode(query_node_typed.getPrewhere(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

        scope.join_use_nulls = join_use_nulls;
        scope.use_identifier_lookup_to_result_cache = use_identifier_lookup_to_result_cache;
    }

    if (query_node_typed.getWhere())
        resolveExpressionNode(query_node_typed.getWhere(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

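Aside: the PREWHERE handling above uses a plain save/restore of two scope flags around the resolve call. The same pattern wrapped in a helper, as a compact sketch (hypothetical Scope; the real code inlines the save and restore around resolveExpressionNode):

#include <iostream>

struct Scope { bool join_use_nulls = true; };

/// Temporarily clear the flag, run the resolver, then restore the old value.
template <typename F>
void withJoinUseNullsDisabled(Scope & scope, F && resolve)
{
    bool saved = scope.join_use_nulls;
    scope.join_use_nulls = false; /// PREWHERE columns must keep their original table types
    resolve(scope);
    scope.join_use_nulls = saved;
}

int main()
{
    Scope scope;
    withJoinUseNullsDisabled(scope, [](Scope & s) { std::cout << s.join_use_nulls << '\n'; }); /// 0 inside
    std::cout << scope.join_use_nulls << '\n'; /// 1 again afterwards
}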
@ -7749,13 +7805,16 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier

}

QueryAnalysisPass::QueryAnalysisPass(QueryTreeNodePtr table_expression_)
QueryAnalysisPass::QueryAnalysisPass(QueryTreeNodePtr table_expression_, bool only_analyze_)
    : table_expression(std::move(table_expression_))
    , only_analyze(only_analyze_)
{}

QueryAnalysisPass::QueryAnalysisPass(bool only_analyze_) : only_analyze(only_analyze_) {}

void QueryAnalysisPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context)
{
    QueryAnalyzer analyzer;
    QueryAnalyzer analyzer(only_analyze);
    analyzer.resolve(query_tree_node, table_expression, context);
    createUniqueTableAliases(query_tree_node, table_expression, context);
}
@ -71,13 +71,13 @@ public:
    /** Construct query analysis pass for query or union analysis.
      * Available columns are extracted from query node join tree.
      */
    QueryAnalysisPass() = default;
    explicit QueryAnalysisPass(bool only_analyze_ = false);

    /** Construct query analysis pass for expression or list of expressions analysis.
      * Available expression columns are extracted from table expression.
      * Table expression node must have query, union, table, table function type.
      */
    explicit QueryAnalysisPass(QueryTreeNodePtr table_expression_);
    QueryAnalysisPass(QueryTreeNodePtr table_expression_, bool only_analyze_ = false);

    String getName() override
    {
@ -93,6 +93,7 @@ public:

private:
    QueryTreeNodePtr table_expression;
    const bool only_analyze;
};

}
@ -246,9 +246,9 @@ void QueryTreePassManager::dump(WriteBuffer & buffer, size_t up_to_pass_index)
    }
}

void addQueryTreePasses(QueryTreePassManager & manager)
void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze)
{
    manager.addPass(std::make_unique<QueryAnalysisPass>());
    manager.addPass(std::make_unique<QueryAnalysisPass>(only_analyze));
    manager.addPass(std::make_unique<GroupingFunctionsResolvePass>());

    manager.addPass(std::make_unique<RemoveUnusedProjectionColumnsPass>());
@ -47,6 +47,6 @@ private:
    std::vector<QueryTreePassPtr> passes;
};

void addQueryTreePasses(QueryTreePassManager & manager);
void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze = false);

}
@ -18,6 +18,7 @@
#include <Interpreters/executeDDLQueryOnCluster.h>
#include <Parsers/ASTBackupQuery.h>
#include <Parsers/ASTFunction.h>
#include <Common/CurrentThread.h>
#include <Common/Exception.h>
#include <Common/Macros.h>
#include <Common/logger_useful.h>
@ -486,7 +487,7 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context
        /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously.
        auto process_list_element = context_in_use->getProcessListElement();

        scheduleFromThreadPool<void>(
        thread_pool.scheduleOrThrowOnError(
            [this,
             backup_query,
             backup_id,
@ -502,6 +503,8 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context
                BackupMutablePtr backup_async;
                try
                {
                    setThreadName("BackupWorker");
                    CurrentThread::QueryScope query_scope(context_in_use);
                    doBackup(
                        backup_async,
                        backup_query,
@ -517,8 +520,7 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context
                {
                    on_exception(backup_async, backup_id, backup_name_for_logging, backup_settings, backup_coordination);
                }
            },
            thread_pool, "BackupWorker");
            });
    }
    else
    {
@ -864,7 +866,7 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt
        /// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously.
        auto process_list_element = context_in_use->getProcessListElement();

        scheduleFromThreadPool<void>(
        thread_pool.scheduleOrThrowOnError(
            [this,
             restore_query,
             restore_id,
@ -878,6 +880,8 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt
            {
                try
                {
                    setThreadName("RestorerWorker");
                    CurrentThread::QueryScope query_scope(context_in_use);
                    doRestore(
                        restore_query,
                        restore_id,
@ -891,9 +895,7 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt
                {
                    on_exception(restore_id, backup_name_for_logging, restore_settings, restore_coordination);
                }
            },
            thread_pool,
            "RestoreWorker");
            });
    }
    else
    {
@ -15,8 +15,6 @@

namespace DB
{
namespace fs = std::filesystem;

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
@ -65,13 +63,13 @@ void registerBackupEngineS3(BackupFactory & factory)
            secret_access_key = config.getString(config_prefix + ".secret_access_key", "");

            if (config.has(config_prefix + ".filename"))
                s3_uri = fs::path(s3_uri) / config.getString(config_prefix + ".filename");
                s3_uri = std::filesystem::path(s3_uri) / config.getString(config_prefix + ".filename");

            if (args.size() > 1)
                throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Backup S3 requires 1 or 2 arguments: named_collection, [filename]");

            if (args.size() == 1)
                s3_uri = fs::path(s3_uri) / args[0].safeGet<String>();
                s3_uri = std::filesystem::path(s3_uri) / args[0].safeGet<String>();
        }
        else
        {
@ -174,6 +174,8 @@ endif ()

add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources})

set_source_files_properties(Common/ThreadFuzzer.cpp PROPERTIES COMPILE_FLAGS "-fomit-frame-pointer -momit-leaf-frame-pointer")

add_library (clickhouse_malloc OBJECT Common/malloc.cpp)
set_source_files_properties(Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin")

@ -115,7 +115,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
    /// At the same time, I want clickhouse-local to always work, regardless.
    /// TODO: get rid of glibc, or replace getaddrinfo with c-ares.

    compression = config.getBool("compression", host != "localhost" && !isLocalAddress(DNSResolver::instance().resolveHost(host)))
    compression = config.getBool("compression", host != "localhost" && !isLocalAddress(DNSResolver::instance().resolveHostAllInOriginOrder(host).front()))
        ? Protocol::Compression::Enable : Protocol::Compression::Disable;

    timeouts = ConnectionTimeouts()
@ -8,10 +8,8 @@
#    include <unicode/ucol.h>
#    include <unicode/unistr.h>
#else
#    if defined(__clang__)
#        pragma clang diagnostic ignored "-Wunused-private-field"
#        pragma clang diagnostic ignored "-Wmissing-noreturn"
#    endif
#    pragma clang diagnostic ignored "-Wunused-private-field"
#    pragma clang diagnostic ignored "-Wmissing-noreturn"
#endif

#include <Common/Exception.h>
25
src/Columns/ColumnUnique.cpp
Normal file
@ -0,0 +1,25 @@
#include <Columns/ColumnUnique.h>

namespace DB
{

/// Explicit template instantiations.
template class ColumnUnique<ColumnInt8>;
template class ColumnUnique<ColumnUInt8>;
template class ColumnUnique<ColumnInt16>;
template class ColumnUnique<ColumnUInt16>;
template class ColumnUnique<ColumnInt32>;
template class ColumnUnique<ColumnUInt32>;
template class ColumnUnique<ColumnInt64>;
template class ColumnUnique<ColumnUInt64>;
template class ColumnUnique<ColumnInt128>;
template class ColumnUnique<ColumnUInt128>;
template class ColumnUnique<ColumnInt256>;
template class ColumnUnique<ColumnUInt256>;
template class ColumnUnique<ColumnFloat32>;
template class ColumnUnique<ColumnFloat64>;
template class ColumnUnique<ColumnString>;
template class ColumnUnique<ColumnFixedString>;
template class ColumnUnique<ColumnDateTime64>;

}
@ -15,6 +15,8 @@
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Common/FieldVisitors.h>
#include "Columns/ColumnsDateTime.h"
#include "Columns/ColumnsNumber.h"

#include <base/range.h>
#include <base/unaligned.h>
@ -736,4 +738,23 @@ UInt128 ColumnUnique<ColumnType>::IncrementalHash::getHash(const ColumnType & co
    return cur_hash;
}


extern template class ColumnUnique<ColumnInt8>;
extern template class ColumnUnique<ColumnUInt8>;
extern template class ColumnUnique<ColumnInt16>;
extern template class ColumnUnique<ColumnUInt16>;
extern template class ColumnUnique<ColumnInt32>;
extern template class ColumnUnique<ColumnUInt32>;
extern template class ColumnUnique<ColumnInt64>;
extern template class ColumnUnique<ColumnUInt64>;
extern template class ColumnUnique<ColumnInt128>;
extern template class ColumnUnique<ColumnUInt128>;
extern template class ColumnUnique<ColumnInt256>;
extern template class ColumnUnique<ColumnUInt256>;
extern template class ColumnUnique<ColumnFloat32>;
extern template class ColumnUnique<ColumnFloat64>;
extern template class ColumnUnique<ColumnString>;
extern template class ColumnUnique<ColumnFixedString>;
extern template class ColumnUnique<ColumnDateTime64>;

}
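Aside: the pair of hunks above is the standard explicit-instantiation split: the new ColumnUnique.cpp provides the one `template class` definition per column type, and the `extern template class` declarations in the header stop every other translation unit from re-instantiating them, cutting compile time and object-file bloat. Both halves of the pattern in one self-contained sketch (a toy Wrapper instead of ColumnUnique; in real code the two marked lines live in separate files):

#include <iostream>

template <typename T>
struct Wrapper
{
    T value;
    T get() const { return value; }
};

extern template struct Wrapper<int>; /// header side: suppresses implicit instantiation
template struct Wrapper<int>;        /// source side: the single explicit instantiation

int main()
{
    Wrapper<int> w{42};
    std::cout << w.get() << '\n'; /// prints 42
}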
Some files were not shown because too many files have changed in this diff.