Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 00:22:29 +00:00)

commit aebadce491
Merge branch 'master' into hanfei/keeperrocks
@@ -27,6 +27,8 @@ Checks: [
    '-bugprone-not-null-terminated-result',
    '-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
    '-bugprone-unchecked-optional-access',
+   '-bugprone-crtp-constructor-accessibility',
+   '-bugprone-suspicious-stringview-data-usage',

    '-cert-dcl16-c',
    '-cert-dcl37-c',
@@ -36,6 +38,7 @@ Checks: [
    '-cert-msc51-cpp',
    '-cert-oop54-cpp',
    '-cert-oop57-cpp',
+   '-cert-err33-c', # Misreports on clang-19: it warns about all functions containing 'remove' in the name, not only about the standard library.

    '-clang-analyzer-optin.performance.Padding',

@@ -99,6 +102,7 @@ Checks: [
    '-modernize-use-emplace',
    '-modernize-use-nodiscard',
    '-modernize-use-trailing-return-type',
+   '-modernize-use-designated-initializers',

    '-performance-enum-size',
    '-performance-inefficient-string-concatenation',
@@ -13,3 +13,6 @@
# dbms/ → src/
# (though it is unlikely that you will see it in blame)
06446b4f08a142d6f1bc30664c47ded88ab51782
+
+# Applied Black formatter for Python code
+e6f5a3f98b21ba99cf274a9833797889e020a2b3
.github/actionlint.yml (vendored): 1 change
@@ -7,3 +7,4 @@ self-hosted-runner:
  - stress-tester
  - style-checker
  - style-checker-aarch64
+ - release-maker
.github/workflows/backport_branches.yml (vendored): 2 changes
@@ -62,7 +62,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}
-   uses: ./.github/workflows/reusable_docker.yml
+   uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  CompatibilityCheckX86:
.github/workflows/create_release.yml (vendored): 151 changes
@@ -6,8 +6,8 @@ concurrency:
'on':
  workflow_dispatch:
    inputs:
-     sha:
-       description: 'The SHA hash of the commit from which to create the release'
+     ref:
+       description: 'Git reference (branch or commit sha) from which to create the release'
        required: true
        type: string
      type:
@@ -15,15 +15,152 @@ concurrency:
        required: true
        type: choice
        options:
-         - new
          - patch
+         - new
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean

jobs:
- Release:
-   runs-on: [self-hosted, style-checker-aarch64]
+ CreateRelease:
+   env:
+     GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+   runs-on: [self-hosted, release-maker]
    steps:
+     - name: DebugInfo
+       uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
+         RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
          EOF
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
-     - name: Print greeting
+       with:
+         token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+         fetch-depth: 0
+     - name: Prepare Release Info
        run: |
-         python3 ./tests/ci/release.py --commit ${{ inputs.sha }} --type ${{ inputs.type }} --dry-run
+         python3 ./tests/ci/create_release.py --prepare-release-info \
+           --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
+           --outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+         echo "::group::Release Info"
+         python3 -m json.tool "$RELEASE_INFO_FILE"
+         echo "::endgroup::"
+         release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
+         commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
+         echo "Release Tag: $release_tag"
+         echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
+         echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
+     - name: Download All Release Artifacts
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Push Git Tag for the Release
+       run: |
+         python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Push New Release Branch
+       if: ${{ inputs.type == 'new' }}
+       run: |
+         python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Bump CH Version and Update Contributors' List
+       run: |
+         python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Checkout master
+       run: |
+         git checkout master
+     - name: Bump Docker versions, Changelog, Security
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
+         echo "List versions"
+         ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
+         echo "Update docker version"
+         ./utils/list-versions/update-docker-version.sh
+         echo "Generate ChangeLog"
+         export CI=1
+         docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
+           --volume=".:/ClickHouse" clickhouse/style-test \
+           /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
+           --gh-user-or-token="$GH_TOKEN" --jobs=5 \
+           --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
+         git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
+         echo "Generate Security"
+         python3 ./utils/security-generator/generate_security.py > SECURITY.md
+         git diff HEAD
+     - name: Create ChangeLog PR
+       if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
+       uses: peter-evans/create-pull-request@v6
+       with:
+         author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+         token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+         committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+         commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+         branch: auto/${{ env.RELEASE_TAG }}
+         assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
+         delete-branch: true
+         title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
+         labels: do not test
+         body: |
+           Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+
+           ### Changelog category (leave one):
+           - Not for changelog (changelog entry is not required)
+     - name: Reset changes if Dry-run
+       if: ${{ inputs.dry-run }}
+       run: |
+         git reset --hard HEAD
+     - name: Checkout back to GITHUB_REF
+       run: |
+         git checkout "$GITHUB_REF_NAME"
+     - name: Create GH Release
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/create_release.py --create-gh-release \
+           --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+
+     - name: Export TGZ Packages
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Test TGZ Packages
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Export RPM Packages
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Test RPM Packages
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Export Debian Packages
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Test Debian Packages
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+     - name: Docker clickhouse/clickhouse-server building
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         cd "./tests/ci"
+         export CHECK_NAME="Docker server image"
+         python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+     - name: Docker clickhouse/clickhouse-keeper building
+       if: ${{ inputs.type == 'patch' }}
+       run: |
+         cd "./tests/ci"
+         export CHECK_NAME="Docker keeper image"
+         python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+     - name: Post Slack Message
+       if: always()
+       run: |
+         echo Slack Message
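The "Prepare Release Info" step above writes a JSON file and then pulls two fields out of it with `jq`. For illustration only, the equivalent lookup in Python, assuming a hypothetical local copy of that file (only the `release_tag` and `commit_sha` fields read by the jq calls are assumed; the rest of the layout is unknown):

```python
import json

# Hypothetical offline peek at the file the workflow stores in $RELEASE_INFO_FILE.
with open("release_info.json") as f:
    info = json.load(f)

# Mirrors: jq -r '.release_tag' and jq -r '.commit_sha'
print("Release Tag:", info["release_tag"])
print("Commit SHA:", info["commit_sha"])
```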
.github/workflows/master.yml (vendored): 2 changes
@@ -58,7 +58,7 @@ jobs:
  # BuildDockers:
  #   needs: [RunConfig]
  #   if: ${{ !failure() && !cancelled() }}
- #   uses: ./.github/workflows/reusable_docker.yml
+ #   uses: ./.github/workflows/docker_test_images.yml
  #   with:
  #     data: ${{ needs.RunConfig.outputs.data }}
  # StyleCheck:
.github/workflows/merge_queue.yml (vendored): 2 changes
@@ -51,7 +51,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-   uses: ./.github/workflows/reusable_docker.yml
+   uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  StyleCheck:
.github/workflows/nightly.yml (vendored): 2 changes
@@ -40,7 +40,7 @@ jobs:
        } >> "$GITHUB_OUTPUT"
  BuildDockers:
    needs: [RunConfig]
-   uses: ./.github/workflows/reusable_docker.yml
+   uses: ./.github/workflows/docker_test_images.yml
    with:
      data: "${{ needs.RunConfig.outputs.data }}"
      set_latest: true
.github/workflows/pull_request.yml (vendored): 4 changes
@@ -72,7 +72,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-   uses: ./.github/workflows/reusable_docker.yml
+   uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  StyleCheck:
@@ -172,7 +172,7 @@ jobs:
  ################################# Stage Final #################################
  #
  FinishCheck:
-   if: ${{ !failure() }}
+   if: ${{ !failure() && !cancelled() }}
    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
.github/workflows/release_branches.yml (vendored): 2 changes
@@ -57,7 +57,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}
-   uses: ./.github/workflows/reusable_docker.yml
+   uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  CompatibilityCheckX86:
.github/workflows/reusable_test.yml (vendored): 2 changes
@@ -102,6 +102,8 @@ jobs:
            --job-name '${{inputs.test_name}}' \
            --run \
            --run-command '''${{inputs.run_command}}'''
+         # shellcheck disable=SC2319
          echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
      - name: Post run
        if: ${{ !cancelled() }}
        run: |
@@ -3,8 +3,9 @@
#include <base/defines.h>

#include <fstream>
#include <sstream>
#include <string>

+namespace fs = std::filesystem;

bool cgroupsV2Enabled()
{
@@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
    {
        /// This file exists iff the host has cgroups v2 enabled.
        auto controllers_file = default_cgroups_mount / "cgroup.controllers";
-       if (!std::filesystem::exists(controllers_file))
+       if (!fs::exists(controllers_file))
            return false;
        return true;
    }
-   catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
+   catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
    {
        return false; /// not logging the exception as most callers fall back to cgroups v1
    }
@@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
    /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
    /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
    /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
-   std::string cgroup = cgroupV2OfProcess();
-   auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+   fs::path cgroup_dir = cgroupV2PathOfProcess();
+   if (cgroup_dir.empty())
+       return false;
    std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
    if (!controllers_file.is_open())
        return false;
@@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
#endif
}

-std::string cgroupV2OfProcess()
+fs::path cgroupV2PathOfProcess()
{
#if defined(OS_LINUX)
    chassert(cgroupsV2Enabled());
@@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
    /// A simpler way to get the membership is:
    std::ifstream cgroup_name_file("/proc/self/cgroup");
    if (!cgroup_name_file.is_open())
-       return "";
+       return {};
    /// With cgroups v2, there will be a *single* line with prefix "0::/"
    /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
    std::string cgroup;
    std::getline(cgroup_name_file, cgroup);
    static const std::string v2_prefix = "0::/";
    if (!cgroup.starts_with(v2_prefix))
-       return "";
+       return {};
    cgroup = cgroup.substr(v2_prefix.length());
-   return cgroup;
+   /// Note: The 'root' cgroup can have an empty cgroup name, this is valid
+   return default_cgroups_mount / cgroup;
#else
-   return "";
+   return {};
#endif
}
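As a side note, the parsing that `cgroupV2PathOfProcess()` performs is easy to reproduce outside the codebase. A minimal Python sketch of the same logic, assuming the standard `/sys/fs/cgroup` mount that `default_cgroups_mount` refers to (not part of the commit):

```python
from pathlib import Path

DEFAULT_CGROUPS_MOUNT = Path("/sys/fs/cgroup")  # assumed default mount point

def cgroup_v2_path_of_process() -> Path | None:
    """Parse /proc/self/cgroup; with cgroups v2 it holds a single '0::/...' line."""
    try:
        with open("/proc/self/cgroup") as f:
            line = f.readline().strip()
    except OSError:
        return None
    if not line.startswith("0::/"):
        return None  # not cgroups v2
    # The root cgroup yields an empty name, which is valid.
    return DEFAULT_CGROUPS_MOUNT / line[len("0::/"):]

print(cgroup_v2_path_of_process())
```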
@@ -1,7 +1,6 @@
#pragma once

#include <filesystem>
-#include <string>

#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();

-/// Which cgroup does the process belong to?
-/// Returns an empty string if the cgroup cannot be determined.
+/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
+/// Returns an empty path if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
-std::string cgroupV2OfProcess();
+std::filesystem::path cgroupV2PathOfProcess();
@@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
    if (!cgroupsV2MemoryControllerEnabled())
        return {};

-   std::string cgroup = cgroupV2OfProcess();
-   auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+   std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
+   if (current_cgroup.empty())
+       return {};

    /// Open the bottom-most nested memory limit setting file. If there is no such file at the current
    /// level, try again at the parent level as memory settings are inherited.
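The comment above describes a walk from the bottom-most cgroup up to the mount root. A hedged Python sketch of that lookup (`memory.max` is the cgroup v2 memory limit file; the literal value `max` means "no limit"):

```python
from pathlib import Path

def cgroups_v2_memory_limit(cgroup_dir: Path, mount: Path = Path("/sys/fs/cgroup")) -> int | None:
    """Walk from the process's cgroup up to the mount root, returning the
    first 'memory.max' value found (memory limits are inherited)."""
    current = cgroup_dir
    while True:
        limit_file = current / "memory.max"
        if limit_file.exists():
            value = limit_file.read_text().strip()
            return None if value == "max" else int(value)
        if current == mount:
            return None  # reached the root without finding a limit
        current = current.parent
```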
@@ -42,9 +42,19 @@ endif ()
# But use 2 parallel jobs, since:
# - this is what llvm does
# - and I've verified that lld-11 does not use all available CPU time (in peak) while linking one binary
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
-    message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
-    set (PARALLEL_LINK_JOBS 2)
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
+    if (ARCH_AARCH64)
+        # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
+        set (PARALLEL_LINK_JOBS 1)
+        if (LINKER_NAME MATCHES "lld")
+            math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
+            set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
+        endif()
+    elseif (PARALLEL_LINK_JOBS GREATER 2)
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
+        set (PARALLEL_LINK_JOBS 2)
+    endif ()
endif()

message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")
contrib/azure (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 92c94d7f37a43cc8fc4d466884a95f610c0593bf
+Subproject commit ea3e19a7be08519134c643177d56c7484dfec884
@@ -34,11 +34,7 @@ if (OS_LINUX)
    # avoid spurious latencies and additional work associated with
    # MADV_DONTNEED. See
    # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
-   if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
-       set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
-   else()
-       set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
-   endif()
+   set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
else()
    set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
endif()
@@ -179,12 +175,19 @@ endif ()

target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)

-# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
-# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
-# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
+# jemalloc provides support for two unwind flavors:
+# - JEMALLOC_PROF_LIBUNWIND - unw_backtrace() - gnu libunwind (compatible with llvm libunwind)
+# - JEMALLOC_PROF_LIBGCC - _Unwind_Backtrace() - the original HP libunwind and the one coming with gcc / g++ / libstdc++.
+#
# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
-target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
+# But for JEMALLOC_PROF_LIBGCC it also calls _Unwind_Backtrace() during
+# bootstrapping of jemalloc, which may lead to deadlock if dlsym does
+# allocations somewhere (like glibc does prior to 2.34, see [1]).
+#
+# [1]: https://sourceware.org/git/?p=glibc.git;a=commit;h=fada9018199c21c469ff0e731ef75c6020074ac9
+#
+# And since ClickHouse unwind already supports unw_backtrace() we can safely
+# switch to it to avoid this deadlock.
+target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1)
target_link_libraries (_jemalloc PRIVATE unwind)

# for RTLD_NEXT
@@ -4,3 +4,14 @@ It allows to integrate JEMalloc into CMake project.
- Added JEMALLOC_CONFIG_MALLOC_CONF substitution
- Add musl support (USE_MUSL)
- Also note, that darwin build requires JEMALLOC_PREFIX, while others do not
+- JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE should be disabled
+
+  CLOCK_MONOTONIC_COARSE can go backwards after clock_adjtime(ADJ_FREQUENCY).
+  Let's disable it for now; this means that CLOCK_MONOTONIC will be used,
+  and this should not be a problem, since:
+  - jemalloc does not call clock_gettime() that frequently
+  - the difference is CLOCK_MONOTONIC 20ns and CLOCK_MONOTONIC_COARSE 4ns
+
+  This can be done with the following command:
+
+      gg JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | cut -d: -f1 | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@'
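To see the trade-off described above on a Linux box, the two clocks can be compared from Python. A small sketch (CPython may not export `CLOCK_MONOTONIC_COARSE`, so the raw clock id 6 from `<linux/time.h>` is used as a fallback; not part of the commit):

```python
import time

# CLOCK_MONOTONIC_COARSE is 6 in <linux/time.h>; fall back to the raw id
# if this CPython build does not export the constant (Linux-only sketch).
CLOCK_MONOTONIC_COARSE = getattr(time, "CLOCK_MONOTONIC_COARSE", 6)

print("monotonic resolution:       ", time.clock_getres(time.CLOCK_MONOTONIC))
print("monotonic_coarse resolution:", time.clock_getres(CLOCK_MONOTONIC_COARSE))
# The coarse clock only ticks at the timer interrupt (millisecond-scale
# resolution): a call is cheaper, but the returned value can lag or jump.
```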
@@ -96,7 +96,7 @@
/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -96,7 +96,7 @@
/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -99,7 +99,7 @@
/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
contrib/libunwind (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df
+Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2
@@ -4,9 +4,6 @@ set(LIBUNWIND_CXX_SOURCES
    "${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp"
    "${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp"
    "${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp")
-if (APPLE)
-    set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp")
-endif ()

set(LIBUNWIND_C_SOURCES
    "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c"
@@ -32,6 +29,7 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")

target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1)
+target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_REMEMBER_STACK_ALLOC=1)
# NOTE: sizeof(unw_context_t)/sizeof(unw_cursor_t) depend on this macro, so it should always be set
target_compile_definitions(unwind PUBLIC -D_LIBUNWIND_IS_NATIVE_ONLY)
contrib/openssl (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit ee2bb8513b28bf86b35404dd17a0e29305ca9e08
+Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
contrib/pocketfft (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
+Subproject commit f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3
contrib/rocksdb (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 078fa5638690004e1f744076d1bdcc4e93767304
+Subproject commit be366233921293bd07a84dc4ea6991858665f202
@@ -5,20 +5,13 @@ if (NOT ENABLE_ROCKSDB)
    return()
endif()

## this file is extracted from `contrib/rocksdb/CMakeLists.txt`
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")

set(PORTABLE ON)
## always disable jemalloc for rocksdb by default
## because it introduces non-standard jemalloc APIs
# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
option(WITH_JEMALLOC "build with JeMalloc" OFF)
set(USE_SNAPPY OFF)
if (TARGET ch_contrib::snappy)
    set(USE_SNAPPY ON)
endif()
option(WITH_SNAPPY "build with SNAPPY" ${USE_SNAPPY})
## lz4, zlib, zstd is enabled in ClickHouse by default

option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING

# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
option(WITH_SNAPPY "build with SNAPPY" ON)
option(WITH_LZ4 "build with lz4" ON)
option(WITH_ZLIB "build with zlib" ON)
option(WITH_ZSTD "build with zstd" ON)
@@ -26,78 +19,46 @@ option(WITH_ZSTD "build with zstd" ON)
# third-party/folly is only validated to work on Linux and Windows for now.
# So only turn it on there by default.
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
    if(MSVC AND MSVC_VERSION LESS 1910)
        # Folly does not compile with MSVC older than VS2017
        option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
    else()
        option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
    endif()
    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
else()
    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
endif()

if( NOT DEFINED CMAKE_CXX_STANDARD )
    set(CMAKE_CXX_STANDARD 11)
if(WITH_SNAPPY)
    add_definitions(-DSNAPPY)
    list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
endif()

if(MSVC)
    option(WITH_XPRESS "build with windows built in compression" OFF)
    include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc")
else()
    if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
        # FreeBSD has jemalloc as default malloc
        # but it does not have all the jemalloc files in include/...
        set(WITH_JEMALLOC ON)
    else()
        if(WITH_JEMALLOC AND TARGET ch_contrib::jemalloc)
            add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
            list(APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
        endif()
    endif()

    if(WITH_SNAPPY)
        add_definitions(-DSNAPPY)
        list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
    endif()

    if(WITH_ZLIB)
        add_definitions(-DZLIB)
        list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
    endif()

    if(WITH_LZ4)
        add_definitions(-DLZ4)
        list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
    endif()

    if(WITH_ZSTD)
        add_definitions(-DZSTD)
        list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
    endif()
if(WITH_ZLIB)
    add_definitions(-DZLIB)
    list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
endif()

if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
    if(POWER9)
        set(HAS_POWER9 1)
        set(HAS_ALTIVEC 1)
    else()
        set(HAS_POWER8 1)
        set(HAS_ALTIVEC 1)
    endif(POWER9)
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
if(WITH_LZ4)
    add_definitions(-DLZ4)
    list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
endif()

if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
    set(HAS_ARMV8_CRC 1)
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
if(WITH_ZSTD)
    add_definitions(-DZSTD)
    list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
endif()

option(PORTABLE "build a portable binary" ON)

if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ)
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
    add_definitions(-DHAVE_SSE42)
    add_definitions(-DHAVE_PCLMUL)
endif()

if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
    set (HAS_ARMV8_CRC 1)
    # the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general
    # ARM flags, see cmake/cpu_features.cmake
    # set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
    # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
endif()

set (HAVE_THREAD_LOCAL 1)
if(HAVE_THREAD_LOCAL)
    add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
@@ -107,8 +68,6 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
    add_definitions(-DOS_MACOSX)
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
    add_definitions(-DOS_LINUX)
elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
    add_definitions(-DOS_SOLARIS)
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
    add_definitions(-DOS_FREEBSD)
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
@@ -123,12 +82,10 @@ endif()

if (OS_LINUX)
    add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
    add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
    add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
elseif (OS_FREEBSD)
    add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
endif()

set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")

include_directories(${ROCKSDB_SOURCE_DIR})
include_directories("${ROCKSDB_SOURCE_DIR}/include")
@@ -136,11 +93,11 @@ if(WITH_FOLLY_DISTRIBUTED_MUTEX)
    include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
endif()

# Main library source code

set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/cache/cache.cc
    ${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
    ${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
    ${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
    ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
    ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
    ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
@@ -156,6 +113,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
    ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
    ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
    ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
    ${ROCKSDB_SOURCE_DIR}/db/builder.cc
    ${ROCKSDB_SOURCE_DIR}/db/c.cc
    ${ROCKSDB_SOURCE_DIR}/db/column_family.cc
@@ -229,6 +187,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
    ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
    ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
    ${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
    ${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
    ${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
    ${ROCKSDB_SOURCE_DIR}/file/file_util.cc
@@ -247,6 +206,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
    ${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
    ${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
    ${ROCKSDB_SOURCE_DIR}/memory/memory_allocator.cc
    ${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
    ${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
    ${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
@@ -322,6 +282,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
    ${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
    ${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
    ${ROCKSDB_SOURCE_DIR}/table/unique_id.cc
    ${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
    ${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
    ${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
@@ -333,9 +294,12 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
    ${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
    ${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
    ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
    ${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
    ${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
    ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_handler.cc
    ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
    ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
    ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
    ${ROCKSDB_SOURCE_DIR}/util/coding.cc
    ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
    ${ROCKSDB_SOURCE_DIR}/util/comparator.cc
@@ -347,6 +311,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
    ${ROCKSDB_SOURCE_DIR}/util/random.cc
    ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
    ${ROCKSDB_SOURCE_DIR}/util/regex.cc
    ${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
    ${ROCKSDB_SOURCE_DIR}/util/slice.cc
    ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
@@ -362,18 +327,23 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load_impl.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_secondary_cache.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
@@ -393,6 +363,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
@@ -411,6 +382,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
@@ -425,7 +397,7 @@ set(SOURCES
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
    ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
    rocksdb_build_version.cc)
    build_version.cc) # generated by hand

if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
    set_source_files_properties(
@@ -462,5 +434,6 @@ endif()
add_library(_rocksdb ${SOURCES})
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})

# SYSTEM is required to overcome some issues
target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")
@@ -33,13 +33,9 @@ RUN apt-get update \
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

-COPY * /
-
ENV FUZZER_ARGS="-max_total_time=60"

SHELL ["/bin/bash", "-c"]
-CMD set -o pipefail \
-    && timeout -s 9 1h /run_libfuzzer.py 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log

# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer
@@ -16,11 +16,17 @@ dpkg -i package_folder/clickhouse-client_*.deb
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test

+# shellcheck disable=SC1091
+source /utils.lib
+
# install test configs
/usr/share/clickhouse-test/config/install.sh

azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &

./setup_minio.sh stateful
+./mc admin trace clickminio > /test_output/minio.log &
+MC_ADMIN_PID=$!

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@@ -251,6 +257,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
    sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
fi

+# Kill minio admin client to stop collecting logs
+kill $MC_ADMIN_PID
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:

zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst ||:
@@ -272,3 +280,5 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
    mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
fi
+
+collect_core_dumps
@@ -86,6 +86,7 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1
+ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"

RUN npm install -g azurite@3.30.0 \
    && npm install -g tslib && npm install -g node
@@ -8,6 +8,7 @@ cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
docutils==0.17.1
+grpcio==1.47.0
gyp==0.1
httplib2==0.20.2
idna==3.3
@@ -28,6 +29,7 @@ packaging==24.1
pandas==1.5.3
pip==24.1.1
pipdeptree==2.23.0
+protobuf==4.25.3
pyarrow==15.0.0
pyasn1==0.4.8
PyJWT==2.3.0
@@ -6,13 +6,12 @@ source /setup_export_logs.sh
# fail on errors, verbose and export all env variables
set -e -x -a

-MAX_RUN_TIME=${MAX_RUN_TIME:-10800}
-MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 10800 : MAX_RUN_TIME))
+MAX_RUN_TIME=${MAX_RUN_TIME:-7200}
+MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))

USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}

# disable for now
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0

if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
@@ -55,6 +54,9 @@ source /utils.lib
/usr/share/clickhouse-test/config/install.sh

./setup_minio.sh stateless
+./mc admin trace clickminio > /test_output/minio.log &
+MC_ADMIN_PID=$!

./setup_hdfs_minicluster.sh

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@@ -310,7 +312,7 @@ function run_tests()
    try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"

    set +e
-   timeout -s TERM --preserve-status 120m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
+   timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
        --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt
@@ -321,7 +323,7 @@ export -f run_tests

# This should be enough to setup job and collect artifacts
-TIMEOUT=$((MAX_RUN_TIME - 300))
+TIMEOUT=$((MAX_RUN_TIME - 700))
if [ "$NUM_TRIES" -gt "1" ]; then
    # We don't run tests with Ordinary database in PRs, only in master.
    # So run new/changed tests with Ordinary at least once in flaky check.
@@ -384,6 +386,9 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
fi

+# Kill minio admin client to stop collecting logs
+kill $MC_ADMIN_PID
+
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
@@ -483,3 +488,5 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
    tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
fi
+
+collect_core_dumps
@@ -10,7 +10,7 @@ cd hadoop-3.3.1
export JAVA_HOME=/usr
mkdir -p target/test/data
chown clickhouse ./target/test/data
-sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 &
+sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/hdfs_minicluster.log 2>&1 &

while ! nc -z localhost 12222; do
    sleep 1
@@ -1,8 +1,5 @@
#!/bin/bash

-# core.COMM.PID-TID
-sysctl kernel.core_pattern='core.%e.%p-%P'
-
OK="\tOK\t\\N\t"
FAIL="\tFAIL\t\\N\t"

@@ -315,12 +312,4 @@ function collect_query_and_trace_logs()
    done
}

-function collect_core_dumps()
-{
-    find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
-        zstd --threads=0 "$core"
-        mv "$core.zst" /test_output/
-    done
-}
-
# vi: ft=bash
@@ -1,5 +1,10 @@
#!/bin/bash

+# core.COMM.PID-TID
+sysctl kernel.core_pattern='core.%e.%p-%P'
+# ASAN doesn't work with suid_dumpable=2
+sysctl fs.suid_dumpable=1
+
function run_with_retry()
{
    if [[ $- =~ e ]]; then
@@ -48,4 +53,12 @@ function timeout_with_logging() {
    return $exit_code
}

+function collect_core_dumps()
+{
+    find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
+        zstd --threads=0 "$core"
+        mv "$core.zst" /test_output/
+    done
+}
+
# vi: ft=bash
@@ -21,6 +21,9 @@ source /attach_gdb.lib
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib

+# shellcheck disable=SC1091
+source /utils.lib
+
install_packages package_folder

# Thread Fuzzer allows checking more permutations of possible thread scheduling
@@ -3,7 +3,7 @@ aiosignal==1.3.1
astroid==3.1.0
async-timeout==4.0.3
attrs==23.2.0
-black==23.12.0
+black==24.4.2
boto3==1.34.131
botocore==1.34.131
certifi==2024.6.2
@@ -11,6 +11,7 @@ TIMEOUT_SIGN = "[ Timeout! "
UNKNOWN_SIGN = "[ UNKNOWN "
SKIPPED_SIGN = "[ SKIPPED "
HUNG_SIGN = "Found hung queries in processlist"
+SERVER_DIED_SIGN = "Server died, terminating all processes"
DATABASE_SIGN = "Database: "

SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
@@ -25,6 +26,7 @@ def process_test_log(log_path, broken_tests):
    failed = 0
    success = 0
    hung = False
+   server_died = False
    retries = False
    success_finish = False
    test_results = []
@@ -41,6 +43,8 @@ def process_test_log(log_path, broken_tests):
        if HUNG_SIGN in line:
            hung = True
            break
+       if SERVER_DIED_SIGN in line:
+           server_died = True
        if RETRIES_SIGN in line:
            retries = True
        if any(
@@ -123,6 +127,7 @@ def process_test_log(log_path, broken_tests):
        failed,
        success,
        hung,
+       server_died,
        success_finish,
        retries,
        test_results,
@@ -150,6 +155,7 @@ def process_result(result_path, broken_tests):
        failed,
        success,
        hung,
+       server_died,
        success_finish,
        retries,
        test_results,
@@ -165,6 +171,10 @@ def process_result(result_path, broken_tests):
        description = "Some queries hung, "
        state = "failure"
        test_results.append(("Some queries hung", "FAIL", "0", ""))
+   elif server_died:
+       description = "Server died, "
+       state = "failure"
+       test_results.append(("Server died", "FAIL", "0", ""))
    elif not success_finish:
        description = "Tests are not finished, "
        state = "failure"
@@ -218,5 +228,20 @@ if __name__ == "__main__":
    state, description, test_results = process_result(args.in_results_dir, broken_tests)
    logging.info("Result parsed")
    status = (state, description)

+   def test_result_comparator(item):
+       # sort by status then by check name
+       order = {
+           "FAIL": 0,
+           "Timeout": 1,
+           "NOT_FAILED": 2,
+           "BROKEN": 3,
+           "OK": 4,
+           "SKIPPED": 5,
+       }
+       return order.get(item[1], 10), str(item[0]), item[1]
+
+   test_results.sort(key=test_result_comparator)
+
    write_results(args.out_results_file, args.out_status_file, test_results, status)
    logging.info("Result written")
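For illustration (not part of the commit), here is how the new comparator orders a hypothetical result list: failures first, then timeouts, with `OK` and `SKIPPED` at the end and ties broken by test name:

```python
# Hypothetical data to demonstrate the ordering the new comparator produces.
results = [
    ("02_ok_test", "OK", "0.1", ""),
    ("01_failing_test", "FAIL", "2.0", ""),
    ("03_skipped_test", "SKIPPED", "0", ""),
    ("00_timeout_test", "Timeout", "600", ""),
]
order = {"FAIL": 0, "Timeout": 1, "NOT_FAILED": 2, "BROKEN": 3, "OK": 4, "SKIPPED": 5}
results.sort(key=lambda item: (order.get(item[1], 10), str(item[0]), item[1]))
print([name for name, *_ in results])
# ['01_failing_test', '00_timeout_test', '02_ok_test', '03_skipped_test']
```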
@@ -11,7 +11,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.

-## Install Clang-16
+## Install Clang-18

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
```
@@ -226,15 +226,59 @@ Other IDEs you can use are [Sublime Text](https://www.sublimetext.com/), [Visual

## Writing Code {#writing-code}

-The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/
+Below you can find some quick links which may be useful when writing code for ClickHouse:

-The Code Style Guide: https://clickhouse.com/docs/en/development/style/
+- [ClickHouse architecture description](https://clickhouse.com/docs/en/development/architecture/).
+- [The code style guide](https://clickhouse.com/docs/en/development/style/).
+- [Adding third-party libraries](https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries)
+- [Writing tests](https://clickhouse.com/docs/en/development/tests/)
+- [List of open issues](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest)

-Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries
+## Writing Documentation {#writing-documentation}

-Writing tests: https://clickhouse.com/docs/en/development/tests/
+As part of every pull request which adds a new feature, it is necessary to write documentation for it. If you'd like to preview your documentation changes the instructions for how to build the documentation page locally are available in the README.md file [here](https://github.com/ClickHouse/clickhouse-docs). When adding a new function to ClickHouse you can use the template below as a guide:

-List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest
+```markdown
+# newFunctionName
+
+A short description of the function goes here. It should describe briefly what it does and a typical usage case.
+
+**Syntax**
+
+\```sql
+newFunctionName(arg1, arg2[, arg3])
+\```
+
+**Arguments**
+
+- `arg1` — Description of the argument. [DataType](../data-types/float.md)
+- `arg2` — Description of the argument. [DataType](../data-types/float.md)
+- `arg3` — Description of optional argument (optional). [DataType](../data-types/float.md)
+
+**Implementation Details**
+
+A description of implementation details if relevant.
+
+**Returned value**
+
+- Returns {insert what the function returns here}. [DataType](../data-types/float.md)
+
+**Example**
+
+Query:
+
+\```sql
+SELECT 'write your example query here';
+\```
+
+Response:
+
+\```response
+┌───────────────────────────────────┐
+│ the result of the query           │
+└───────────────────────────────────┘
+\```
+```

## Test Data {#test-data}
@@ -15,7 +15,7 @@ You have four options for getting up and running with ClickHouse:

- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, built by, maintained and supported by the creators of ClickHouse
- **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
-- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture
+- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture
- **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub

## ClickHouse Cloud
@@ -6,13 +6,12 @@ sidebar_label: MySQL Interface

# MySQL Interface

-ClickHouse supports the MySQL wire protocol. This allow tools that are MySQL-compatible to interact with ClickHouse seamlessly (e.g. [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)).
+ClickHouse supports the MySQL wire protocol. This allows certain clients that do not have native ClickHouse connectors to leverage the MySQL protocol instead, and it has been validated with the following BI tools:
+
+- [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)
+- [Tableau Online](../integrations/tableau-online)
+- [QuickSight](../integrations/quicksight)
+
+If you are trying other untested clients or integrations, keep in mind that there could be the following limitations:
+
+- The SSL implementation might not be fully compatible; there could be potential [TLS SNI](https://www.cloudflare.com/learning/ssl/what-is-sni/) issues.
+- A particular tool might require dialect features (e.g., MySQL-specific functions or settings) that are not implemented yet.
+
+If there is a native driver available (e.g., [DBeaver](../integrations/dbeaver)), it is always preferred to use it instead of the MySQL interface. Additionally, while most MySQL language clients should work fine, the MySQL interface is not guaranteed to be a drop-in replacement for a codebase with existing MySQL queries.
+
+If your use case involves a particular tool that does not have a native ClickHouse driver, and you would like to use it via the MySQL interface, and you find certain incompatibilities, please [create an issue](https://github.com/ClickHouse/ClickHouse/issues) in the ClickHouse repository.

## Enabling the MySQL Interface On ClickHouse Cloud
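As a quick illustration (not from the docs themselves), any MySQL-protocol client such as PyMySQL can talk to ClickHouse once the interface is enabled; port 9004 is the conventional `mysql_port` from the ClickHouse documentation, and the host and credentials below are assumptions:

```python
import pymysql

# Hedged sketch: assumes a local server with <mysql_port>9004</mysql_port>
# enabled and the default user without a password.
conn = pymysql.connect(host="localhost", port=9004, user="default", password="")
with conn.cursor() as cur:
    cur.execute("SELECT version()")
    print(cur.fetchone())
conn.close()
```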
@@ -1030,7 +1030,7 @@ A table with no primary key represents the extreme case of a single equivalence

The fewer and the larger the equivalence classes are, the higher the degree of freedom when re-shuffling rows.

The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemir, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns.
The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemire, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns.

It performs three steps:
1. Find all equivalence classes based on the row values in primary key columns.
2. For each equivalence class, calculate (usually estimate) the cardinalities of the non-primary-key columns.
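As a rough sketch of what this looks like in practice (the setting name `allow_experimental_optimized_row_order` is an assumption here; the hunk above does not show it):

```sql
-- Hypothetical illustration: enable the experimental row-reordering optimization
-- (setting name assumed) so that rows written into each part can be re-shuffled
-- within their primary-key equivalence classes.
SET allow_experimental_optimized_row_order = 1;

CREATE TABLE events
(
    user_id UInt64,                  -- primary key column: defines the equivalence classes
    browser LowCardinality(String),  -- low cardinality: sorted first within each class
    url String                       -- high cardinality: sorted last within each class
)
ENGINE = MergeTree
ORDER BY user_id;
```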
@@ -16,7 +16,7 @@ sidebar_label: clickhouse-local

While `clickhouse-local` is a great tool for development and testing purposes, and for processing files, it is not suitable for serving end users or applications. In these scenarios, it is recommended to use the open-source [ClickHouse](https://clickhouse.com/docs/en/install). ClickHouse is a powerful OLAP database that is designed to handle large-scale analytical workloads. It provides fast and efficient processing of complex queries on large datasets, making it ideal for use in production environments where high performance is critical. Additionally, ClickHouse offers a wide range of features such as replication, sharding, and high availability, which are essential for scaling up to handle large datasets and serving applications. If you need to handle larger datasets or serve end users or applications, we recommend using open-source ClickHouse instead of `clickhouse-local`.

Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local CSVs](#query-data-in-a-csv-file-using-sql) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
Please read the docs below that show example use cases for `clickhouse-local`, such as [querying a local file](#query_data_in_file) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
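For instance, a local file can be queried directly from a `clickhouse-local` session (a minimal sketch; the file name `data.csv` is only a placeholder):

```sql
-- Query a CSV file in place; file() and the CSVWithNames format are standard,
-- but the file name here is a hypothetical example.
SELECT *
FROM file('data.csv', CSVWithNames)
LIMIT 5;
```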
## Download clickhouse-local
@@ -18,7 +18,7 @@ ClickHouse also supports:

During aggregation, all `NULL` arguments are skipped. If the aggregation has several arguments, it will ignore any row in which one or more of them are `NULL`.

There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases when followed by the modifier `RESPECT NULLS`: `FIRST_VALUE(b) RESPECT NULLS`.
There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases (`any` and `anyLast` respectively) when followed by the modifier `RESPECT NULLS`. For example, `FIRST_VALUE(b) RESPECT NULLS`.

**Examples:**
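A minimal sketch of the exception (not one of the page's original examples; the `values` table function and the column name `x` are used purely for illustration):

```sql
SELECT first_value(x), first_value(x) RESPECT NULLS
FROM values('x Nullable(UInt8)', NULL, 1, 2);
-- first_value(x) skips the leading NULL and returns 1,
-- while the RESPECT NULLS variant returns the NULL itself.
```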
@@ -5,12 +5,12 @@ sidebar_position: 102

# any

Selects the first encountered value of a column.
Selects the first encountered value of a column, ignoring any `NULL` values.

**Syntax**

```sql
any(column)
any(column) [RESPECT NULLS]
```

Aliases: `any_value`, [`first_value`](../reference/first_value.md).

@@ -20,7 +20,9 @@ Aliases: `any_value`, [`first_value`](../reference/first_value.md).

**Returned value**

By default, it ignores NULL values and returns the first NOT NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md) it supports `RESPECT NULLS`, in which case it will select the first value passed, independently of whether it is NULL or not.
:::note
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
:::

:::note
The return type of the function is the same as the input, except for LowCardinality, which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) to modify this behaviour.
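The removed `any_respect_nulls` page below can be adapted to the new modifier syntax; a sketch reusing its table:

```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;

INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

SELECT any(city), any(city) RESPECT NULLS FROM any_nulls;
-- Expected: 'Amsterdam' for the NULL-skipping form, NULL for the RESPECT NULLS form.
```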
@@ -1,44 +0,0 @@
---
slug: /en/sql-reference/aggregate-functions/reference/any_respect_nulls
sidebar_position: 103
---

# any_respect_nulls

Selects the first encountered value of a column, regardless of whether it is a `NULL` value or not.

Alias: `any_value_respect_nulls`, `first_value_respect_nulls`.

**Syntax**

```sql
any_respect_nulls(column)
```

**Parameters**
- `column`: The column name.

**Returned value**

- The first value encountered, regardless of whether it is a `NULL` value or not.

**Example**

Query:

```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;

INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

SELECT any(city), any_respect_nulls(city) FROM any_nulls;
```

```response
┌─any(city)─┬─any_respect_nulls(city)─┐
│ Amsterdam │ ᴺᵁᴸᴸ                    │
└───────────┴─────────────────────────┘
```

**See Also**
- [any](../reference/any.md)
@@ -5,17 +5,21 @@ sidebar_position: 105

# anyLast

Selects the last value encountered. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
Selects the last value encountered, ignoring any `NULL` values by default. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.

**Syntax**

```sql
anyLast(column)
anyLast(column) [RESPECT NULLS]
```

**Parameters**
- `column`: The column name.

:::note
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the last value passed, regardless of whether it is `NULL` or not.
:::

**Returned value**

- The last value encountered.
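Adapting the example from the removed `anyLast_respect_nulls` page below to the new modifier syntax (a sketch):

```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;

INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);

SELECT anyLast(city), anyLast(city) RESPECT NULLS FROM any_last_nulls;
-- Expected: 'Valencia' for the NULL-skipping form, NULL for the RESPECT NULLS form.
```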
@@ -1,39 +0,0 @@
---
slug: /en/sql-reference/aggregate-functions/reference/anylast_respect_nulls
sidebar_position: 106
---

# anyLast_respect_nulls

Selects the last value encountered, regardless of whether it is `NULL` or not.

**Syntax**

```sql
anyLast_respect_nulls(column)
```

**Parameters**
- `column`: The column name.

**Returned value**

- The last value encountered, regardless of whether it is `NULL` or not.

**Example**

Query:

```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;

INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);

SELECT anyLast(city), anyLast_respect_nulls(city) FROM any_last_nulls;
```

```response
┌─anyLast(city)─┬─anyLast_respect_nulls(city)─┐
│ Valencia      │ ᴺᵁᴸᴸ                        │
└───────────────┴─────────────────────────────┘
```
@@ -45,10 +45,9 @@ ClickHouse-specific aggregate functions:

- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any_respect_nulls.md)
- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
- [anyLast](../reference/anylast_respect_nulls.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)
@@ -16,7 +16,7 @@ singleValueOrNull(x)

**Parameters**

- `x` — Column of any [data type](../../data-types/index.md).
- `x` — Column of any [data type](../../data-types/index.md) (except [Map](../../data-types/map.md), [Array](../../data-types/array.md) or [Tuple](../../data-types/tuple) which cannot be of type [Nullable](../../data-types/nullable.md)).

**Returned values**
@@ -96,3 +96,22 @@ Result:
│ 1         │ [2]       │ [[4,1]]   │
└───────────┴───────────┴───────────┘
```

## Reading nested subcolumns from Array

If nested type `T` inside `Array` has subcolumns (for example, if it's a [named tuple](./tuple.md)), you can read its subcolumns from an `Array(T)` type with the same subcolumn names. The type of a subcolumn will be `Array` of the type of the original subcolumn.

**Example**

```sql
CREATE TABLE t_arr (arr Array(Tuple(field1 UInt32, field2 String))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]);
SELECT arr.field1, toTypeName(arr.field1), arr.field2, toTypeName(arr.field2) from t_arr;
```

```text
┌─arr.field1─┬─toTypeName(arr.field1)─┬─arr.field2────────────────┬─toTypeName(arr.field2)─┐
│ [1,2]      │ Array(UInt32)          │ ['Hello','World']         │ Array(String)          │
│ [3,4,5]    │ Array(UInt32)          │ ['This','is','subcolumn'] │ Array(String)          │
└────────────┴────────────────────────┴───────────────────────────┴────────────────────────┘
```
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/data-types/dynamic
sidebar_position: 56
sidebar_position: 62
sidebar_label: Dynamic
---
@@ -494,13 +494,43 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O

As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and cast all other types to `String`.

## JSONExtract functions with Dynamic

All `JSONExtract*` functions support `Dynamic` type:

```sql
SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType(dynamic) AS dynamic_type;
```

```text
┌─dynamic─┬─dynamic_type───────────┐
│ [1,2,3] │ Array(Nullable(Int64)) │
└─────────┴────────────────────────┘
```

```sql
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types
```

```text
┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
└──────────────────────────────────┴─────────────────────────────────────────────────┘
```

```sql
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types
```

```text
┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
```

### Binary output format

In [RowBinary](../../interfaces/formats.md#rowbinary-rowbinary) format values of `Dynamic` type are serialized in the following format:
In RowBinary format values of `Dynamic` type are serialized in the following format:

```text
<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
```

See the [data types binary encoding specification](../../sql-reference/data-types/data-types-binary-encoding.md)
@@ -5,11 +5,11 @@ sidebar_label: Object Data Type
keywords: [object, data type]
---

# Object Data Type
# Object Data Type (deprecated)

:::note
This feature is not production-ready and is now deprecated. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864)
:::
**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).

<hr />

Stores JavaScript Object Notation (JSON) documents in a single column.
@@ -3080,4 +3080,4 @@ Result:

## Distance functions

All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).
All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).
@@ -2698,6 +2698,204 @@ Like function `YYYYMMDDhhmmssToDate()` but produces a [DateTime64](../data-types

Accepts an additional, optional `precision` parameter after the `timezone` parameter.

## changeYear

Changes the year component of a date or date time.

**Syntax**

``` sql
changeYear(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the year. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- The same type as `date_or_datetime`.

**Example**

``` sql
SELECT changeYear(toDate('1999-01-01'), 2000), changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000);
```

Result:

```
┌─changeYear(toDate('1999-01-01'), 2000)─┬─changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000)─┐
│                             2000-01-01 │                                      2000-01-01 00:00:00.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```

## changeMonth

Changes the month component of a date or date time.

**Syntax**

``` sql
changeMonth(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the month. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`.

**Example**

``` sql
SELECT changeMonth(toDate('1999-01-01'), 2), changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2);
```

Result:

```
┌─changeMonth(toDate('1999-01-01'), 2)─┬─changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2)─┐
│                           1999-02-01 │                                     1999-02-01 00:00:00.000 │
└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```

## changeDay

Changes the day component of a date or date time.

**Syntax**

``` sql
changeDay(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the day. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`.

**Example**

``` sql
SELECT changeDay(toDate('1999-01-01'), 5), changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5);
```

Result:

```
┌─changeDay(toDate('1999-01-01'), 5)─┬─changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5)─┐
│                         1999-01-05 │                                   1999-01-05 00:00:00.000 │
└────────────────────────────────────┴──────────────────────────────────────────────────────────┘
```

## changeHour

Changes the hour component of a date or date time.

**Syntax**

``` sql
changeHour(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the hour. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md).

**Example**

``` sql
SELECT changeHour(toDate('1999-01-01'), 14), changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14);
```

Result:

```
┌─changeHour(toDate('1999-01-01'), 14)─┬─changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14)─┐
│                  1999-01-01 14:00:00 │                                     1999-01-01 14:00:00.000 │
└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```

## changeMinute

Changes the minute component of a date or date time.

**Syntax**

``` sql
changeMinute(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the minute. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md).

**Example**

``` sql
SELECT changeMinute(toDate('1999-01-01'), 15), changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
```

Result:

```
┌─changeMinute(toDate('1999-01-01'), 15)─┬─changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
│                    1999-01-01 00:15:00 │                                       1999-01-01 00:15:00.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```

## changeSecond

Changes the second component of a date or date time.

**Syntax**

``` sql
changeSecond(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the second. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md).

**Example**

``` sql
SELECT changeSecond(toDate('1999-01-01'), 15), changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
```

Result:

```
┌─changeSecond(toDate('1999-01-01'), 15)─┬─changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
│                    1999-01-01 00:00:15 │                                       1999-01-01 00:00:15.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```

## addYears

Adds a specified number of years to a date, a date with time or a string-encoded date / date with time.
@@ -2714,6 +2912,7 @@ addYears(date, num)
- `num`: Number of years to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2751,6 +2950,7 @@ addQuarters(date, num)
- `num`: Number of quarters to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2788,6 +2988,7 @@ addMonths(date, num)
- `num`: Number of months to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2825,6 +3026,7 @@ addWeeks(date, num)
- `num`: Number of weeks to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2862,6 +3064,7 @@ addDays(date, num)
- `num`: Number of days to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2899,6 +3102,7 @@ addHours(date, num)
- `num`: Number of hours to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2936,6 +3140,7 @@ addMinutes(date, num)
- `num`: Number of minutes to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2973,6 +3178,7 @@ addSeconds(date, num)
- `num`: Number of seconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3010,6 +3216,7 @@ addMilliseconds(date_time, num)
- `num`: Number of milliseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` plus `num` milliseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3045,6 +3252,7 @@ addMicroseconds(date_time, num)
- `num`: Number of microseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` plus `num` microseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3080,6 +3288,7 @@ addNanoseconds(date_time, num)
- `num`: Number of nanoseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` plus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3115,6 +3324,7 @@ addInterval(interval_1, interval_2)
- `interval_2`: Second interval to be added. [interval](../data-types/special-data-types/interval.md).

**Returned value**

- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

:::note

@@ -3161,6 +3371,7 @@ addTupleOfIntervals(interval_1, interval_2)
- `intervals`: Tuple of intervals to add to `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

**Returned value**

- Returns `date` with added `intervals`. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).

**Example**

@@ -3195,6 +3406,7 @@ subtractYears(date, num)
- `num`: Number of years to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3232,6 +3444,7 @@ subtractQuarters(date, num)
- `num`: Number of quarters to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3269,6 +3482,7 @@ subtractMonths(date, num)
- `num`: Number of months to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3306,6 +3520,7 @@ subtractWeeks(date, num)
- `num`: Number of weeks to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3343,6 +3558,7 @@ subtractDays(date, num)
- `num`: Number of days to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3380,6 +3596,7 @@ subtractHours(date, num)
- `num`: Number of hours to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3417,6 +3634,7 @@ subtractMinutes(date, num)
- `num`: Number of minutes to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3454,6 +3672,7 @@ subtractSeconds(date, num)
- `num`: Number of seconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3491,6 +3710,7 @@ subtractMilliseconds(date_time, num)
- `num`: Number of milliseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` minus `num` milliseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3526,6 +3746,7 @@ subtractMicroseconds(date_time, num)
- `num`: Number of microseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` minus `num` microseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3561,6 +3782,7 @@ subtractNanoseconds(date_time, num)
- `num`: Number of nanoseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` minus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3596,6 +3818,7 @@ subtractInterval(interval_1, interval_2)
- `interval_2`: Second interval to be negated. [interval](../data-types/special-data-types/interval.md).

**Returned value**

- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

:::note

@@ -3642,6 +3865,7 @@ subtractTupleOfIntervals(interval_1, interval_2)
- `intervals`: Tuple of intervals to subtract from `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

**Returned value**

- Returns `date` with subtracted `intervals`. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**
@@ -314,10 +314,71 @@ SELECT groupBitXor(cityHash64(*)) FROM table
Calculates a 32-bit hash code from any type of integer.
This is a relatively fast non-cryptographic hash function of average quality for numbers.

**Syntax**

```sql
intHash32(int)
```

**Arguments**

- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).

**Returned value**

- 32-bit hash code. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT intHash32(42);
```

Result:

```response
┌─intHash32(42)─┐
│    1228623923 │
└───────────────┘
```

## intHash64

Calculates a 64-bit hash code from any type of integer.
It works faster than intHash32. Average quality.
This is a relatively fast non-cryptographic hash function of average quality for numbers.
It works faster than [intHash32](#inthash32).

**Syntax**

```sql
intHash64(int)
```

**Arguments**

- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).

**Returned value**

- 64-bit hash code. [UInt64](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT intHash64(42);
```

Result:

```response
┌────────intHash64(42)─┐
│ 11490350930367293593 │
└──────────────────────┘
```

## SHA1, SHA224, SHA256, SHA512, SHA512_256
@@ -2984,6 +2984,66 @@ Result:
└─────────┘
```

## partitionID

Computes the [partition ID](../../engines/table-engines/mergetree-family/custom-partitioning-key.md).

:::note
This function is slow and should not be called for a large number of rows.
:::

**Syntax**

```sql
partitionID(x[, y, ...]);
```

**Arguments**

- `x` — Column for which to return the partition ID.
- `y, ...` — Remaining N columns for which to return the partition ID (optional).

**Returned Value**

- Partition ID that the row would belong to. [String](../data-types/string.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS tab;

CREATE TABLE tab
(
  i int,
  j int
)
ENGINE = MergeTree
PARTITION BY i
ORDER BY tuple();

INSERT INTO tab VALUES (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6);

SELECT i, j, partitionID(i), _partition_id FROM tab ORDER BY i, j;
```

Result:

```response
┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
│ 1 │ 1 │ 1              │ 1             │
│ 1 │ 2 │ 1              │ 1             │
│ 1 │ 3 │ 1              │ 1             │
└───┴───┴────────────────┴───────────────┘
┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
│ 2 │ 4 │ 2              │ 2             │
│ 2 │ 5 │ 2              │ 2             │
│ 2 │ 6 │ 2              │ 2             │
└───┴───┴────────────────┴───────────────┘
```

## shardNum

Returns the index of a shard which processes a part of data in a distributed query. Indices start from `1`.
@@ -567,12 +567,13 @@ While no standard or recommendation exists for the epoch of Snowflake IDs, imple

**Syntax**

``` sql
generateSnowflakeID([expr])
generateSnowflakeID([expr, [machine_id]])
```

**Arguments**

- `expr` — An arbitrary [expression](../../sql-reference/syntax.md#syntax-expressions) used to bypass [common subexpression elimination](../../sql-reference/functions/index.md#common-subexpression-elimination) if the function is called multiple times in a query. The value of the expression has no effect on the returned Snowflake ID. Optional.
- `machine_id` — A machine ID, the lowest 10 bits are used. [Int64](../data-types/int-uint.md). Optional.

**Returned value**

@@ -608,6 +609,16 @@ SELECT generateSnowflakeID(1), generateSnowflakeID(2);
└────────────────────────┴────────────────────────┘
```

**Example with expression and a machine ID**

```
SELECT generateSnowflakeID('expr', 1);

┌─generateSnowflakeID('expr', 1)─┐
│            7201148511606784002 │
└────────────────────────────────┘
```

## snowflakeToDateTime

:::warning
@@ -58,6 +58,8 @@ KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
KILL QUERY WHERE user='username' SYNC
```

:::tip
If you are killing a query in ClickHouse Cloud or in a self-managed cluster, be sure to use the `ON CLUSTER [cluster-name]` option to ensure the query is killed on all replicas.
:::
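For example (a sketch; `default` is a hypothetical cluster name, substitute your own):

```sql
-- Kill matching queries on every replica of the cluster and wait for confirmation.
KILL QUERY ON CLUSTER default WHERE user = 'username' SYNC;
```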
Read-only users can only stop their own queries.

By default, the asynchronous version of queries is used (`ASYNC`), which does not wait for confirmation that queries have stopped.

@@ -131,6 +133,7 @@ KILL MUTATION WHERE database = 'default' AND table = 'table'
-- Cancel the specific mutation:
KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
```

:::tip
If you are killing a mutation in ClickHouse Cloud or in a self-managed cluster, be sure to use the `ON CLUSTER [cluster-name]` option to ensure the mutation is killed on all replicas.
:::
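For example (again with a hypothetical cluster name):

```sql
-- Cancel the mutation on every replica of the cluster.
KILL MUTATION ON CLUSTER default WHERE database = 'default' AND table = 'table';
```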
The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).
docs/en/sql-reference/window-functions/dense_rank.md (new file, 73 lines)
@@ -0,0 +1,73 @@
---
slug: /en/sql-reference/window-functions/dense_rank
sidebar_label: dense_rank
sidebar_position: 7
---

# dense_rank

Ranks the current row within its partition without gaps. In other words, if the value of any new row encountered is equal to the value of one of the previous rows, then it will receive the next successive rank without any gaps in ranking.

The [rank](./rank.md) function provides the same behaviour, but with gaps in ranking.

**Syntax**

```sql
dense_rank ()
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```

For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).

**Returned value**

- A number for the current row within its partition, without gaps in ranking. [UInt64](../data-types/int-uint.md).

**Example**

The following example is based on the example provided in the instructional video [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).

Query:

```sql
CREATE TABLE salaries
(
    `team` String,
    `player` String,
    `salary` UInt32,
    `position` String
)
Engine = Memory;

INSERT INTO salaries FORMAT Values
    ('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
    ('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
    ('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
    ('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
    ('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
    ('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
    ('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```

```sql
SELECT player, salary,
       dense_rank() OVER (ORDER BY salary DESC) AS dense_rank
FROM salaries;
```

Result:

```response
   ┌─player──────────┬─salary─┬─dense_rank─┐
1. │ Gary Chen       │ 195000 │          1 │
2. │ Robert George   │ 195000 │          1 │
3. │ Charles Juarez  │ 190000 │          2 │
4. │ Michael Stanley │ 150000 │          3 │
5. │ Douglas Benson  │ 150000 │          3 │
6. │ Scott Harrison  │ 150000 │          3 │
7. │ James Henderson │ 140000 │          4 │
   └─────────────────┴────────┴────────────┘
```
docs/en/sql-reference/window-functions/first_value.md (new file, 79 lines)
@@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/first_value
sidebar_label: first_value
sidebar_position: 3
---

# first_value

Returns the first value evaluated within its ordered frame. By default, NULL arguments are skipped; however, the `RESPECT NULLS` modifier can be used to override this behaviour.

**Syntax**

```sql
first_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```

Alias: `any`.

:::note
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
:::

For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).

**Returned value**

- The first value evaluated within its ordered frame.

**Example**

In this example the `first_value` function is used to find the highest paid footballer from a fictional dataset of salaries of Premier League football players.

Query:

```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
    `team` String,
    `player` String,
    `salary` UInt32,
    `position` String
)
Engine = Memory;

INSERT INTO salaries FORMAT Values
    ('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
    ('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
    ('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
    ('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
    ('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
    ('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
    ('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```

```sql
SELECT player, salary,
       first_value(player) OVER (ORDER BY salary DESC) AS highest_paid_player
FROM salaries;
```

Result:

```response
   ┌─player──────────┬─salary─┬─highest_paid_player─┐
1. │ Gary Chen       │ 196000 │ Gary Chen           │
2. │ Robert George   │ 195000 │ Gary Chen           │
3. │ Charles Juarez  │ 190000 │ Gary Chen           │
4. │ Scott Harrison  │ 180000 │ Gary Chen           │
5. │ Douglas Benson  │ 150000 │ Gary Chen           │
6. │ James Henderson │ 140000 │ Gary Chen           │
7. │ Michael Stanley │ 100000 │ Gary Chen           │
   └─────────────────┴────────┴─────────────────────┘
```
@@ -1,10 +1,11 @@
---
slug: /en/sql-reference/window-functions/
sidebar_position: 62
sidebar_label: Window Functions
title: Window Functions
sidebar_position: 1
---

# Window Functions

Window functions let you perform calculations across a set of rows that are related to the current row.
Some of the calculations that you can do are similar to those that can be done with an aggregate function, but a window function doesn't cause rows to be grouped into a single output; the individual rows are still returned.

@@ -12,8 +13,8 @@ Some of the calculations that you can do are similar to those that can be done w

ClickHouse supports the standard grammar for defining windows and window functions. The table below indicates whether a feature is currently supported.

| Feature                                                                            | Supported? |
|------------------------------------------------------------------------------------|------------|
| ad hoc window specification (`count(*) over (partition by id order by time desc)`) | ✅ |
| expressions involving window functions, e.g. `(count(*) over ()) / 2` | ✅ |
| `WINDOW` clause (`select ... from table window w as (partition by id)`) | ✅ |

@@ -75,14 +76,14 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]

These functions can be used only as window functions.

- `row_number()` - Number the current row within its partition starting from 1.
- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
- `rank()` - Rank the current row within its partition with gaps.
- `dense_rank()` - Rank the current row within its partition without gaps.
- `lagInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame. The offset parameter, if not specified, defaults to 1, meaning it will fetch the value from the next row. If the calculated row exceeds the boundaries of the window frame, the specified default value is returned.
- `leadInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame. If offset is not provided, it defaults to 1. If the offset leads to a position outside the window frame, the specified default value is used.
- [`row_number()`](./row_number.md) - Number the current row within its partition starting from 1.
- [`first_value(x)`](./first_value.md) - Return the first value evaluated within its ordered frame.
- [`last_value(x)`](./last_value.md) - Return the last value evaluated within its ordered frame.
- [`nth_value(x, offset)`](./nth_value.md) - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
- [`rank()`](./rank.md) - Rank the current row within its partition with gaps.
- [`dense_rank()`](./dense_rank.md) - Rank the current row within its partition without gaps.
- [`lagInFrame(x)`](./lagInFrame.md) - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
- [`leadInFrame(x)`](./leadInFrame.md) - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.

## Examples
docs/en/sql-reference/window-functions/lagInFrame.md (new file, 79 lines)
@@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/lagInFrame
sidebar_label: lagInFrame
sidebar_position: 8
---

# lagInFrame

Returns a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.

**Syntax**

```sql
lagInFrame(x[, offset[, default]])
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```

For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).

**Parameters**
- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if the calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).

**Returned value**

- Value evaluated at the row that is at a specified physical offset before the current row within the ordered frame.

**Example**

This example looks at historical data for a specific stock and uses the `lagInFrame` function to calculate a day-to-day delta and percentage change in the closing price of the stock.

Query:

```sql
CREATE TABLE stock_prices
(
    `date`   Date,
    `open`   Float32, -- opening price
    `high`   Float32, -- daily high
    `low`    Float32, -- daily low
    `close`  Float32, -- closing price
    `volume` UInt32   -- trade volume
)
Engine = Memory;

INSERT INTO stock_prices FORMAT Values
    ('2024-06-03', 113.62, 115.00, 112.00, 115.00, 438392000),
    ('2024-06-04', 115.72, 116.60, 114.04, 116.44, 403324000),
    ('2024-06-05', 118.37, 122.45, 117.47, 122.44, 528402000),
    ('2024-06-06', 124.05, 125.59, 118.32, 121.00, 664696000),
    ('2024-06-07', 119.77, 121.69, 118.02, 120.89, 412386000);
```

```sql
SELECT
    date,
    close,
    lagInFrame(close, 1, close) OVER (ORDER BY date ASC) AS previous_day_close,
    COALESCE(ROUND(close - previous_day_close, 2)) AS delta,
    COALESCE(ROUND((delta / previous_day_close) * 100, 2)) AS percent_change
FROM stock_prices
ORDER BY date DESC;
```

Result:

```response
   ┌───────date─┬──close─┬─previous_day_close─┬─delta─┬─percent_change─┐
1. │ 2024-06-07 │ 120.89 │                121 │ -0.11 │          -0.09 │
2. │ 2024-06-06 │    121 │             122.44 │ -1.44 │          -1.18 │
3. │ 2024-06-05 │ 122.44 │             116.44 │     6 │           5.15 │
4. │ 2024-06-04 │ 116.44 │                115 │  1.44 │           1.25 │
5. │ 2024-06-03 │    115 │                115 │     0 │              0 │
   └────────────┴────────┴────────────────────┴───────┴────────────────┘
```
docs/en/sql-reference/window-functions/last_value.md (new file, 79 lines)
@@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/last_value
sidebar_label: last_value
sidebar_position: 4
---

# last_value

Returns the last value evaluated within its ordered frame. By default, NULL arguments are skipped; however, the `RESPECT NULLS` modifier can be used to override this behaviour.

**Syntax**

```sql
last_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```

Alias: `anyLast`.

:::note
Using the optional modifier `RESPECT NULLS` after `last_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
:::

For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).

**Returned value**

- The last value evaluated within its ordered frame.

**Example**

In this example the `last_value` function is used to find the lowest paid footballer from a fictional dataset of salaries of Premier League football players.

Query:

```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
    `team` String,
    `player` String,
    `salary` UInt32,
    `position` String
)
Engine = Memory;

INSERT INTO salaries FORMAT Values
    ('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
    ('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
    ('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
    ('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
    ('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
    ('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
    ('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```

```sql
SELECT player, salary,
       last_value(player) OVER (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS lowest_paid_player
FROM salaries;
```

Result:

```response
   ┌─player──────────┬─salary─┬─lowest_paid_player─┐
1. │ Gary Chen       │ 196000 │ Michael Stanley    │
2. │ Robert George   │ 195000 │ Michael Stanley    │
3. │ Charles Juarez  │ 190000 │ Michael Stanley    │
4. │ Scott Harrison  │ 180000 │ Michael Stanley    │
5. │ Douglas Benson  │ 150000 │ Michael Stanley    │
6. │ James Henderson │ 140000 │ Michael Stanley    │
7. │ Michael Stanley │ 100000 │ Michael Stanley    │
   └─────────────────┴────────┴────────────────────┘
```
60
docs/en/sql-reference/window-functions/leadInFrame.md
Normal file
60
docs/en/sql-reference/window-functions/leadInFrame.md
Normal file
@ -0,0 +1,60 @@
---
slug: /en/sql-reference/window-functions/leadInFrame
sidebar_label: leadInFrame
sidebar_position: 9
---

# leadInFrame

Returns a value evaluated at the row that is `offset` rows after the current row within the ordered frame.

**Syntax**

```sql
leadInFrame(x[, offset[, default]])
  OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column])
```

For more detail on window function syntax, see: [Window Functions - Syntax](./index.md#syntax).

**Parameters**

- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if the calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).

**Returned value**

- The value evaluated at the row that is `offset` rows after the current row within the ordered frame.

**Example**

This example looks at [historical data](https://www.kaggle.com/datasets/sazidthe1/nobel-prize-data) for Nobel Prize winners and uses the `leadInFrame` function to return a list of successive winners in the physics category.

Query:

```sql
CREATE OR REPLACE VIEW nobel_prize_laureates AS FROM file('nobel_laureates_data.csv') SELECT *;
```

```sql
FROM nobel_prize_laureates
SELECT fullName,
       leadInFrame(year, 1, year) OVER (PARTITION BY category ORDER BY year) AS year,
       category,
       motivation
WHERE category == 'physics'
ORDER BY year DESC
LIMIT 9;
```

Result:

```response
┌─fullName─────────┬─year─┬─category─┬─motivation─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
1. │ Pierre Agostini  │ 2023 │ physics  │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter                     │
2. │ Ferenc Krausz    │ 2023 │ physics  │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter                     │
3. │ Anne L Huillier  │ 2023 │ physics  │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter                     │
4. │ Alain Aspect     │ 2022 │ physics  │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
5. │ Anton Zeilinger  │ 2022 │ physics  │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
6. │ John Clauser     │ 2022 │ physics  │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
7. │ Syukuro Manabe   │ 2021 │ physics  │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming                        │
8. │ Klaus Hasselmann │ 2021 │ physics  │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming                        │
9. │ Giorgio Parisi   │ 2021 │ physics  │ for the discovery of the interplay of disorder and fluctuations in physical systems from atomic to planetary scales                │
└──────────────────┴──────┴──────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
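
Note that `leadInFrame` looks ahead only within the current window frame. As a minimal sketch (using the `numbers` table function, not part of the original example), the frame is commonly widened to the whole partition to emulate a classic `lead`:

```sql
-- The default offset of 1 applies; the last row has no following row inside
-- the frame, so the `default` fallback described above is returned.
SELECT
    number,
    leadInFrame(number) OVER (
        ORDER BY number ASC
        ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
    ) AS next_number
FROM numbers(5);
```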

75 docs/en/sql-reference/window-functions/nth_value.md Normal file
@ -0,0 +1,75 @@
---
slug: /en/sql-reference/window-functions/nth_value
sidebar_label: nth_value
sidebar_position: 5
---

# nth_value

Returns the first non-NULL value evaluated against the nth row (offset) in its ordered frame.

**Syntax**

```sql
nth_value(x, offset)
  OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column])
```

For more detail on window function syntax, see: [Window Functions - Syntax](./index.md#syntax).

**Parameters**

- `x` — Column name.
- `offset` — The nth row to evaluate the current row against.

**Returned value**

- The first non-NULL value evaluated against the nth row (offset) in its ordered frame.

**Example**

In this example the `nth_value` function is used to find the third-highest salary from a fictional dataset of salaries of Premier League football players.

Query:

```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
    `team` String,
    `player` String,
    `salary` UInt32,
    `position` String
)
Engine = Memory;

INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```

```sql
SELECT player, salary, nth_value(player, 3) OVER (ORDER BY salary DESC) AS third_highest_salary FROM salaries;
```

Result:

```response
┌─player──────────┬─salary─┬─third_highest_salary─┐
1. │ Gary Chen       │ 195000 │                      │
2. │ Robert George   │ 195000 │                      │
3. │ Charles Juarez  │ 190000 │ Charles Juarez       │
4. │ Scott Harrison  │ 180000 │ Charles Juarez       │
5. │ Douglas Benson  │ 150000 │ Charles Juarez       │
6. │ James Henderson │ 140000 │ Charles Juarez       │
7. │ Michael Stanley │ 100000 │ Charles Juarez       │
└─────────────────┴────────┴──────────────────────┘
```
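
A partitioned variant can be sketched the same way (reusing the `salaries` table above): adding `PARTITION BY` restarts the search inside every team, so partitions with fewer than three rows seen so far yield the default empty value.

```sql
-- Third-highest earner per team; with the default frame, rows before the
-- third row of their partition have no nth row yet and stay empty.
SELECT team, player, salary,
    nth_value(player, 3) OVER (PARTITION BY team ORDER BY salary DESC) AS third_highest_in_team
FROM salaries;
```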

74 docs/en/sql-reference/window-functions/rank.md Normal file
@ -0,0 +1,74 @@
---
slug: /en/sql-reference/window-functions/rank
sidebar_label: rank
sidebar_position: 6
---

# rank

Ranks the current row within its partition with gaps: rows with equal values receive the same rank, and the rank of the next distinct value is offset by the number of tied rows, leaving gaps in the sequence.

The [dense_rank](./dense_rank.md) function provides the same behaviour but without gaps in ranking.

**Syntax**

```sql
rank()
  OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column])
```

For more detail on window function syntax, see: [Window Functions - Syntax](./index.md#syntax).

**Returned value**

- A number for the current row within its partition, including gaps. [UInt64](../data-types/int-uint.md).

**Example**

The following example is based on the example provided in the instructional video [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).

Query:

```sql
CREATE TABLE salaries
(
    `team` String,
    `player` String,
    `salary` UInt32,
    `position` String
)
Engine = Memory;

INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```

```sql
SELECT player, salary,
       rank() OVER (ORDER BY salary DESC) AS rank
FROM salaries;
```

Result:

```response
┌─player──────────┬─salary─┬─rank─┐
1. │ Gary Chen       │ 195000 │    1 │
2. │ Robert George   │ 195000 │    1 │
3. │ Charles Juarez  │ 190000 │    3 │
4. │ Douglas Benson  │ 150000 │    4 │
5. │ Michael Stanley │ 150000 │    4 │
6. │ Scott Harrison  │ 150000 │    4 │
7. │ James Henderson │ 140000 │    7 │
└─────────────────┴────────┴──────┘
```
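
To see the gaps side by side, a minimal sketch (reusing the `salaries` table above) can compare `rank` with `dense_rank` directly:

```sql
-- After the three-way tie at 150000, rank() jumps to 7
-- while dense_rank() continues with 4.
SELECT player, salary,
    rank() OVER (ORDER BY salary DESC) AS rank,
    dense_rank() OVER (ORDER BY salary DESC) AS dense_rank
FROM salaries;
```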

67 docs/en/sql-reference/window-functions/row_number.md Normal file
@ -0,0 +1,67 @@
---
slug: /en/sql-reference/window-functions/row_number
sidebar_label: row_number
sidebar_position: 2
---

# row_number

Numbers the current row within its partition starting from 1. Rows with equal values in the sorting column are numbered in an unspecified order.

**Syntax**

```sql
row_number()
  OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
        [ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column])
```

For more detail on window function syntax, see: [Window Functions - Syntax](./index.md#syntax).

**Returned value**

- A number for the current row within its partition. [UInt64](../data-types/int-uint.md).

**Example**

The following example is based on the example provided in the instructional video [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).

Query:

```sql
CREATE TABLE salaries
(
    `team` String,
    `player` String,
    `salary` UInt32,
    `position` String
)
Engine = Memory;

INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M');
```

```sql
SELECT player, salary,
       row_number() OVER (ORDER BY salary DESC) AS row_number
FROM salaries;
```

Result:

```response
┌─player──────────┬─salary─┬─row_number─┐
1. │ Gary Chen       │ 195000 │          1 │
2. │ Robert George   │ 195000 │          2 │
3. │ Charles Juarez  │ 190000 │          3 │
4. │ Scott Harrison  │ 150000 │          4 │
5. │ Michael Stanley │ 150000 │          5 │
└─────────────────┴────────┴────────────┘
```
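
Numbering can also restart per group; a minimal sketch (reusing the `salaries` table above) adds `PARTITION BY` so each team gets its own sequence:

```sql
-- Each team is numbered independently, again starting from 1.
SELECT team, player, salary,
    row_number() OVER (PARTITION BY team ORDER BY salary DESC) AS row_number_in_team
FROM salaries;
```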

@ -82,14 +82,14 @@ FROM LEFT_RIGHT
SELECT
    left,
    right,
-    if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller
+    if(left < right, 'left is smaller than right', 'right is smaller or equal than left') AS is_smaller
FROM LEFT_RIGHT
WHERE isNotNull(left) AND isNotNull(right)

┌─left─┬─right─┬─is_smaller──────────────────────────┐
│    1 │     3 │ left is smaller than right          │
-│    2 │     2 │ right is greater or equal than left │
-│    3 │     1 │ right is greater or equal than left │
+│    2 │     2 │ right is smaller or equal than left │
+│    3 │     1 │ right is smaller or equal than left │
└──────┴───────┴─────────────────────────────────────┘
```

@ -4,6 +4,9 @@ if (USE_CLANG_TIDY)
    set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
endif ()

set(MAX_LINKER_MEMORY 3500)
include(../cmake/limit_jobs.cmake)

include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)

# The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.),

@ -24,9 +24,8 @@
#include <Common/TerminalSize.h>
#include <Common/config_version.h>
#include <Common/formatReadable.h>

#include <Core/Settings.h>
#include <Columns/ColumnString.h>
#include <Poco/Util/Application.h>

#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>

@ -49,6 +48,8 @@
#include <Formats/registerFormats.h>
#include <Formats/FormatFactory.h>

#include <Poco/Util/Application.h>

namespace fs = std::filesystem;
using namespace std::literals;

@ -185,6 +186,8 @@ void Client::parseConnectionsCredentials(Poco::Util::AbstractConfiguration & con
history_file = home_path + "/" + history_file.substr(1);
config.setString("history_file", history_file);
}
if (config.has(prefix + ".accept-invalid-certificate"))
config.setBool("accept-invalid-certificate", config.getBool(prefix + ".accept-invalid-certificate"));
}

if (!connection_name.empty() && !connection_found)

@ -276,6 +279,12 @@ void Client::initialize(Poco::Util::Application & self)
else if (config().has("connection"))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "--connection was specified, but config does not exist");

if (config().has("accept-invalid-certificate"))
{
config().setString("openSSL.client.invalidCertificateHandler.name", "AcceptCertificateHandler");
config().setString("openSSL.client.verificationMode", "none");
}

/** getenv is thread-safe in Linux glibc and in all sane libc implementations.
* But the standard does not guarantee that subsequent calls will not rewrite the value by returned pointer.
*

@ -730,7 +739,7 @@ bool Client::processWithFuzzing(const String & full_query)
}
if (auto *q = orig_ast->as<ASTSetQuery>())
{
-if (auto *setDialect = q->changes.tryGet("dialect"); setDialect && setDialect->safeGet<String>() == "kusto")
+if (auto *set_dialect = q->changes.tryGet("dialect"); set_dialect && set_dialect->safeGet<String>() == "kusto")
return true;
}

@ -1,5 +1,6 @@
<!-- Config set into /etc/clickhouse-client/. It's used if no other configs are found. -->
<config>
<!-- Shorthand for self-signed combination in openSSL section below: <accept-invalid-certificate>1</accept-invalid-certificate> -->
<openSSL>
<client> <!-- Used for connection to server's secure tcp port -->
<loadDefaultCAFile>true</loadDefaultCAFile>

@ -72,6 +73,7 @@

Default: "hostname" will be used. -->
<name>default</name>
<!-- For self-signed server certificate when connecting to secure tcp: <accept-invalid-certificate>1</accept-invalid-certificate> -->
<!-- Host that will be used for connection. -->
<hostname>127.0.0.1</hostname>
<port>9000</port>

@ -1,5 +1,7 @@
#include "LibraryBridge.h"

#include <iostream>

int mainEntryClickHouseLibraryBridge(int argc, char ** argv)
{
DB::LibraryBridge app;

@ -6,6 +6,7 @@
#include "ExternalDictionaryLibraryHandlerFactory.h"

#include <Formats/FormatFactory.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <Common/BridgeProtocolVersion.h>

@ -376,6 +376,7 @@ void LocalServer::setupUsers()
" </networks>"
" <profile>default</profile>"
" <quota>default</quota>"
" <named_collection_control>1</named_collection_control>"
" </default>"
" </users>"
" <quotas>"

@ -3,11 +3,12 @@
#include <Client/ClientBase.h>
#include <Client/LocalConnection.h>

#include <Common/StatusFile.h>
#include <Common/InterruptListener.h>
#include <Loggers/Loggers.h>
#include <Core/ServerSettings.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <Loggers/Loggers.h>
#include <Common/InterruptListener.h>
#include <Common/StatusFile.h>

#include <filesystem>
#include <memory>

@ -2,6 +2,7 @@

#if USE_ODBC

#include <Core/Settings.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeNullable.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>

@ -11,6 +11,7 @@
#include <Poco/Net/HTTPServerResponse.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <Core/Settings.h>
#include "getIdentifierQuote.h"
#include "validateODBCConnectionString.h"
#include "ODBCPooledConnectionFactory.h"

@ -9,6 +9,7 @@
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Core/Settings.h>
#include <IO/ReadBufferFromIStream.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>

@ -1,5 +1,7 @@
#include "ODBCBridge.h"

#include <iostream>

int mainEntryClickHouseODBCBridge(int argc, char ** argv)
{
DB::ODBCBridge app;

@ -2,8 +2,10 @@

#if USE_ODBC

#include <Core/Settings.h>
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/Operators.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Poco/Net/HTTPServerRequest.h>

@ -68,6 +68,7 @@
#include <Interpreters/registerInterpreters.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Access/AccessControl.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>

@ -516,6 +516,9 @@
/// Save query in history only if it is different.
let previous_query = '';

/// Start of the last query
let last_query_start = 0;

const current_url = new URL(window.location);
const opened_locally = location.protocol == 'file:';

@ -567,6 +570,8 @@
'&password=' + encodeURIComponent(password)
}

last_query_start = performance.now();

const xhr = new XMLHttpRequest;

xhr.open('POST', url, true);

@ -579,7 +584,8 @@
if (posted_request_num != request_num) {
return;
} else if (this.readyState === XMLHttpRequest.DONE) {
-renderResponse(this.status, this.response);
+const elapsed_msec = performance.now() - last_query_start;
+renderResponse(this.status, this.response, elapsed_msec);

/// The query is saved in browser history (in state JSON object)
/// as well as in URL fragment identifier.

@ -587,7 +593,8 @@
const state = {
query: query,
status: this.status,
-response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit.
+response: this.response.length > 100000 ? null : this.response, /// Lower than the browser's limit.
+elapsed_msec: elapsed_msec,
};
const title = "ClickHouse Query: " + query;

@ -617,7 +624,7 @@
xhr.send(query);
}

-function renderResponse(status, response) {
+function renderResponse(status, response, elapsed_msec) {
document.getElementById('hourglass').style.display = 'none';

if (status === 200) {

@ -632,6 +639,7 @@
renderChart(json);
} else {
renderUnparsedResult(response);
stats.innerText = `Elapsed (client-side): ${(elapsed_msec / 1000).toFixed(3)} sec.`;
}
document.getElementById('check-mark').style.display = 'inline';
} else {

@ -651,7 +659,7 @@
clear();
return;
}
-renderResponse(event.state.status, event.state.response);
+renderResponse(event.state.status, event.state.response, event.state.elapsed_msec);
};

if (window.location.hash) {

@ -17,6 +17,8 @@ src_paths = ["src", "tests/ci", "tests/sqllogic"]

[tool.pylint.'MESSAGES CONTROL']
# pytest.mark.parametrize is not callable (not-callable)
disable = '''
pointless-string-statement,
line-too-long,
missing-docstring,
too-few-public-methods,
invalid-name,

@ -35,10 +37,10 @@ disable = '''
broad-except,
bare-except,
no-else-return,
-global-statement
+global-statement,
+f-string-without-interpolation,
'''

[tool.pylint.SIMILARITIES]
# due to SQL
min-similarity-lines=1000

@ -21,7 +21,6 @@
#include <Backups/BackupEntriesCollector.h>
#include <Backups/RestorerFromBackup.h>
#include <Core/Settings.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <base/defines.h>
#include <IO/Operators.h>
#include <Common/re2.h>

@ -6,6 +6,7 @@
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
#include <Common/CurrentThread.h>
#include <Core/Settings.h>

static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000;

@ -3,7 +3,6 @@
#include <base/scope_guard.h>

#include <Common/Exception.h>
#include <Core/Settings.h>

#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/QueryNode.h>

@ -10,6 +10,8 @@
#include <Analyzer/TableNode.h>
#include <Analyzer/UnionNode.h>

#include <Core/Settings.h>

namespace DB
{

@ -11,6 +11,8 @@
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>

#include <Core/Settings.h>

namespace DB
{

@ -11,6 +11,8 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/LambdaNode.h>

#include <Core/Settings.h>

namespace DB
{

@ -8,6 +8,8 @@
#include <Analyzer/TableExpressionModifiers.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>

#include <Core/Settings.h>

namespace DB
{

@ -22,6 +22,8 @@
#include <Analyzer/HashUtils.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>

#include <Core/Settings.h>

namespace DB
{

@ -10,6 +10,8 @@
#include <Analyzer/Utils.h>
#include <Analyzer/HashUtils.h>

#include <Core/Settings.h>

#include <Storages/IStorage.h>

#include <Functions/FunctionFactory.h>