Merge branch 'master' into random-settings

Commit 33e4c703a1
@@ -12,6 +12,7 @@ BraceWrapping:
     AfterUnion: true
     BeforeCatch: true
     BeforeElse: true
+    BeforeLambdaBody: true
     IndentBraces: false
     BreakConstructorInitializersBeforeComma: false
     Cpp11BracedListStyle: true
@@ -142,6 +142,7 @@ Checks: '-*,
     clang-analyzer-cplusplus.PlacementNewChecker,
     clang-analyzer-cplusplus.SelfAssignment,
     clang-analyzer-deadcode.DeadStores,
+    clang-analyzer-cplusplus.Move,
     clang-analyzer-optin.cplusplus.VirtualCall,
     clang-analyzer-security.insecureAPI.UncheckedReturn,
     clang-analyzer-security.insecureAPI.bcmp,
.github/workflows/master.yml (vendored, 39 changes)

@@ -86,6 +86,7 @@ jobs:
   StyleCheck:
     needs: DockerHubPush
     runs-on: [self-hosted, style-checker]
+    if: ${{ success() || failure() }}
     steps:
       - name: Set envs
         run: |
@@ -93,6 +94,8 @@ jobs:
           TEMP_PATH=${{ runner.temp }}/style_check
           EOF
       - name: Download changed images
+        # even if artifact does not exist, e.g. on `do not test` label or failed Docker job
+        continue-on-error: true
         uses: actions/download-artifact@v2
         with:
           name: changed_images
@@ -1062,6 +1065,41 @@ jobs:
           docker kill "$(docker ps -q)" ||:
           docker rm -f "$(docker ps -a -q)" ||:
           sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseS3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, s3 storage, actions)
+          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
+          KILL_TIMEOUT=10800
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAarch64:
     needs: [BuilderDebAarch64]
     runs-on: [self-hosted, func-tester-aarch64]
@@ -2841,6 +2879,7 @@ jobs:
       - FunctionalStatefulTestDebug
       - FunctionalStatefulTestRelease
       - FunctionalStatefulTestReleaseDatabaseOrdinary
+      - FunctionalStatelessTestReleaseS3
      - FunctionalStatefulTestAarch64
       - FunctionalStatefulTestAsan
       - FunctionalStatefulTestTsan
.github/workflows/nightly.yml (new file, vendored, 73 lines)

@@ -0,0 +1,73 @@
+name: NightlyBuilds
+
+env:
+  # Force the stdout and stderr streams to be unbuffered
+  PYTHONUNBUFFERED: 1
+
+"on":
+  schedule:
+    - cron: '13 3 * * *'
+
+jobs:
+  DockerHubPushAarch64:
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Images check
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_images_check.py --suffix aarch64 --all
+      - name: Upload images files to artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: changed_images_aarch64
+          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
+  DockerHubPushAmd64:
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Images check
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_images_check.py --suffix amd64 --all
+      - name: Upload images files to artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: changed_images_amd64
+          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
+  DockerHubPush:
+    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Download changed aarch64 images
+        uses: actions/download-artifact@v2
+        with:
+          name: changed_images_aarch64
+          path: ${{ runner.temp }}
+      - name: Download changed amd64 images
+        uses: actions/download-artifact@v2
+        with:
+          name: changed_images_amd64
+          path: ${{ runner.temp }}
+      - name: Images check
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
+      - name: Upload images files to artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: changed_images
+          path: ${{ runner.temp }}/changed_images.json
.github/workflows/pull_request.yml (vendored, 39 changes)

@@ -111,6 +111,7 @@ jobs:
   StyleCheck:
     needs: DockerHubPush
     runs-on: [self-hosted, style-checker]
+    if: ${{ success() || failure() }}
     steps:
       - name: Set envs
         run: |
@@ -118,6 +119,8 @@ jobs:
           TEMP_PATH=${{ runner.temp }}/style_check
           EOF
       - name: Download changed images
+        # even if artifact does not exist, e.g. on `do not test` label or failed Docker job
+        continue-on-error: true
         uses: actions/download-artifact@v2
         with:
           name: changed_images
@@ -1212,6 +1215,41 @@ jobs:
           docker kill "$(docker ps -q)" ||:
           docker rm -f "$(docker ps -a -q)" ||:
           sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseS3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, s3 storage, actions)
+          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
+          KILL_TIMEOUT=10800
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAarch64:
     needs: [BuilderDebAarch64]
     runs-on: [self-hosted, func-tester-aarch64]
@@ -3034,6 +3072,7 @@ jobs:
       - FunctionalStatefulTestTsan
       - FunctionalStatefulTestMsan
       - FunctionalStatefulTestUBsan
+      - FunctionalStatelessTestReleaseS3
       - StressTestDebug
       - StressTestAsan
       - StressTestTsan
.github/workflows/release.yml (vendored, 1 change)

@@ -22,7 +22,6 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v2
      - name: Download packages and push to Artifactory
-        env:
         run: |
           rm -rf "$TEMP_PATH" && mkdir -p "$REPO_COPY"
           cp -r "$GITHUB_WORKSPACE" "$REPO_COPY"
.github/workflows/tags_stable.yml (new file, vendored, 38 lines)

@@ -0,0 +1,38 @@
+name: TagsStableWorkflow
+# - Gets artifacts from S3
+# - Sends it to JFROG Artifactory
+# - Adds them to the release assets
+
+on:  # yamllint disable-line rule:truthy
+  push:
+    tags:
+      - 'v*-stable'
+      - 'v*-lts'
+
+
+jobs:
+  UpdateVersions:
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Get tag name
+        run: echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          ref: master
+      - name: Generate versions
+        run: |
+          git fetch --tags
+          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v3
+        with:
+          commit-message: Update version_date.tsv after ${{ env.GITHUB_TAG }}
+          branch: auto/${{ env.GITHUB_TAG }}
+          delete-branch: true
+          title: Update version_date.tsv after ${{ env.GITHUB_TAG }}
+          body: |
+            Update version_date.tsv after ${{ env.GITHUB_TAG }}
+
+            Changelog category (leave one):
+            - Not for changelog (changelog entry is not required)
.gitmodules (vendored, 3 changes)

@@ -259,3 +259,6 @@
 [submodule "contrib/azure"]
 	path = contrib/azure
 	url = https://github.com/ClickHouse-Extras/azure-sdk-for-cpp.git
+[submodule "contrib/minizip-ng"]
+	path = contrib/minizip-ng
+	url = https://github.com/zlib-ng/minizip-ng
.potato.yml (deleted, 27 lines)

@@ -1,27 +0,0 @@
-# This is the configuration file with settings for Potato.
-# Potato is an internal Yandex technology that allows us to sync internal [Yandex.Tracker](https://yandex.com/tracker/) and GitHub.
-
-# For all PRs where documentation is needed, just add a 'pr-feature' label and we will include it into documentation sprints.
-
-# The project name.
-name: clickhouse
-# Object handlers defines which handlers we use.
-handlers:
-  # The handler for creating an Yandex.Tracker issue.
-  - name: issue-create
-    params:
-      triggers:
-        # The trigger for creating the Yandex.Tracker issue. When the specified event occurs, it transfers PR data to Yandex.Tracker.
-        github:pullRequest:labeled:
-          data:
-            # The Yandex.Tracker queue to create the issue in. Each issue in Tracker belongs to one of the project queues.
-            queue: CLICKHOUSEDOCS
-            # The issue title.
-            summary: '[Potato] Pull Request #{{pullRequest.number}}'
-            # The issue description.
-            description: >
-              {{pullRequest.description}}
-
-              Ссылка на Pull Request: {{pullRequest.webUrl}}
-            # The condition for creating the Yandex.Tracker issue.
-            condition: eventPayload.labels.filter(label => ['pr-feature'].includes(label.name)).length
@@ -67,7 +67,7 @@ if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURC
     message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
 endif ()
 
-include (cmake/find/ccache.cmake)
+include (cmake/ccache.cmake)
 
 # Take care to add prlimit in command line before ccache, or else ccache thinks that
 # prlimit is compiler, and clang++ is its input file, and refuses to work with
@@ -247,8 +247,6 @@ endif()
 
 if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
     set(USE_DEBUG_HELPERS ON)
-else ()
-    set(USE_DEBUG_HELPERS ON)
 endif()
 option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
 
@@ -403,17 +401,6 @@ else ()
     option(WERROR "Enable -Werror compiler option" ON)
 endif ()
 
-if (WERROR)
-    # Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks.
-    # Instead, adopt modern cmake usage requirement.
-    target_compile_options(global-libs INTERFACE "-Werror")
-endif ()
-
-# Make this extra-checks for correct library dependencies.
-if (OS_LINUX AND NOT SANITIZE)
-    target_link_options(global-libs INTERFACE "-Wl,--no-undefined")
-endif ()
-
 # Increase stack size on Musl. We need big stack for our recursive-descend parser.
 if (USE_MUSL)
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,stack-size=2097152")
@@ -421,6 +408,7 @@ endif ()
 
 include(cmake/dbms_glob_sources.cmake)
 
+add_library(global-group INTERFACE)
 if (OS_LINUX OR OS_ANDROID)
     include(cmake/linux/default_libs.cmake)
 elseif (OS_DARWIN)
@@ -428,6 +416,18 @@ elseif (OS_DARWIN)
 elseif (OS_FREEBSD)
     include(cmake/freebsd/default_libs.cmake)
 endif ()
+link_libraries(global-group)
+
+if (WERROR)
+    # Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks.
+    # Instead, adopt modern cmake usage requirement.
+    target_compile_options(global-group INTERFACE "-Werror")
+endif ()
+
+# Make this extra-checks for correct library dependencies.
+if (OS_LINUX AND NOT SANITIZE)
+    target_link_options(global-group INTERFACE "-Wl,--no-undefined")
+endif ()
 
 ######################################
 ### Add targets below this comment ###
|
|||||||
| 21.7 | :x: |
|
| 21.7 | :x: |
|
||||||
| 21.8 | ✅ |
|
| 21.8 | ✅ |
|
||||||
| 21.9 | :x: |
|
| 21.9 | :x: |
|
||||||
| 21.10 | ✅ |
|
| 21.10 | :x: |
|
||||||
| 21.11 | ✅ |
|
| 21.11 | ✅ |
|
||||||
| 21.12 | ✅ |
|
| 21.12 | ✅ |
|
||||||
|
| 22.1 | ✅ |
|
||||||
|
|
||||||
## Reporting a Vulnerability
|
## Reporting a Vulnerability
|
||||||
|
|
||||||
|
@ -2,7 +2,9 @@
|
|||||||
|
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <string_view>
|
#include <string_view>
|
||||||
|
#include <algorithm>
|
||||||
|
|
||||||
|
#include <cassert>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <sys/select.h>
|
#include <sys/select.h>
|
||||||
@ -34,13 +36,37 @@ bool hasInputData()
|
|||||||
return select(1, &fds, nullptr, nullptr, &timeout) == 1;
|
return select(1, &fds, nullptr, nullptr, &timeout) == 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct NoCaseCompare
|
||||||
|
{
|
||||||
|
bool operator()(const std::string & str1, const std::string & str2)
|
||||||
|
{
|
||||||
|
return std::lexicographical_compare(begin(str1), end(str1), begin(str2), end(str2), [](const char c1, const char c2)
|
||||||
|
{
|
||||||
|
return std::tolower(c1) < std::tolower(c2);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
using Words = std::vector<std::string>;
|
||||||
|
template <class Compare>
|
||||||
|
void addNewWords(Words & to, const Words & from, Compare comp)
|
||||||
|
{
|
||||||
|
size_t old_size = to.size();
|
||||||
|
size_t new_size = old_size + from.size();
|
||||||
|
|
||||||
|
to.reserve(new_size);
|
||||||
|
to.insert(to.end(), from.begin(), from.end());
|
||||||
|
auto middle = to.begin() + old_size;
|
||||||
|
std::inplace_merge(to.begin(), middle, to.end(), comp);
|
||||||
|
|
||||||
|
auto last_unique = std::unique(to.begin(), to.end());
|
||||||
|
to.erase(last_unique, to.end());
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<LineReader::Suggest::WordsRange> LineReader::Suggest::getCompletions(const String & prefix, size_t prefix_length) const
|
}
|
||||||
{
|
|
||||||
if (!ready)
|
|
||||||
return std::nullopt;
|
|
||||||
|
|
||||||
|
replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String & prefix, size_t prefix_length)
|
||||||
|
{
|
||||||
std::string_view last_word;
|
std::string_view last_word;
|
||||||
|
|
||||||
auto last_word_pos = prefix.find_last_of(word_break_characters);
|
auto last_word_pos = prefix.find_last_of(word_break_characters);
|
||||||
@ -48,21 +74,45 @@ std::optional<LineReader::Suggest::WordsRange> LineReader::Suggest::getCompletio
|
|||||||
last_word = prefix;
|
last_word = prefix;
|
||||||
else
|
else
|
||||||
last_word = std::string_view(prefix).substr(last_word_pos + 1, std::string::npos);
|
last_word = std::string_view(prefix).substr(last_word_pos + 1, std::string::npos);
|
||||||
|
|
||||||
/// last_word can be empty.
|
/// last_word can be empty.
|
||||||
|
|
||||||
|
std::pair<Words::const_iterator, Words::const_iterator> range;
|
||||||
|
|
||||||
|
std::lock_guard lock(mutex);
|
||||||
|
|
||||||
/// Only perform case sensitive completion when the prefix string contains any uppercase characters
|
/// Only perform case sensitive completion when the prefix string contains any uppercase characters
|
||||||
if (std::none_of(prefix.begin(), prefix.end(), [&](auto c) { return c >= 'A' && c <= 'Z'; }))
|
if (std::none_of(prefix.begin(), prefix.end(), [](char32_t x) { return iswupper(static_cast<wint_t>(x)); }))
|
||||||
return std::equal_range(
|
range = std::equal_range(
|
||||||
words_no_case.begin(), words_no_case.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
|
words_no_case.begin(), words_no_case.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
|
||||||
{
|
{
|
||||||
return strncasecmp(s.data(), prefix_searched.data(), prefix_length) < 0;
|
return strncasecmp(s.data(), prefix_searched.data(), prefix_length) < 0;
|
||||||
});
|
});
|
||||||
else
|
else
|
||||||
return std::equal_range(words.begin(), words.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
|
range = std::equal_range(words.begin(), words.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
|
||||||
{
|
{
|
||||||
return strncmp(s.data(), prefix_searched.data(), prefix_length) < 0;
|
return strncmp(s.data(), prefix_searched.data(), prefix_length) < 0;
|
||||||
});
|
});
|
||||||
|
|
||||||
|
return replxx::Replxx::completions_t(range.first, range.second);
|
||||||
|
}
|
||||||
|
|
||||||
|
void LineReader::Suggest::addWords(Words && new_words)
|
||||||
|
{
|
||||||
|
Words new_words_no_case = new_words;
|
||||||
|
if (!new_words.empty())
|
||||||
|
{
|
||||||
|
std::sort(new_words.begin(), new_words.end());
|
||||||
|
std::sort(new_words_no_case.begin(), new_words_no_case.end(), NoCaseCompare{});
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
std::lock_guard lock(mutex);
|
||||||
|
addNewWords(words, new_words, std::less<std::string>{});
|
||||||
|
addNewWords(words_no_case, new_words_no_case, NoCaseCompare{});
|
||||||
|
|
||||||
|
assert(std::is_sorted(words.begin(), words.end()));
|
||||||
|
assert(std::is_sorted(words_no_case.begin(), words_no_case.end(), NoCaseCompare{}));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
LineReader::LineReader(const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_)
|
LineReader::LineReader(const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_)
|
||||||
|
@ -1,10 +1,12 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <base/types.h>
|
#include <mutex>
|
||||||
|
|
||||||
#include <atomic>
|
#include <atomic>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <optional>
|
#include <optional>
|
||||||
|
#include <replxx.hxx>
|
||||||
|
|
||||||
|
#include <base/types.h>
|
||||||
|
|
||||||
class LineReader
|
class LineReader
|
||||||
{
|
{
|
||||||
@ -12,14 +14,16 @@ public:
|
|||||||
struct Suggest
|
struct Suggest
|
||||||
{
|
{
|
||||||
using Words = std::vector<std::string>;
|
using Words = std::vector<std::string>;
|
||||||
using WordsRange = std::pair<Words::const_iterator, Words::const_iterator>;
|
|
||||||
|
|
||||||
|
/// Get vector for the matched range of words if any.
|
||||||
|
replxx::Replxx::completions_t getCompletions(const String & prefix, size_t prefix_length);
|
||||||
|
void addWords(Words && new_words);
|
||||||
|
|
||||||
|
private:
|
||||||
Words words;
|
Words words;
|
||||||
Words words_no_case;
|
Words words_no_case;
|
||||||
std::atomic<bool> ready{false};
|
|
||||||
|
|
||||||
/// Get iterators for the matched range of words if any.
|
std::mutex mutex;
|
||||||
std::optional<WordsRange> getCompletions(const String & prefix, size_t prefix_length) const;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
using Patterns = std::vector<const char *>;
|
using Patterns = std::vector<const char *>;
|
||||||
|
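
Aside (not part of the commit): a self-contained sketch of the completion technique the LineReader hunks above rely on — keep the word list sorted, fold each new batch in with std::inplace_merge, and answer a prefix query with std::equal_range that compares only the first prefix-length characters. All names below are illustrative; only the standard library is assumed.

// Standalone illustration; the real code lives in LineReader::Suggest.
#include <algorithm>
#include <cassert>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

using Words = std::vector<std::string>;

void addNewWords(Words & to, Words from)
{
    std::sort(from.begin(), from.end());                    /// incoming batch must be sorted before merging
    size_t old_size = to.size();
    to.insert(to.end(), from.begin(), from.end());
    std::inplace_merge(to.begin(), to.begin() + old_size, to.end());
    to.erase(std::unique(to.begin(), to.end()), to.end());  /// drop duplicates, keep the list sorted
}

Words getCompletions(const Words & words, const std::string & prefix)
{
    /// Compare only the first prefix.size() characters, so the range covers every word with that prefix.
    auto range = std::equal_range(
        words.begin(), words.end(), prefix,
        [len = prefix.size()](const std::string & a, const std::string & b)
        { return strncmp(a.data(), b.data(), len) < 0; });
    return Words(range.first, range.second);
}

int main()
{
    Words words;
    addNewWords(words, {"SELECT", "SETTINGS", "SHOW"});
    addNewWords(words, {"SET", "SELECT"});                   /// duplicate SELECT is dropped
    assert(std::is_sorted(words.begin(), words.end()));

    for (const auto & w : getCompletions(words, "SE"))
        std::cout << w << '\n';                              /// prints SELECT, SET, SETTINGS
}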
@@ -25,13 +25,6 @@ void trim(String & s)
     s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }).base(), s.end());
 }
 
-/// Check if string ends with given character after skipping whitespaces.
-bool ends_with(const std::string_view & s, const std::string_view & p)
-{
-    auto ss = std::string_view(s.data(), s.rend() - std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }));
-    return ss.ends_with(p);
-}
-
 std::string getEditor()
 {
     const char * editor = std::getenv("EDITOR");
@@ -132,8 +125,14 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
 
 }
 
+static bool replxx_last_is_delimiter = false;
+void ReplxxLineReader::setLastIsDelimiter(bool flag)
+{
+    replxx_last_is_delimiter = flag;
+}
+
 ReplxxLineReader::ReplxxLineReader(
-    const Suggest & suggest,
+    Suggest & suggest,
     const String & history_file_path_,
     bool multiline_,
     Patterns extenders_,
@@ -179,14 +178,13 @@ ReplxxLineReader::ReplxxLineReader(
 
     auto callback = [&suggest] (const String & context, size_t context_size)
    {
-        if (auto range = suggest.getCompletions(context, context_size))
-            return Replxx::completions_t(range->first, range->second);
-        return Replxx::completions_t();
+        return suggest.getCompletions(context, context_size);
     };
 
     rx.set_completion_callback(callback);
     rx.set_complete_on_empty(false);
     rx.set_word_break_characters(word_break_characters);
+    rx.set_ignore_case(true);
 
     if (highlighter)
         rx.set_highlighter_callback(highlighter);
@@ -198,21 +196,11 @@ ReplxxLineReader::ReplxxLineReader(
 
     auto commit_action = [this](char32_t code)
     {
-        std::string_view str = rx.get_state().text();
-
-        /// Always commit line when we see extender at the end. It will start a new prompt.
-        for (const auto * extender : extenders)
-            if (ends_with(str, extender))
-                return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
-
-        /// If we see an delimiter at the end, commit right away.
-        for (const auto * delimiter : delimiters)
-            if (ends_with(str, delimiter))
-                return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
-
         /// If we allow multiline and there is already something in the input, start a newline.
-        if (multiline && !input.empty())
+        /// NOTE: Lexer is only available if we use highlighter.
+        if (highlighter && multiline && !replxx_last_is_delimiter)
             return rx.invoke(Replxx::ACTION::NEW_LINE, code);
+        replxx_last_is_delimiter = false;
         return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
     };
     /// bind C-j to ENTER action.
@@ -9,7 +9,7 @@ class ReplxxLineReader : public LineReader
 {
 public:
     ReplxxLineReader(
-        const Suggest & suggest,
+        Suggest & suggest,
         const String & history_file_path,
         bool multiline,
         Patterns extenders_,
@@ -19,6 +19,9 @@ public:
 
     void enableBracketedPaste() override;
 
+    /// If highlight is on, we will set a flag to denote whether the last token is a delimiter.
+    /// This is useful to determine the behavior of <ENTER> key when multiline is enabled.
+    static void setLastIsDelimiter(bool flag);
 private:
     InputStatus readOneLine(const String & prompt) override;
     void addToHistory(const String & line) override;
@@ -12,6 +12,8 @@ namespace
 {
     template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
     template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
+    /// For implicit conversion of fmt::basic_runtime<> to char* for std::string ctor
+    template <typename T, typename... Ts> constexpr auto firstArg(fmt::basic_runtime<T> && data, Ts &&...) { return data.str.data(); }
 }
 
 
base/base/sort.h (127 changes)

@@ -1,26 +1,133 @@
 #pragma once
 
+#include <pdqsort.h>
+
+#ifndef NDEBUG
+
+#include <pcg_random.hpp>
+#include <base/getThreadId.h>
+
+/** Same as libcxx std::__debug_less. Just without dependency on private part of standard library.
+  * Check that Comparator induce strict weak ordering.
+  */
+template <typename Comparator>
+class DebugLessComparator
+{
+public:
+    constexpr DebugLessComparator(Comparator & cmp_)
+        : cmp(cmp_)
+    {}
+
+    template <typename LhsType, typename RhsType>
+    constexpr bool operator()(const LhsType & lhs, const RhsType & rhs)
+    {
+        bool lhs_less_than_rhs = cmp(lhs, rhs);
+        if (lhs_less_than_rhs)
+            assert(!cmp(rhs, lhs));
+
+        return lhs_less_than_rhs;
+    }
+
+    template <typename LhsType, typename RhsType>
+    constexpr bool operator()(LhsType & lhs, RhsType & rhs)
+    {
+        bool lhs_less_than_rhs = cmp(lhs, rhs);
+        if (lhs_less_than_rhs)
+            assert(!cmp(rhs, lhs));
+
+        return lhs_less_than_rhs;
+    }
+
+private:
+    Comparator & cmp;
+};
+
+template <typename Comparator>
+using ComparatorWrapper = DebugLessComparator<Comparator>;
+
+template <typename RandomIt>
+void shuffle(RandomIt first, RandomIt last)
+{
+    static thread_local pcg64 rng(getThreadId());
+    std::shuffle(first, last, rng);
+}
+
+#else
+
+template <typename Comparator>
+using ComparatorWrapper = Comparator;
+
+#endif
+
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wold-style-cast"
 
 #include <miniselect/floyd_rivest_select.h>
 
-template <class RandomIt>
+template <typename RandomIt>
 void nth_element(RandomIt first, RandomIt nth, RandomIt last)
 {
-    ::miniselect::floyd_rivest_select(first, nth, last);
+    using value_type = typename std::iterator_traits<RandomIt>::value_type;
+    using comparator = std::less<value_type>;
+
+    comparator compare;
+    ComparatorWrapper<comparator> compare_wrapper = compare;
+
+#ifndef NDEBUG
+    ::shuffle(first, last);
+#endif
+
+    ::miniselect::floyd_rivest_select(first, nth, last, compare_wrapper);
+
+#ifndef NDEBUG
+    ::shuffle(first, nth);
+
+    if (nth != last)
+        ::shuffle(nth + 1, last);
+#endif
 }
 
-template <class RandomIt>
-void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
-{
-    ::miniselect::floyd_rivest_partial_sort(first, middle, last);
-}
-
-template <class RandomIt, class Compare>
+template <typename RandomIt, typename Compare>
 void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
 {
-    ::miniselect::floyd_rivest_partial_sort(first, middle, last, compare);
+#ifndef NDEBUG
+    ::shuffle(first, last);
+#endif
+
+    ComparatorWrapper<Compare> compare_wrapper = compare;
+    ::miniselect::floyd_rivest_partial_sort(first, middle, last, compare_wrapper);
+
+#ifndef NDEBUG
+    ::shuffle(middle, last);
+#endif
+}
+
+template <typename RandomIt>
+void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
+{
+    using value_type = typename std::iterator_traits<RandomIt>::value_type;
+    using comparator = std::less<value_type>;
+
+    ::partial_sort(first, middle, last, comparator());
 }
 
 #pragma GCC diagnostic pop
+
+template <typename RandomIt, typename Compare>
+void sort(RandomIt first, RandomIt last, Compare compare)
+{
+#ifndef NDEBUG
+    ::shuffle(first, last);
+#endif
+
+    ComparatorWrapper<Compare> compare_wrapper = compare;
+    ::pdqsort(first, last, compare_wrapper);
+}
+
+template <typename RandomIt>
+void sort(RandomIt first, RandomIt last)
+{
+    using value_type = typename std::iterator_traits<RandomIt>::value_type;
+    using comparator = std::less<value_type>;
+    ::sort(first, last, comparator());
+}
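
Aside (not part of the commit): the new sort.h guards debug builds with a comparator wrapper that checks strict weak ordering, plus a shuffle before and after sorting so tests cannot silently depend on the order of equal elements. A minimal standalone sketch of that idea, with illustrative names and only the standard library assumed:

#include <algorithm>
#include <cassert>
#include <random>
#include <vector>

template <typename Comparator>
struct DebugLess
{
    Comparator cmp;

    template <typename L, typename R>
    bool operator()(const L & lhs, const R & rhs)
    {
        bool less = cmp(lhs, rhs);
        if (less)
            assert(!cmp(rhs, lhs));   /// strict weak ordering: cmp(a, b) and cmp(b, a) cannot both hold
        return less;
    }
};

int main()
{
    std::vector<int> v{3, 1, 2, 2, 5};
    std::shuffle(v.begin(), v.end(), std::mt19937{42});        /// mimics the debug-only pre-shuffle

    std::sort(v.begin(), v.end(), DebugLess<std::less<int>>{}); /// a proper strict-weak comparator passes
    assert(std::is_sorted(v.begin(), v.end()));

    /// A comparator such as std::less_equal<int> would trip the assert above,
    /// because for equal elements both directions compare "less".
}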
@@ -12,6 +12,18 @@
 #include <tuple>
 #include <limits>
 
+#include <boost/multiprecision/cpp_bin_float.hpp>
+#include <boost/math/special_functions/fpclassify.hpp>
+
+/// Use same extended double for all platforms
+#if (LDBL_MANT_DIG == 64)
+#define CONSTEXPR_FROM_DOUBLE constexpr
+using FromDoubleIntermediateType = long double;
+#else
+/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
+#define CONSTEXPR_FROM_DOUBLE
+using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;
+#endif
+
 namespace wide
 {
@@ -265,26 +277,37 @@ struct integer<Bits, Signed>::_impl
     constexpr static void set_multiplier(integer<Bits, Signed> & self, T t) noexcept
     {
         constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();
+        static_assert(std::is_same_v<T, double> || std::is_same_v<T, FromDoubleIntermediateType>);
         /// Implementation specific behaviour on overflow (if we don't check here, stack overflow will triggered in bigint_cast).
-        if (!std::isfinite(t))
-        {
-            self = 0;
-            return;
+        if constexpr (std::is_same_v<T, double>)
+        {
+            if (!std::isfinite(t))
+            {
+                self = 0;
+                return;
+            }
+        }
+        else
+        {
+            if (!boost::math::isfinite(t))
+            {
+                self = 0;
+                return;
+            }
         }
 
         const T alpha = t / static_cast<T>(max_int);
 
         if (alpha <= static_cast<T>(max_int))
             self = static_cast<uint64_t>(alpha);
         else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
-            set_multiplier<double>(self, alpha);
+            set_multiplier<double>(self, static_cast<double>(alpha));
 
         self *= max_int;
         self += static_cast<uint64_t>(t - floor(alpha) * static_cast<T>(max_int)); // += b_i
     }
 
-    constexpr static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
+    CONSTEXPR_FROM_DOUBLE static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
     {
         constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
         constexpr int64_t min_int = std::numeric_limits<int64_t>::lowest();
@@ -294,24 +317,17 @@ struct integer<Bits, Signed>::_impl
         /// the result may not fit in 64 bits.
         /// The example of such a number is 9.22337e+18.
         /// As to_Integral does a static_cast to int64_t, it may result in UB.
-        /// The necessary check here is that long double has enough significant (mantissa) bits to store the
+        /// The necessary check here is that FromDoubleIntermediateType has enough significant (mantissa) bits to store the
         /// int64_t max value precisely.
 
-        // TODO Be compatible with Apple aarch64
-#if not (defined(__APPLE__) && defined(__aarch64__))
-        static_assert(LDBL_MANT_DIG >= 64,
-                      "On your system long double has less than 64 precision bits, "
-                      "which may result in UB when initializing double from int64_t");
-#endif
-
-        if (rhs > static_cast<long double>(min_int) && rhs < static_cast<long double>(max_int))
+        if (rhs > static_cast<FromDoubleIntermediateType>(min_int) && rhs < static_cast<FromDoubleIntermediateType>(max_int))
         {
             self = static_cast<int64_t>(rhs);
             return;
         }
 
-        const long double rhs_long_double = (static_cast<long double>(rhs) < 0)
-            ? -static_cast<long double>(rhs)
+        const FromDoubleIntermediateType rhs_long_double = (static_cast<FromDoubleIntermediateType>(rhs) < 0)
+            ? -static_cast<FromDoubleIntermediateType>(rhs)
             : rhs;
 
         set_multiplier(self, rhs_long_double);
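
Aside (not part of the commit): a hedged reading of why FromDoubleIntermediateType exists — converting a double near 2^63 to int64_t safely needs an intermediate floating type with at least 64 mantissa bits, so that the int64_t bounds are representable exactly. x86 long double has exactly 64, while on some targets (the removed #if named Apple aarch64) long double is a plain 53-bit-mantissa double, so the header now falls back to boost::multiprecision::cpp_bin_float_double_extended there. A tiny check of the platform assumption only; it does not reproduce the wide-integer conversion:

#include <cfloat>
#include <cstdio>

int main()
{
    std::printf("long double mantissa bits: %d\n", LDBL_MANT_DIG);
#if (LDBL_MANT_DIG == 64)
    std::printf("long double is wide enough to be the intermediate type\n");
#else
    std::printf("long double is too narrow; a wider soft-float type would be needed\n");
#endif
}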
@@ -79,18 +79,14 @@ static void call_default_signal_handler(int sig)
     raise(sig);
 }
 
-static constexpr size_t max_query_id_size = 127;
-
 static const size_t signal_pipe_buf_size =
     sizeof(int)
     + sizeof(siginfo_t)
-    + sizeof(ucontext_t)
+    + sizeof(ucontext_t*)
     + sizeof(StackTrace)
     + sizeof(UInt32)
-    + max_query_id_size + 1 /// query_id + varint encoded length
     + sizeof(void*);
 
 using signal_function = void(int, siginfo_t*, void*);
 
 static void writeSignalIDtoSignalPipe(int sig)
@@ -129,18 +125,14 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
     char buf[signal_pipe_buf_size];
     DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
 
-    const ucontext_t signal_context = *reinterpret_cast<ucontext_t *>(context);
-    const StackTrace stack_trace(signal_context);
-
-    StringRef query_id = DB::CurrentThread::getQueryId(); /// This is signal safe.
-    query_id.size = std::min(query_id.size, max_query_id_size);
+    const ucontext_t * signal_context = reinterpret_cast<ucontext_t *>(context);
+    const StackTrace stack_trace(*signal_context);
 
     DB::writeBinary(sig, out);
     DB::writePODBinary(*info, out);
     DB::writePODBinary(signal_context, out);
     DB::writePODBinary(stack_trace, out);
     DB::writeBinary(UInt32(getThreadId()), out);
-    DB::writeStringBinary(query_id, out);
     DB::writePODBinary(DB::current_thread, out);
 
     out.next();
@@ -184,6 +176,8 @@ public:
 
     void run() override
     {
+        static_assert(PIPE_BUF >= 512);
+        static_assert(signal_pipe_buf_size <= PIPE_BUF, "Only write of PIPE_BUF to pipe is atomic and the minimal known PIPE_BUF across supported platforms is 512");
         char buf[signal_pipe_buf_size];
         DB::ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf);
 
@@ -227,10 +221,9 @@ public:
             else
             {
                 siginfo_t info{};
-                ucontext_t context{};
+                ucontext_t * context{};
                 StackTrace stack_trace(NoCapture{});
                 UInt32 thread_num{};
-                std::string query_id;
                 DB::ThreadStatus * thread_ptr{};
 
                 if (sig != SanitizerTrap)
@@ -241,12 +234,11 @@ public:
 
                 DB::readPODBinary(stack_trace, in);
                 DB::readBinary(thread_num, in);
-                DB::readBinary(query_id, in);
                 DB::readPODBinary(thread_ptr, in);
 
                 /// This allows to receive more signals if failure happens inside onFault function.
                 /// Example: segfault while symbolizing stack trace.
-                std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, query_id, thread_ptr); }).detach();
+                std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, thread_ptr); }).detach();
             }
         }
     }
@@ -279,18 +271,27 @@ private:
     void onFault(
         int sig,
         const siginfo_t & info,
-        const ucontext_t & context,
+        ucontext_t * context,
        const StackTrace & stack_trace,
         UInt32 thread_num,
-        const std::string & query_id,
         DB::ThreadStatus * thread_ptr) const
     {
         DB::ThreadStatus thread_status;
 
+        String query_id;
+        String query;
+
         /// Send logs from this thread to client if possible.
         /// It will allow client to see failure messages directly.
         if (thread_ptr)
         {
+            query_id = thread_ptr->getQueryId().toString();
+
+            if (auto thread_group = thread_ptr->getThreadGroup())
+            {
+                query = thread_group->query;
+            }
+
             if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())
                 DB::CurrentThread::attachInternalTextLogsQueue(logs_queue, DB::LogsLevel::trace);
         }
@@ -305,19 +306,19 @@ private:
         }
         else
         {
-            LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) Received signal {} ({})",
+            LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})",
                 VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info,
-                thread_num, query_id, strsignal(sig), sig);
+                thread_num, query_id, query, strsignal(sig), sig);
         }
 
         String error_message;
 
         if (sig != SanitizerTrap)
-            error_message = signalToErrorMessage(sig, info, context);
+            error_message = signalToErrorMessage(sig, info, *context);
         else
             error_message = "Sanitizer trap.";
 
-        LOG_FATAL(log, error_message);
+        LOG_FATAL(log, fmt::runtime(error_message));
 
         if (stack_trace.getSize())
         {
@@ -330,11 +331,11 @@ private:
             for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
                 bare_stacktrace << ' ' << stack_trace.getFramePointers()[i];
 
-            LOG_FATAL(log, bare_stacktrace.str());
+            LOG_FATAL(log, fmt::runtime(bare_stacktrace.str()));
         }
 
         /// Write symbolized stack trace line by line for better grep-ability.
-        stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
+        stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, fmt::runtime(s)); });
 
 #if defined(OS_LINUX)
         /// Write information about binary checksum. It can be difficult to calculate, so do it only after printing stack trace.
@@ -389,20 +390,16 @@ static void sanitizerDeathCallback()
 
     const StackTrace stack_trace;
 
-    StringRef query_id = DB::CurrentThread::getQueryId();
-    query_id.size = std::min(query_id.size, max_query_id_size);
-
     int sig = SignalListener::SanitizerTrap;
     DB::writeBinary(sig, out);
     DB::writePODBinary(stack_trace, out);
     DB::writeBinary(UInt32(getThreadId()), out);
-    DB::writeStringBinary(query_id, out);
     DB::writePODBinary(DB::current_thread, out);
 
     out.next();
 
     /// The time that is usually enough for separate thread to print info into log.
-    sleepForSeconds(10);
+    sleepForSeconds(20);
 }
 #endif
 
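
Aside (not part of the commit): the new static_assert above relies on the POSIX guarantee that a single write() of at most PIPE_BUF bytes to a pipe is atomic, and that PIPE_BUF is at least 512 on supported platforms; that is what lets the signal handler push one fixed-size record through the pipe without any locking. A trivial check of the platform value only:

#include <climits>
#include <cstdio>

int main()
{
#ifdef PIPE_BUF
    std::printf("PIPE_BUF = %d\n", static_cast<int>(PIPE_BUF));  /// POSIX requires at least 512
#else
    std::printf("PIPE_BUF is not defined on this platform\n");
#endif
}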
@@ -22,16 +22,12 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 
-include (cmake/find/cxx.cmake)
+include (cmake/cxx.cmake)
 
-add_library(global-group INTERFACE)
-
 target_link_libraries(global-group INTERFACE
     $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
 )
 
-link_libraries(global-group)
-
 # FIXME: remove when all contribs will get custom cmake lists
 install(
     TARGETS global-group global-libs
|
|||||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||||
find_package(Threads REQUIRED)
|
find_package(Threads REQUIRED)
|
||||||
|
|
||||||
include (cmake/find/unwind.cmake)
|
include (cmake/unwind.cmake)
|
||||||
include (cmake/find/cxx.cmake)
|
include (cmake/cxx.cmake)
|
||||||
|
|
||||||
add_library(global-group INTERFACE)
|
|
||||||
|
|
||||||
target_link_libraries(global-group INTERFACE
|
target_link_libraries(global-group INTERFACE
|
||||||
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
|
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
|
||||||
)
|
)
|
||||||
|
|
||||||
link_libraries(global-group)
|
|
||||||
|
|
||||||
# FIXME: remove when all contribs will get custom cmake lists
|
# FIXME: remove when all contribs will get custom cmake lists
|
||||||
install(
|
install(
|
||||||
TARGETS global-group global-libs
|
TARGETS global-group global-libs
|
||||||
|
@@ -42,18 +42,15 @@ if (NOT OS_ANDROID)
     add_subdirectory(base/harmful)
 endif ()
 
-include (cmake/find/unwind.cmake)
-include (cmake/find/cxx.cmake)
+include (cmake/unwind.cmake)
+include (cmake/cxx.cmake)
 
-add_library(global-group INTERFACE)
 target_link_libraries(global-group INTERFACE
     -Wl,--start-group
     $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
     -Wl,--end-group
 )
 
-link_libraries(global-group)
-
 # FIXME: remove when all contribs will get custom cmake lists
 install(
     TARGETS global-group global-libs
contrib/CMakeLists.txt (vendored, 1 change)

@@ -78,6 +78,7 @@ add_contrib (croaring-cmake croaring)
 add_contrib (zstd-cmake zstd)
 add_contrib (zlib-ng-cmake zlib-ng)
 add_contrib (bzip2-cmake bzip2)
+add_contrib (minizip-ng-cmake minizip-ng)
 add_contrib (snappy-cmake snappy)
 add_contrib (rocksdb-cmake rocksdb)
 add_contrib (thrift-cmake thrift)
@@ -29,12 +29,6 @@ if (OS_FREEBSD)
     message (FATAL_ERROR "Using internal parquet library on FreeBSD is not supported")
 endif()
 
-if(USE_STATIC_LIBRARIES)
-    set(FLATBUFFERS_LIBRARY flatbuffers)
-else()
-    set(FLATBUFFERS_LIBRARY flatbuffers_shared)
-endif()
-
 set (CMAKE_CXX_STANDARD 17)
 
 set(ARROW_VERSION "6.0.1")
@@ -95,9 +89,16 @@ set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
 
 add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
 
-message(STATUS "FLATBUFFERS_LIBRARY: ${FLATBUFFERS_LIBRARY}")
+add_library(_flatbuffers INTERFACE)
+if(USE_STATIC_LIBRARIES)
+    target_link_libraries(_flatbuffers INTERFACE flatbuffers)
+else()
+    target_link_libraries(_flatbuffers INTERFACE flatbuffers_shared)
+endif()
+target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
 
 # === hdfs
+# NOTE: cannot use ch_contrib::hdfs since it's INCLUDE_DIRECTORIES does not includes trailing "hdfs/"
 set(HDFS_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include/hdfs/")
 
 # arrow-cmake cmake file calling orc cmake subroutine which detects certain compiler features.
@@ -123,8 +124,6 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A
 
 # ARROW_ORC + adapters/orc/CMakefiles
 set(ORC_SRCS
-    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc"
-    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc"
     "${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
     "${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
     "${ORC_SOURCE_SRC_DIR}/Reader.cc"
@@ -151,6 +150,22 @@ set(ORC_SRCS
     "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
 )
 
+add_library(_orc ${ORC_SRCS})
+target_link_libraries(_orc PRIVATE
+    ch_contrib::protobuf
+    ch_contrib::lz4
+    ch_contrib::snappy
+    ch_contrib::zlib
+    ch_contrib::zstd)
+target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR})
+target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR})
+target_include_directories(_orc SYSTEM PRIVATE
+    ${ORC_SOURCE_SRC_DIR}
+    ${ORC_SOURCE_WRAP_DIR}
+    ${ORC_BUILD_SRC_DIR}
+    ${ORC_ADDITION_SOURCE_DIR}
+    ${ARROW_SRC_DIR})
+
 
 # === arrow
 
@@ -336,7 +351,8 @@ set(ARROW_SRCS
     "${LIBRARY_DIR}/ipc/reader.cc"
     "${LIBRARY_DIR}/ipc/writer.cc"
 
-    ${ORC_SRCS}
+    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc"
+    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc"
 )
 
 add_definitions(-DARROW_WITH_LZ4)
@@ -356,30 +372,27 @@ endif ()
 
 add_library(_arrow ${ARROW_SRCS})
 
-# Arrow dependencies
-add_dependencies(_arrow ${FLATBUFFERS_LIBRARY})
+target_link_libraries(_arrow PRIVATE
+    boost::filesystem
 
-target_link_libraries(_arrow PRIVATE ${FLATBUFFERS_LIBRARY} boost::filesystem)
+    _flatbuffers
 
+    ch_contrib::double_conversion
 
+    ch_contrib::lz4
+    ch_contrib::snappy
+    ch_contrib::zlib
+    ch_contrib::zstd
+    ch_contrib::zstd
+)
+target_link_libraries(_arrow PUBLIC _orc)
 
 add_dependencies(_arrow protoc)
 
 target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ARROW_SRC_DIR})
 target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src")
-target_link_libraries(_arrow PRIVATE ch_contrib::double_conversion)
-target_link_libraries(_arrow PRIVATE ch_contrib::protobuf)
-target_link_libraries(_arrow PRIVATE ch_contrib::lz4)
-target_link_libraries(_arrow PRIVATE ch_contrib::snappy)
-target_link_libraries(_arrow PRIVATE ch_contrib::zlib)
-target_link_libraries(_arrow PRIVATE ch_contrib::zstd)
 
-target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR})
-target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR})
-target_include_directories(_arrow SYSTEM PRIVATE ${ORC_SOURCE_SRC_DIR})
-target_include_directories(_arrow SYSTEM PRIVATE ${ORC_SOURCE_WRAP_DIR})
-target_include_directories(_arrow SYSTEM PRIVATE ${ORC_BUILD_SRC_DIR})
-target_include_directories(_arrow SYSTEM PRIVATE ${ORC_ADDITION_SOURCE_DIR})
 target_include_directories(_arrow SYSTEM PRIVATE ${ARROW_SRC_DIR})
-target_include_directories(_arrow SYSTEM PRIVATE ${FLATBUFFERS_INCLUDE_DIR})
 target_include_directories(_arrow SYSTEM PRIVATE ${HDFS_INCLUDE_DIR})
 
 # === parquet
@@ -56,19 +56,11 @@ list(APPEND SOURCES ${CASS_SRC_DIR}/atomic/atomic_std.hpp)
 add_library(_curl_hostcheck OBJECT ${CASS_SRC_DIR}/third_party/curl/hostcheck.cpp)
 add_library(_hdr_histogram OBJECT ${CASS_SRC_DIR}/third_party/hdr_histogram/hdr_histogram.cpp)
 add_library(_http-parser OBJECT ${CASS_SRC_DIR}/third_party/http-parser/http_parser.c)
-add_library(_minizip OBJECT
-    ${CASS_SRC_DIR}/third_party/minizip/ioapi.c
-    ${CASS_SRC_DIR}/third_party/minizip/zip.c
-    ${CASS_SRC_DIR}/third_party/minizip/unzip.c)
-
-target_link_libraries(_minizip ch_contrib::zlib)
-target_compile_definitions(_minizip PRIVATE "-Dz_crc_t=unsigned long")
 
 list(APPEND INCLUDE_DIRS
     ${CASS_SRC_DIR}/third_party/curl
     ${CASS_SRC_DIR}/third_party/hdr_histogram
     ${CASS_SRC_DIR}/third_party/http-parser
-    ${CASS_SRC_DIR}/third_party/minizip
     ${CASS_SRC_DIR}/third_party/mt19937_64
     ${CASS_SRC_DIR}/third_party/rapidjson/rapidjson
     ${CASS_SRC_DIR}/third_party/sparsehash/src)
@@ -123,10 +115,9 @@ add_library(_cassandra
     ${SOURCES}
     $<TARGET_OBJECTS:_curl_hostcheck>
     $<TARGET_OBJECTS:_hdr_histogram>
-    $<TARGET_OBJECTS:_http-parser>
-    $<TARGET_OBJECTS:_minizip>)
+    $<TARGET_OBJECTS:_http-parser>)
 
-target_link_libraries(_cassandra ch_contrib::zlib)
+target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip)
 target_include_directories(_cassandra PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${INCLUDE_DIRS})
 target_include_directories(_cassandra SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})
 target_compile_definitions(_cassandra PRIVATE CASS_BUILDING)
contrib/fmtlib (vendored submodule)
@@ -1 +1 @@
-Subproject commit c108ee1d590089ccf642fc85652b845924067af2
+Subproject commit b6f4ceaed0a0a24ccf575fab6c56dd50ccf6f1a9

@@ -1,7 +1,10 @@
 set (SRCS
+    # NOTE: do not build module for now:
+    # ../fmtlib/src/fmt.cc
     ../fmtlib/src/format.cc
     ../fmtlib/src/os.cc
 
+    ../fmtlib/include/fmt/args.h
     ../fmtlib/include/fmt/chrono.h
     ../fmtlib/include/fmt/color.h
     ../fmtlib/include/fmt/compile.h
@@ -11,9 +14,9 @@ set (SRCS
     ../fmtlib/include/fmt/locale.h
     ../fmtlib/include/fmt/os.h
     ../fmtlib/include/fmt/ostream.h
-    ../fmtlib/include/fmt/posix.h
     ../fmtlib/include/fmt/printf.h
     ../fmtlib/include/fmt/ranges.h
+    ../fmtlib/include/fmt/xchar.h
 )
 
 add_library(_fmt ${SRCS})
contrib/minizip-ng (new vendored submodule)
@@ -0,0 +1 @@
+Subproject commit 6cffc951851620e0fac1993be75e4713c334de03
contrib/minizip-ng-cmake/CMakeLists.txt (new file)
@@ -0,0 +1,168 @@
+option(ENABLE_MINIZIP "Enable minizip-ng the zip manipulation library" ${ENABLE_LIBRARIES})
+if (NOT ENABLE_MINIZIP)
+    message (STATUS "minizip-ng disabled")
+    return()
+endif()
+
+set(_MINIZIP_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/minizip-ng")
+
+# Initial source files
+set(MINIZIP_SRC
+    ${_MINIZIP_SOURCE_DIR}/mz_crypt.c
+    ${_MINIZIP_SOURCE_DIR}/mz_os.c
+    ${_MINIZIP_SOURCE_DIR}/mz_strm.c
+    ${_MINIZIP_SOURCE_DIR}/mz_strm_buf.c
+    ${_MINIZIP_SOURCE_DIR}/mz_strm_mem.c
+    ${_MINIZIP_SOURCE_DIR}/mz_strm_split.c
+    ${_MINIZIP_SOURCE_DIR}/mz_zip.c
+    ${_MINIZIP_SOURCE_DIR}/mz_zip_rw.c)
+
+# Initial header files
+set(MINIZIP_HDR
+    ${_MINIZIP_SOURCE_DIR}/mz.h
+    ${_MINIZIP_SOURCE_DIR}/mz_os.h
+    ${_MINIZIP_SOURCE_DIR}/mz_crypt.h
+    ${_MINIZIP_SOURCE_DIR}/mz_strm.h
+    ${_MINIZIP_SOURCE_DIR}/mz_strm_buf.h
+    ${_MINIZIP_SOURCE_DIR}/mz_strm_mem.h
+    ${_MINIZIP_SOURCE_DIR}/mz_strm_split.h
+    ${_MINIZIP_SOURCE_DIR}/mz_strm_os.h
+    ${_MINIZIP_SOURCE_DIR}/mz_zip.h
+    ${_MINIZIP_SOURCE_DIR}/mz_zip_rw.h)
+
+set(MINIZIP_INC ${_MINIZIP_SOURCE_DIR})
+
+set(MINIZIP_DEF)
+set(MINIZIP_PUBLIC_DEF)
+set(MINIZIP_LIB)
+
+# Check if zlib is present
+set(MZ_ZLIB ON)
+if(MZ_ZLIB)
+    # Use zlib from ClickHouse contrib
+    list(APPEND MINIZIP_LIB ch_contrib::zlib)
+
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_zlib.c)
+
+    list(APPEND MINIZIP_HDR
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_zlib.h)
+
+    list(APPEND MINIZIP_DEF "-DHAVE_ZLIB")
+endif()
+
+# Check if bzip2 is present
+set(MZ_BZIP2 ${ENABLE_BZIP2})
+if(MZ_BZIP2)
+    # Use bzip2 from ClickHouse contrib
+    list(APPEND MINIZIP_LIB ch_contrib::bzip2)
+
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_bzip.c)
+
+    list(APPEND MINIZIP_HDR
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_bzip.h)
+
+    list(APPEND MINIZIP_DEF "-DHAVE_BZIP2")
+endif()
+
+# Check if liblzma is present
+set(MZ_LZMA ON)
+if(MZ_LZMA)
+    # Use liblzma from ClickHouse contrib
+    list(APPEND MINIZIP_LIB ch_contrib::xz)
+
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_lzma.c)
+
+    list(APPEND MINIZIP_HDR
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_lzma.h)
+
+    list(APPEND MINIZIP_DEF "-DHAVE_LZMA")
+endif()
+
+# Check if zstd is present
+set(MZ_ZSTD ON)
+if(MZ_ZSTD)
+    # Use zstd from ClickHouse contrib
+    list(APPEND MINIZIP_LIB ch_contrib::zstd)
+
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_zstd.c)
+
+    list(APPEND MINIZIP_HDR
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_zstd.h)
+
+    list(APPEND MINIZIP_DEF "-DHAVE_ZSTD")
+endif()
+
+if(NOT MZ_ZLIB AND NOT MZ_ZSTD AND NOT MZ_BZIP2 AND NOT MZ_LZMA)
+    message(STATUS "Compression not supported due to missing libraries")
+
+    list(APPEND MINIZIP_DEF -DMZ_ZIP_NO_DECOMPRESSION)
+    list(APPEND MINIZIP_DEF -DMZ_ZIP_NO_COMPRESSION)
+endif()
+
+# Check to see if openssl installation is present
+set(MZ_OPENSSL ${ENABLE_SSL})
+if(MZ_OPENSSL)
+    # Use openssl from ClickHouse contrib
+    list(APPEND MINIZIP_LIB OpenSSL::SSL OpenSSL::Crypto)
+
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_crypt_openssl.c)
+endif()
+
+# Include WinZIP AES encryption
+set(MZ_WZAES ${ENABLE_SSL})
+if(MZ_WZAES)
+    list(APPEND MINIZIP_DEF -DHAVE_WZAES)
+
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_wzaes.c)
+
+    list(APPEND MINIZIP_HDR
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_wzaes.h)
+endif()
+
+# Include traditional PKWare encryption
+set(MZ_PKCRYPT ON)
+if(MZ_PKCRYPT)
+    list(APPEND MINIZIP_DEF -DHAVE_PKCRYPT)
+
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_pkcrypt.c)
+
+    list(APPEND MINIZIP_HDR
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_pkcrypt.h)
+endif()
+
+# Unix specific
+if(UNIX)
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_os_posix.c
+        ${_MINIZIP_SOURCE_DIR}/mz_strm_os_posix.c)
+endif()
+
+# Include compatibility layer
+set(MZ_COMPAT ON)
+if(MZ_COMPAT)
+    list(APPEND MINIZIP_SRC
+        ${_MINIZIP_SOURCE_DIR}/mz_compat.c)
+
+    list(APPEND MINIZIP_HDR
+        ${_MINIZIP_SOURCE_DIR}/mz_compat.h
+        zip.h
+        unzip.h)
+
+    list(APPEND MINIZIP_INC "${CMAKE_CURRENT_SOURCE_DIR}")
+    list(APPEND MINIZIP_PUBLIC_DEF "-DMZ_COMPAT_VERSION=110")
+endif()
+
+add_library(_minizip ${MINIZIP_SRC} ${MINIZIP_HDR})
+target_include_directories(_minizip PUBLIC ${MINIZIP_INC})
+target_compile_definitions(_minizip PUBLIC ${MINIZIP_PUBLIC_DEF})
+target_compile_definitions(_minizip PRIVATE ${MINIZIP_DEF})
+target_link_libraries(_minizip PRIVATE ${MINIZIP_LIB})
+
+add_library(ch_contrib::minizip ALIAS _minizip)
contrib/minizip-ng-cmake/unzip.h (new file)
@@ -0,0 +1,13 @@
+/* unzip.h -- Compatibility layer shim
+   part of the minizip-ng project
+
+   This program is distributed under the terms of the same license as zlib.
+   See the accompanying LICENSE file for the full text of the license.
+*/
+
+#ifndef MZ_COMPAT_UNZIP
+#define MZ_COMPAT_UNZIP
+
+#include "mz_compat.h"
+
+#endif
contrib/minizip-ng-cmake/zip.h (new file)
@@ -0,0 +1,13 @@
+/* zip.h -- Compatibility layer shim
+   part of the minizip-ng project
+
+   This program is distributed under the terms of the same license as zlib.
+   See the accompanying LICENSE file for the full text of the license.
+*/
+
+#ifndef MZ_COMPAT_ZIP
+#define MZ_COMPAT_ZIP
+
+#include "mz_compat.h"
+
+#endif
contrib/orc (vendored submodule)
@@ -1 +1 @@
-Subproject commit 0a936f6bbdb9303308973073f8623b5a8d82eae1
+Subproject commit f9a393ed2433a60034795284f82d093b348f2102

contrib/replxx (vendored submodule)
@@ -1 +1 @@
-Subproject commit f019cba7ea1bcd1b4feb7826f28ed57fb581b04c
+Subproject commit 9460e5e0fc10f78f460af26a6bd928798cac864d
@@ -72,11 +72,6 @@ else()
 
     if(WITH_ZSTD)
         add_definitions(-DZSTD)
-        include_directories(${ZSTD_INCLUDE_DIR})
-        include_directories("${ZSTD_INCLUDE_DIR}/common")
-        include_directories("${ZSTD_INCLUDE_DIR}/dictBuilder")
-        include_directories("${ZSTD_INCLUDE_DIR}/deprecated")
-
         list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
     endif()
 endif()
@@ -132,11 +127,6 @@ endif()
 
 if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
     add_definitions(-DOS_MACOSX)
-    if(CMAKE_SYSTEM_PROCESSOR MATCHES arm)
-        add_definitions(-DIOS_CROSS_COMPILE -DROCKSDB_LITE)
-        # no debug info for IOS, that will make our library big
-        add_definitions(-DNDEBUG)
-    endif()
 elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
     add_definitions(-DOS_LINUX)
 elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
debian/clickhouse-server.service (vendored)
@@ -16,6 +16,8 @@ Restart=always
 RestartSec=30
 RuntimeDirectory=clickhouse-server
 ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid
+# Minus means that this file is optional.
+EnvironmentFile=-/etc/default/clickhouse
 LimitCORE=infinity
 LimitNOFILE=500000
 CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE
@@ -1,15 +0,0 @@
-version: "2"
-
-services:
-  builder:
-    image: clickhouse/clickhouse-builder
-    build: docker/builder
-  client:
-    image: clickhouse/clickhouse-client
-    build: docker/client
-    command: ['--host', 'server']
-  server:
-    image: clickhouse/clickhouse-server
-    build: docker/server
-    ports:
-      - 8123:8123
@@ -32,6 +32,7 @@
         "dependent": []
     },
     "docker/test/pvs": {
+        "only_amd64": true,
        "name": "clickhouse/pvs-test",
         "dependent": []
     },
@@ -72,6 +73,7 @@
         "dependent": []
     },
     "docker/test/integration/runner": {
+        "only_amd64": true,
        "name": "clickhouse/integration-tests-runner",
         "dependent": []
     },
@@ -124,6 +126,7 @@
         "dependent": []
     },
     "docker/test/integration/kerberos_kdc": {
+        "only_amd64": true,
        "name": "clickhouse/kerberos-kdc",
         "dependent": []
     },
@@ -137,6 +140,7 @@
         ]
     },
     "docker/test/integration/kerberized_hadoop": {
+        "only_amd64": true,
        "name": "clickhouse/kerberized-hadoop",
         "dependent": []
     },
@@ -185,15 +185,14 @@ handle SIGUSR2 nostop noprint pass
 handle SIG$RTMIN nostop noprint pass
 info signals
 continue
+gcore
 backtrace full
-info locals
+thread apply all backtrace full
 info registers
 disassemble /s
 up
-info locals
 disassemble /s
 up
-info locals
 disassemble /s
 p \"done\"
 detach
@@ -314,6 +313,11 @@ quit
         || echo "Fuzzer failed ($fuzzer_exit_code). See the logs." ; } \
         | tail -1 > description.txt
     fi
+
+    if test -f core.*; then
+        pigz core.*
+        mv core.*.gz core.gz
+    fi
 }
 
 case "$stage" in
@@ -345,6 +349,10 @@ case "$stage" in
         time fuzz
         ;&
     "report")
+        CORE_LINK=''
+        if [ -f core.gz ]; then
+            CORE_LINK='<a href="core.gz">core.gz</a>'
+        fi
         cat > report.html <<EOF ||:
 <!DOCTYPE html>
 <html lang="en">
@@ -386,6 +394,7 @@ th { cursor: pointer; }
 <a href="fuzzer.log">fuzzer.log</a>
 <a href="server.log">server.log</a>
 <a href="main.log">main.log</a>
+${CORE_LINK}
 </p>
 <table>
 <tr><th>Test name</th><th>Test status</th><th>Description</th></tr>
@@ -20,4 +20,5 @@ RUN cd /tmp && \
     cd commons-daemon-1.0.15-src/src/native/unix && \
     ./configure && \
     make && \
-    cp ./jsvc /usr/local/hadoop/sbin
+    cp ./jsvc /usr/local/hadoop-2.7.0/sbin && \
+    [ -e /usr/local/hadoop ] || ln -s ./hadoop-2.7.0 /usr/local/hadoop
|
@ -58,9 +58,7 @@ RUN apt-get update \
|
|||||||
|
|
||||||
RUN dockerd --version; docker --version
|
RUN dockerd --version; docker --version
|
||||||
|
|
||||||
ARG TARGETARCH
|
RUN python3 -m pip install \
|
||||||
# FIXME: psycopg2-binary is not available for aarch64, we skip it for now
|
|
||||||
RUN test x$TARGETARCH = xarm64 || ( python3 -m pip install \
|
|
||||||
PyMySQL \
|
PyMySQL \
|
||||||
aerospike==4.0.0 \
|
aerospike==4.0.0 \
|
||||||
avro==1.10.2 \
|
avro==1.10.2 \
|
||||||
@ -90,7 +88,7 @@ RUN test x$TARGETARCH = xarm64 || ( python3 -m pip install \
|
|||||||
urllib3 \
|
urllib3 \
|
||||||
requests-kerberos \
|
requests-kerberos \
|
||||||
pyhdfs \
|
pyhdfs \
|
||||||
azure-storage-blob )
|
azure-storage-blob
|
||||||
|
|
||||||
COPY modprobe.sh /usr/local/bin/modprobe
|
COPY modprobe.sh /usr/local/bin/modprobe
|
||||||
COPY dockerd-entrypoint.sh /usr/local/bin/
|
COPY dockerd-entrypoint.sh /usr/local/bin/
|
||||||
|
@@ -4,7 +4,7 @@ services:
   kerberizedhdfs1:
     cap_add:
       - DAC_READ_SEARCH
-    image: clickhouse/kerberized-hadoop
+    image: clickhouse/kerberized-hadoop:${DOCKER_KERBERIZED_HADOOP_TAG:-latest}
     hostname: kerberizedhdfs1
     restart: always
     volumes:
@@ -45,6 +45,7 @@ export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
 export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}
 export DOCKER_POSTGRESQL_JAVA_CLIENT_TAG=${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:=latest}
 export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}
+export DOCKER_KERBERIZED_HADOOP_TAG=${DOCKER_KERBERIZED_HADOOP_TAG:=latest}
 
 cd /ClickHouse/tests/integration
 exec "$@"
@@ -1,5 +1,5 @@
 # docker build -t clickhouse/performance-comparison .
-FROM ubuntu:18.04
+FROM ubuntu:20.04
 
 # ARG for quick switch to a given ubuntu mirror
 ARG apt_archive="http://archive.ubuntu.com"
@@ -4,11 +4,7 @@
 ARG FROM_TAG=latest
 FROM clickhouse/binary-builder:$FROM_TAG
 
-# PVS studio doesn't support aarch64/arm64, so there is a check for it everywhere
-# We'll produce an empty image for arm64
-ARG TARGETARCH
-
-RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
+RUN apt-get update --yes \
     && apt-get install \
         bash \
         wget \
@@ -21,7 +17,7 @@ RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
         libprotoc-dev \
         libgrpc++-dev \
         libc-ares-dev \
-        --yes --no-install-recommends )
+        --yes --no-install-recommends
 
 #RUN wget -nv -O - http://files.viva64.com/etc/pubkey.txt | sudo apt-key add -
 #RUN sudo wget -nv -O /etc/apt/sources.list.d/viva64.list http://files.viva64.com/etc/viva64.list
@@ -33,7 +29,7 @@ RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
 
 ENV PKG_VERSION="pvs-studio-latest"
 
-RUN test x$TARGETARCH = xarm64 || ( set -x \
+RUN set -x \
     && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \
     && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \
     && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \
@@ -41,7 +37,7 @@ RUN test x$TARGETARCH = xarm64 || ( set -x \
     && wget -nv "https://files.viva64.com/${PKG_VERSION}.deb" \
     && { debsig-verify ${PKG_VERSION}.deb \
     || echo "WARNING: Some file was just downloaded from the internet without any validation and we are installing it into the system"; } \
-    && dpkg -i "${PKG_VERSION}.deb" )
+    && dpkg -i "${PKG_VERSION}.deb"
 
 ENV CCACHE_DIR=/test_output/ccache
 
@@ -12,7 +12,11 @@ dpkg -i package_folder/clickhouse-common-static_*.deb
 dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
 dpkg -i package_folder/clickhouse-server_*.deb
 dpkg -i package_folder/clickhouse-client_*.deb
+if [[ -n "$TEST_CASES_FROM_DEB" ]] && [[ "$TEST_CASES_FROM_DEB" -eq 1 ]]; then
 dpkg -i package_folder/clickhouse-test_*.deb
+else
+    ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
+fi
 
 # install test configs
 /usr/share/clickhouse-test/config/install.sh
@@ -85,6 +89,10 @@ function run_tests()
         # everything in parallel except DatabaseReplicated. See below.
     fi
 
+    if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
+        ADDITIONAL_OPTIONS+=('--s3-storage')
+    fi
+
     if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
         ADDITIONAL_OPTIONS+=('--jobs')
@@ -148,14 +148,12 @@ info signals
 continue
 gcore
 backtrace full
-info locals
+thread apply all backtrace full
 info registers
 disassemble /s
 up
-info locals
 disassemble /s
 up
-info locals
 disassemble /s
 p \"done\"
 detach
@@ -269,5 +267,5 @@ clickhouse-local --structure "test String, res String" -q "SELECT 'failure', tes
 # Default filename is 'core.PROCESS_ID'
 for core in core.*; do
     pigz $core
-    mv $core.gz /output/
+    mv $core.gz /test_output/
 done
@@ -11,6 +11,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
     curl \
     git \
     libxml2-utils \
+    moreutils \
     pylint \
     python3-pip \
     shellcheck \
@@ -10,72 +10,26 @@ def process_result(result_folder):
     status = "success"
     description = ""
     test_results = []
+    checks = (
+        ("header duplicates", "duplicate_output.txt"),
+        ("shellcheck", "shellcheck_output.txt"),
+        ("style", "style_output.txt"),
+        ("typos", "typos_output.txt"),
+        ("whitespaces", "whitespaces_output.txt"),
+        ("workflows", "workflows_output.txt"),
+    )
 
-    duplicate_log_path = "{}/duplicate_output.txt".format(result_folder)
-    if not os.path.exists(duplicate_log_path):
-        logging.info("No header duplicates check log on path %s", duplicate_log_path)
-        return "exception", "No header duplicates check log", []
-    elif os.stat(duplicate_log_path).st_size != 0:
-        description += " Header duplicates check failed. "
-        test_results.append(("Header duplicates check", "FAIL"))
+    for name, out_file in checks:
+        full_path = os.path.join(result_folder, out_file)
+        if not os.path.exists(full_path):
+            logging.info("No %s check log on path %s", name, full_path)
+            return "exception", f"No {name} check log", []
+        elif os.stat(full_path).st_size != 0:
+            description += f"Check {name} failed. "
+            test_results.append((f"Check {name}", "FAIL"))
         status = "failure"
     else:
-        test_results.append(("Header duplicates check", "OK"))
+        test_results.append((f"Check {name}", "OK"))
 
-    shellcheck_log_path = "{}/shellcheck_output.txt".format(result_folder)
-    if not os.path.exists(shellcheck_log_path):
-        logging.info("No shellcheck log on path %s", shellcheck_log_path)
-        return "exception", "No shellcheck log", []
-    elif os.stat(shellcheck_log_path).st_size != 0:
-        description += " Shellcheck check failed. "
-        test_results.append(("Shellcheck ", "FAIL"))
-        status = "failure"
-    else:
-        test_results.append(("Shellcheck", "OK"))
-
-    style_log_path = "{}/style_output.txt".format(result_folder)
-    if not os.path.exists(style_log_path):
-        logging.info("No style check log on path %s", style_log_path)
-        return "exception", "No style check log", []
-    elif os.stat(style_log_path).st_size != 0:
-        description += "Style check failed. "
-        test_results.append(("Style check", "FAIL"))
-        status = "failure"
-    else:
-        test_results.append(("Style check", "OK"))
-
-    typos_log_path = "{}/typos_output.txt".format(result_folder)
-    if not os.path.exists(typos_log_path):
-        logging.info("No typos check log on path %s", typos_log_path)
-        return "exception", "No typos check log", []
-    elif os.stat(typos_log_path).st_size != 0:
-        description += "Typos check failed. "
-        test_results.append(("Typos check", "FAIL"))
-        status = "failure"
-    else:
-        test_results.append(("Typos check", "OK"))
-
-    whitespaces_log_path = "{}/whitespaces_output.txt".format(result_folder)
-    if not os.path.exists(whitespaces_log_path):
-        logging.info("No whitespaces check log on path %s", whitespaces_log_path)
-        return "exception", "No whitespaces check log", []
-    elif os.stat(whitespaces_log_path).st_size != 0:
-        description += "Whitespaces check failed. "
-        test_results.append(("Whitespaces check", "FAIL"))
-        status = "failure"
-    else:
-        test_results.append(("Whitespaces check", "OK"))
-
-    workflows_log_path = "{}/workflows_output.txt".format(result_folder)
-    if not os.path.exists(workflows_log_path):
-        logging.info("No workflows check log on path %s", style_log_path)
-        return "exception", "No workflows check log", []
-    elif os.stat(whitespaces_log_path).st_size != 0:
-        description += "Workflows check failed. "
-        test_results.append(("Workflows check", "FAIL"))
-        status = "failure"
-    else:
-        test_results.append(("Workflows check", "OK"))
-
     if not description:
         description += "Style check success"
@@ -3,10 +3,16 @@
 # yaml check is not the best one
 
 cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv
+echo "Check duplicates" | ts
 ./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt
+echo "Check style" | ts
 ./check-style -n |& tee /test_output/style_output.txt
+echo "Check typos" | ts
 ./check-typos |& tee /test_output/typos_output.txt
+echo "Check whitespaces" | ts
 ./check-whitespaces -n |& tee /test_output/whitespaces_output.txt
+echo "Check sorkflows" | ts
 ./check-workflows |& tee /test_output/workflows_output.txt
+echo "Check shell scripts with shellcheck" | ts
 ./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt
 /process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
@@ -43,24 +43,27 @@ RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.1 docker==5.0.0
 ENV DOCKER_CHANNEL stable
 ENV DOCKER_VERSION 20.10.6
 
-RUN set -eux; \
-    \
-    # this "case" statement is generated via "update.sh"
-    \
-    if ! wget -nv -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/x86_64/docker-${DOCKER_VERSION}.tgz"; then \
-        echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${x86_64}'"; \
-        exit 1; \
-    fi; \
-    \
-    tar --extract \
+# Architecture of the image when BuildKit/buildx is used
+ARG TARGETARCH
+
+# Install docker
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) rarch=x86_64 ;; \
+        arm64) rarch=aarch64 ;; \
+    esac \
+    && set -eux \
+    && if ! wget -nv -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/${rarch}/docker-${DOCKER_VERSION}.tgz"; then \
+        echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${rarch}'" \
+        && exit 1; \
+    fi \
+    && tar --extract \
         --file docker.tgz \
         --strip-components 1 \
         --directory /usr/local/bin/ \
-    ; \
-    rm docker.tgz; \
-    \
-    dockerd --version; \
-    docker --version
+    && rm docker.tgz \
+    && dockerd --version \
+    && docker --version
 
 COPY modprobe.sh /usr/local/bin/modprobe
 COPY dockerd-entrypoint.sh /usr/local/bin/
@@ -22,7 +22,7 @@ cmake .. \
 
 1. ClickHouse's source CMake files (located in the root directory and in `/src`).
 2. Arch-dependent CMake files (located in `/cmake/*os_name*`).
-3. Libraries finders (search for contrib libraries, located in `/cmake/find`).
+3. Libraries finders (search for contrib libraries, located in `/contrib/*/CMakeLists.txt`).
 3. Contrib build CMake files (used instead of libraries' own CMake files, located in `/cmake/modules`)
 
 ## List of CMake flags
@@ -8,4 +8,4 @@ sudo apt-get update
 sudo apt-get install -y clickhouse-server clickhouse-client
 
 sudo service clickhouse-server start
-clickhouse-client
+clickhouse-client # or "clickhouse-client --password" if you set up a password.

@@ -4,4 +4,4 @@ sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/clickhouse.re
 sudo yum install clickhouse-server clickhouse-client
 
 sudo /etc/init.d/clickhouse-server start
-clickhouse-client
+clickhouse-client # or "clickhouse-client --password" if you set up a password.
@@ -125,10 +125,6 @@ For installing CMake and Ninja on Mac OS X first install Homebrew and then insta
 
 Next, check the version of CMake: `cmake --version`. If it is below 3.12, you should install a newer version from the website: https://cmake.org/download/.
 
-## Optional External Libraries {#optional-external-libraries}
-
-ClickHouse uses several external libraries for building. All of them do not need to be installed separately as they are built together with ClickHouse from the sources located in the submodules. You can check the list in `contrib`.
-
 ## C++ Compiler {#c-compiler}
 
 Compilers Clang starting from version 11 is supported for building ClickHouse.
@@ -97,13 +97,16 @@ Structure of the `patterns` section:
 
 ``` text
 pattern
+    rule_type
     regexp
     function
 pattern
+    rule_type
     regexp
     age + precision
     ...
 pattern
+    rule_type
     regexp
     function
     age + precision
@@ -127,12 +130,20 @@ When processing a row, ClickHouse checks the rules in the `pattern` sections. Ea
 
 Fields for `pattern` and `default` sections:
 
-- `regexp`– A pattern for the metric name.
+- `rule_type` - a rule's type. It's applied only to a particular metrics. The engine use it to separate plain and tagged metrics. Optional parameter. Default value: `all`.
+  It's unnecessary when performance is not critical, or only one metrics type is used, e.g. plain metrics. By default only one type of rules set is created. Otherwise, if any of special types is defined, two different sets are created. One for plain metrics (root.branch.leaf) and one for tagged metrics (root.branch.leaf;tag1=value1).
+  The default rules are ended up in both sets.
+  Valid values:
+    - `all` (default) - a universal rule, used when `rule_type` is omitted.
+    - `plain` - a rule for plain metrics. The field `regexp` is processed as regular expression.
+    - `tagged` - a rule for tagged metrics (metrics are stored in DB in the format of `someName?tag1=value1&tag2=value2&tag3=value3`). Regular expression must be sorted by tags' names, first tag must be `__name__` if exists. The field `regexp` is processed as regular expression.
+    - `tag_list` - a rule for tagged matrics, a simple DSL for easier metric description in graphite format `someName;tag1=value1;tag2=value2`, `someName`, or `tag1=value1;tag2=value2`. The field `regexp` is translated into a `tagged` rule. The sorting by tags' names is unnecessary, ti will be done automatically. A tag's value (but not a name) can be set as a regular expression, e.g. `env=(dev|staging)`.
+- `regexp` – A pattern for the metric name (a regular or DSL).
 - `age` – The minimum age of the data in seconds.
 - `precision`– How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day).
 - `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. Accepted functions: min / max / any / avg. The average is calculated imprecisely, like the average of the averages.
 
-### Configuration Example {#configuration-example}
+### Configuration Example without rules types {#configuration-example}
 
 ``` xml
 <graphite_rollup>
@@ -167,6 +178,81 @@ Fields for `pattern` and `default` sections:
 </graphite_rollup>
 ```
 
+### Configuration Example with rules types {#configuration-typed-example}
+
+``` xml
+<graphite_rollup>
+    <version_column_name>Version</version_column_name>
+    <pattern>
+        <rule_type>plain</rule_type>
+        <regexp>click_cost</regexp>
+        <function>any</function>
+        <retention>
+            <age>0</age>
+            <precision>5</precision>
+        </retention>
+        <retention>
+            <age>86400</age>
+            <precision>60</precision>
+        </retention>
+    </pattern>
+    <pattern>
+        <rule_type>tagged</rule_type>
+        <regexp>^((.*)|.)min\?</regexp>
+        <function>min</function>
+        <retention>
+            <age>0</age>
+            <precision>5</precision>
+        </retention>
+        <retention>
+            <age>86400</age>
+            <precision>60</precision>
+        </retention>
+    </pattern>
+    <pattern>
+        <rule_type>tagged</rule_type>
+        <regexp><![CDATA[^someName\?(.*&)*tag1=value1(&|$)]]></regexp>
+        <function>min</function>
+        <retention>
+            <age>0</age>
+            <precision>5</precision>
+        </retention>
+        <retention>
+            <age>86400</age>
+            <precision>60</precision>
+        </retention>
+    </pattern>
+    <pattern>
+        <rule_type>tag_list</rule_type>
+        <regexp>someName;tag2=value2</regexp>
+        <retention>
+            <age>0</age>
+            <precision>5</precision>
+        </retention>
+        <retention>
+            <age>86400</age>
+            <precision>60</precision>
+        </retention>
+    </pattern>
+    <default>
+        <function>max</function>
+        <retention>
+            <age>0</age>
+            <precision>60</precision>
+        </retention>
+        <retention>
+            <age>3600</age>
+            <precision>300</precision>
+        </retention>
+        <retention>
+            <age>86400</age>
+            <precision>3600</precision>
+        </retention>
+    </default>
+</graphite_rollup>
+```
+
 
 !!! warning "Warning"
     Data rollup is performed during merges. Usually, for old partitions, merges are not started, so for rollup it is necessary to trigger an unscheduled merge using [optimize](../../../sql-reference/statements/optimize.md). Or use additional tools, for example [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).
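For context on the rollup configuration added above: a GraphiteMergeTree table references such a `<graphite_rollup>` section by name. The sketch below is illustrative only; the table and column names are placeholders chosen to satisfy the engine's documented column requirements.

``` sql
-- Illustrative only: a table that uses the graphite_rollup section shown above.
CREATE TABLE graphite_data
(
    Path String,      -- metric name (plain or tagged)
    Time DateTime,    -- time of the measurement
    Value Float64,    -- metric value
    Version UInt32    -- row version used during rollup
)
ENGINE = GraphiteMergeTree('graphite_rollup')
ORDER BY (Path, Time);
```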
@@ -886,3 +886,12 @@ S3 disk can be configured as `main` or `cold` storage:
 ```
 
 In case of `cold` option a data can be moved to S3 if local disk free size will be smaller than `move_factor * disk_size` or by TTL move rule.
+
+## Virtual Columns {#virtual-columns}
+
+- `_part` — Name of a part.
+- `_part_index` — Sequential index of the part in the query result.
+- `_partition_id` — Name of a partition.
+- `_part_uuid` — Unique part identifier (if enabled MergeTree setting `assign_part_uuids`).
+- `_partition_value` — Values (a tuple) of a `partition by` expression.
+- `_sample_factor` — Sample factor (from the query).
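A quick illustration of the virtual columns documented in the hunk above; the table name is a placeholder and any MergeTree-family table is assumed.

``` sql
-- Illustrative only: inspect which parts and partitions rows come from.
SELECT _part, _partition_id, count() AS rows
FROM my_merge_tree_table
GROUP BY _part, _partition_id
ORDER BY _partition_id, _part;
```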
@@ -54,10 +54,8 @@ If the set of columns in the Buffer table does not match the set of columns in a
 If the types do not match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log, and the buffer is cleared.
 The same thing happens if the subordinate table does not exist when the buffer is flushed.
 
-If you need to run ALTER for a subordinate table, and the Buffer table, we recommend first deleting the Buffer table, running ALTER for the subordinate table, then creating the Buffer table again.
-
 !!! attention "Attention"
-    Running ALTER on the Buffer table in releases made before 28 Sep 2020 will cause a `Block structure mismatch` error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117)), so deleting the Buffer table and then recreating is the only option. It is advisable to check that this error is fixed in your release before trying to run ALTER on the Buffer table.
+    Running ALTER on the Buffer table in releases made before 26 Oct 2021 will cause a `Block structure mismatch` error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117) and [#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)), so deleting the Buffer table and then recreating is the only option. It is advisable to check that this error is fixed in your release before trying to run ALTER on the Buffer table.
 
 If the server is restarted abnormally, the data in the buffer is lost.
@@ -209,6 +209,8 @@ When querying a `Distributed` table, `SELECT` queries are sent to all shards and
 
 When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
 
+To learn more about how distibuted `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation.
+
 ## Virtual Columns {#virtual-columns}
 
 - `_shard_num` — Contains the `shard_num` value from the table `system.clusters`. Type: [UInt32](../../../sql-reference/data-types/int-uint.md).
@@ -7,18 +7,29 @@ toc_title: URL
 
 Queries data to/from a remote HTTP/HTTPS server. This engine is similar to the [File](../../../engines/table-engines/special/file.md) engine.
 
-Syntax: `URL(URL, Format)`
+Syntax: `URL(URL [,Format] [,CompressionMethod])`
+
+- The `URL` parameter must conform to the structure of a Uniform Resource Locator. The specified URL must point to a server that uses HTTP or HTTPS. This does not require any additional headers for getting a response from the server.
+
+- The `Format` must be one that ClickHouse can use in `SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see [Formats](../../../interfaces/formats.md#formats).
+
+- `CompressionMethod` indicates that whether the HTTP body should be compressed. If the compression is enabled, the HTTP packets sent by the URL engine contain 'Content-Encoding' header to indicate which compression method is used.
+
+To enable compression, please first make sure the remote HTTP endpoint indicated by the `URL` parameter supports corresponding compression algorithm.
+
+The supported `CompressionMethod` should be one of following:
+- gzip or gz
+- deflate
+- brotli or br
+- lzma or xz
+- zstd or zst
+- lz4
+- bz2
+- snappy
+- none
 
 ## Usage {#using-the-engine-in-the-clickhouse-server}
 
-The `format` must be one that ClickHouse can use in
-`SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see
-[Formats](../../../interfaces/formats.md#formats).
-
-The `URL` must conform to the structure of a Uniform Resource Locator. The specified URL must point to a server
-that uses HTTP or HTTPS. This does not require any
-additional headers for getting a response from the server.
-
 `INSERT` and `SELECT` queries are transformed to `POST` and `GET` requests,
 respectively. For processing `POST` requests, the remote server must support
 [Chunked transfer encoding](https://en.wikipedia.org/wiki/Chunked_transfer_encoding).
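A minimal sketch of the extended URL engine syntax documented above; the endpoint, columns, and the 'gzip' choice are placeholders, and the server behind the URL is assumed to accept gzip-compressed bodies.

``` sql
-- Illustrative only: the third argument selects the CompressionMethod.
CREATE TABLE url_demo (word String, value UInt64)
ENGINE = URL('http://127.0.0.1:12345/', CSV, 'gzip');

INSERT INTO url_demo VALUES ('Hello', 1);
SELECT * FROM url_demo;
```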
@@ -25,6 +25,7 @@ Categories:
 - **[Operations](../faq/operations/index.md)**
     - [Which ClickHouse version to use in production?](../faq/operations/production.md)
     - [Is it possible to delete old records from a ClickHouse table?](../faq/operations/delete-old-data.md)
+    - [Does ClickHouse support multi-region replication?](../faq/operations/multi-region-replication.md)
 - **[Integration](../faq/integration/index.md)**
     - [How do I export data from ClickHouse to a file?](../faq/integration/file-export.md)
     - [What if I have a problem with encodings when connecting to Oracle via ODBC?](../faq/integration/oracle-odbc.md)
@@ -69,14 +69,14 @@ You can also download and install packages manually from [here](https://repo.cli
 It is recommended to use official pre-compiled `tgz` archives for all Linux distributions, where installation of `deb` or `rpm` packages is not possible.
 
 The required version can be downloaded with `curl` or `wget` from repository https://repo.clickhouse.com/tgz/.
-After that downloaded archives should be unpacked and installed with installation scripts. Example for the latest version:
+After that downloaded archives should be unpacked and installed with installation scripts. Example for the latest stable version:
 
 ``` bash
-export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
-curl -O https://repo.clickhouse.com/tgz/clickhouse-common-static-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.com/tgz/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.com/tgz/clickhouse-server-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.com/tgz/clickhouse-client-$LATEST_VERSION.tgz
+export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep stable | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-server-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-client-$LATEST_VERSION.tgz
 
 tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
 sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh
@ -23,11 +23,13 @@ Web UI can be accessed here: `http://localhost:8123/play`.

![Web UI](../images/play.png)

In health-check scripts, use the `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. See also `/replicas_status` to check a replica's delay.

``` bash
$ curl 'http://localhost:8123/ping'
Ok.
$ curl 'http://localhost:8123/replicas_status'
Ok.
```

Send the request as a URL ‘query’ parameter, or as a POST. Or send the beginning of the query in the ‘query’ parameter, and the rest in the POST (we’ll explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.
@ -67,6 +67,7 @@ toc_title: Adopters
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.genotek.ru/" class="favicon">Genotek</a> | Bioinformatics | Main product | — | — | [Video, August 2020](https://youtu.be/v3KyZbz9lEE) |
| <a href="https://gigapipe.com/" class="favicon">Gigapipe</a> | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) |
| <a href="https://gigasheet.co/" class="favicon">Gigasheet</a> | Analytics | Main product | — | — | Direct Reference, February 2022 |
| <a href="https://glaber.io/" class="favicon">Glaber</a> | Monitoring | Main product | — | — | [Website](https://glaber.io/) |
| <a href="https://graphcdn.io/" class="favicon">GraphCDN</a> | CDN | Traffic Analytics | — | — | [Blog Post in English, August 2021](https://altinity.com/blog/delivering-insight-on-graphql-apis-with-clickhouse-at-graphcdn/) |
| <a href="https://www.grouparoo.com" class="favicon">Grouparoo</a> | Data Warehouse Integrations | Main product | — | — | [Official Website, November 2021](https://www.grouparoo.com/integrations) |
@ -108,7 +108,13 @@ Examples of configuration for quorum with three nodes can be found in [integrati

ClickHouse Keeper is bundled into the ClickHouse server package: just add the configuration of `<keeper_server>` and start the ClickHouse server as usual. If you want to run standalone ClickHouse Keeper, you can start it in a similar way with:

```bash
clickhouse-keeper --config /etc/your_path_to_config/config.xml
```

If you don't have the symlink (`clickhouse-keeper`), you can create it or specify `keeper` as an argument:

```bash
clickhouse keeper --config /etc/your_path_to_config/config.xml
```

## Four Letter Word Commands {#four-letter-word-commands}
@ -14,7 +14,7 @@ toc_title: OpenTelemetry Support

ClickHouse accepts trace context HTTP headers, as described by the [W3C recommendation](https://www.w3.org/TR/trace-context/). It also accepts trace context over a native protocol that is used for communication between ClickHouse servers or between the client and server. For manual testing, trace context headers conforming to the Trace Context recommendation can be supplied to `clickhouse-client` using the `--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags.

If no parent trace context is supplied, or the provided trace context does not comply with the W3C standard above, ClickHouse can start a new trace, with probability controlled by the [opentelemetry_start_trace_probability](../operations/settings/settings.md#opentelemetry-start-trace-probability) setting.

## Propagating the Trace Context

@ -46,8 +46,8 @@ ENGINE = URL('http://127.0.0.1:9411/api/v2/spans', 'JSONEachRow')
SETTINGS output_format_json_named_tuples_as_objects = 1,
    output_format_json_array_of_rows = 1 AS
SELECT
    lower(hex(trace_id)) AS traceId,
    case when parent_span_id = 0 then '' else lower(hex(parent_span_id)) end AS parentId,
    lower(hex(span_id)) AS id,
    operation_name AS name,
    start_time_us AS timestamp,
@ -27,7 +27,7 @@ To analyze the `trace_log` system table:

For security reasons, introspection functions are disabled by default.

- Use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by whole stack traces, as in the sketch below.
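A minimal sketch of such an aggregation, assuming introspection functions are enabled and the sampling profiler has already written rows for the query of interest (the `query_id` value below is a hypothetical placeholder):

``` sql
SELECT
    count() AS samples,
    arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS readable_stack
FROM system.trace_log
WHERE query_id = 'your-query-id'   -- hypothetical identifier
GROUP BY trace
ORDER BY samples DESC
LIMIT 10;
```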

If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).
@ -1803,6 +1803,48 @@ If an INSERTed block is skipped due to deduplication in the source table, there

At the same time, this behaviour “breaks” `INSERT` idempotency. If an `INSERT` into the main table was successful and `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper) a client will get an error and can retry the operation. However, the materialized view won’t receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` allows for changing this behaviour. On retry, a materialized view will receive the repeat insert and will perform a deduplication check by itself, ignoring the check result for the source table, and will insert rows lost because of the first failure.

## insert_deduplication_token {#insert_deduplication_token}

The setting allows a user to provide their own deduplication semantics in MergeTree/ReplicatedMergeTree. For example, by providing a unique value for the setting in each INSERT statement, a user can avoid the same inserted data being deduplicated.

Possible values:

- Any string

Default value: empty string (disabled)

`insert_deduplication_token` is used for deduplication _only_ when not empty.

Example:

```sql
CREATE TABLE test_table
( A Int64 )
ENGINE = MergeTree
ORDER BY A
SETTINGS non_replicated_deduplication_window = 100;

INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (1);

-- the next insert won't be deduplicated because insert_deduplication_token is different
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test1' (1);

-- the next insert will be deduplicated because insert_deduplication_token
-- is the same as one of the previous
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (2);

SELECT * FROM test_table

┌─A─┐
│ 1 │
└───┘
┌─A─┐
│ 1 │
└───┘
```

## max_network_bytes {#settings-max-network-bytes}

Limits the data volume (in bytes) that is received or transmitted over the network when executing a query. This setting applies to every individual query.
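For illustration only, a session-level sketch of applying this limit (the value below is an arbitrary assumption, not a recommendation):

``` sql
-- Cap network traffic per query in this session at roughly 1 GB (arbitrary example value).
SET max_network_bytes = 1000000000;
```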
@ -2304,7 +2346,7 @@ Possible values:
- 1 — Enabled.
- 0 — Disabled.

Default value: `1`.

## output_format_parallel_formatting {#output-format-parallel-formatting}

@ -2315,7 +2357,7 @@ Possible values:
- 1 — Enabled.
- 0 — Disabled.

Default value: `1`.

## min_chunk_bytes_for_parallel_parsing {#min-chunk-bytes-for-parallel-parsing}
@ -2,7 +2,7 @@

Contains stack traces of all server threads. Allows developers to introspect the server state.

To analyze stack frames, use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md).
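A minimal sketch of such a query, assuming `allow_introspection_functions` is enabled for the current user and debug symbols are installed:

``` sql
SELECT
    thread_name,
    thread_id,
    arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS readable_trace
FROM system.stack_trace
LIMIT 1;
```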
Columns:
@ -4,7 +4,7 @@ Contains stack traces collected by the sampling query profiler.

ClickHouse creates this table when the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also the [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) and [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.

To analyze logs, use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` introspection functions.

Columns:
@ -10,7 +10,7 @@ Applies Student's t-test to samples from two populations.

**Syntax**

``` sql
studentTTest([confidence_level])(sample_data, sample_index)
```

Values of both samples are in the `sample_data` column. If `sample_index` equals 0, then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.
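A quick sketch of the call with the optional confidence level (the table name below is hypothetical):

``` sql
SELECT studentTTest(0.95)(sample_data, sample_index) FROM student_ttest_samples;
```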
@ -21,12 +21,19 @@ The null hypothesis is that means of populations are equal. Normal distribution

- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).

**Parameters**

- `confidence_level` — Confidence level in order to calculate confidence intervals. [Float](../../../sql-reference/data-types/float.md).

**Returned values**

[Tuple](../../../sql-reference/data-types/tuple.md) with two or four elements (if the optional `confidence_level` is specified):

- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
- [calculated confidence-interval-low.] [Float64](../../../sql-reference/data-types/float.md).
- [calculated confidence-interval-high.] [Float64](../../../sql-reference/data-types/float.md).

**Example**
@ -10,7 +10,7 @@ Applies Welch's t-test to samples from two populations.

**Syntax**

``` sql
welchTTest([confidence_level])(sample_data, sample_index)
```

Values of both samples are in the `sample_data` column. If `sample_index` equals 0, then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.

@ -21,12 +21,18 @@ The null hypothesis is that means of populations are equal. Normal distribution

- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).

**Parameters**

- `confidence_level` — Confidence level in order to calculate confidence intervals. [Float](../../../sql-reference/data-types/float.md).

**Returned values**

[Tuple](../../../sql-reference/data-types/tuple.md) with two or four elements (if the optional `confidence_level` is specified):

- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
- [calculated confidence-interval-low.] [Float64](../../../sql-reference/data-types/float.md).
- [calculated confidence-interval-high.] [Float64](../../../sql-reference/data-types/float.md).

**Example**
@ -1,9 +1,9 @@
---
toc_priority: 40
toc_title: UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256
---

# UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256

Fixed-length integers, with or without a sign.
@ -120,7 +120,7 @@ The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32

Get the first available contact method for the customer from the contact list:

``` sql
SELECT name, coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook;
```

``` text
|
|||||||
/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97
|
/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## addressToLineWithInlines {#addresstolinewithinlines}
|
||||||
|
|
||||||
|
Similar to `addressToLine`, but it will return an Array with all inline functions, and will be much slower as a price.
|
||||||
|
|
||||||
|
If you use official ClickHouse packages, you need to install the `clickhouse-common-static-dbg` package.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
addressToLineWithInlines(address_of_binary_instruction)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Address of instruction in a running process.
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- Array which first element is source code filename and the line number in this file delimited by colon. And from second element, inline functions' source code filename and line number and function name are listed.
|
||||||
|
|
||||||
|
- Array with single element which is name of a binary, if the function couldn’t find the debug information.
|
||||||
|
|
||||||
|
- Empty array, if the address is not valid.
|
||||||
|
|
||||||
|
Type: [Array(String)](../../sql-reference/data-types/array.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Enabling introspection functions:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SET allow_introspection_functions=1;
|
||||||
|
```
|
||||||
|
|
||||||
|
Applying the function to address.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT addressToLineWithInlines(531055181::UInt64);
|
||||||
|
```
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─addressToLineWithInlines(CAST('531055181', 'UInt64'))────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ ['./src/Functions/addressToLineWithInlines.cpp:98','./build_normal_debug/./src/Functions/addressToLineWithInlines.cpp:176:DB::(anonymous namespace)::FunctionAddressToLineWithInlines::implCached(unsigned long) const'] │
|
||||||
|
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
Applying the function to the whole stack trace:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT
|
||||||
|
ta, addressToLineWithInlines(arrayJoin(trace) as ta)
|
||||||
|
FROM system.trace_log
|
||||||
|
WHERE
|
||||||
|
query_id = '5e173544-2020-45de-b645-5deebe2aae54';
|
||||||
|
```
|
||||||
|
|
||||||
|
The [arrayJoin](../../sql-reference/functions/array-functions.md#array-functions-join) functions will split array to rows.
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌────────ta─┬─addressToLineWithInlines(arrayJoin(trace))───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ 365497529 │ ['./build_normal_debug/./contrib/libcxx/include/string_view:252'] │
|
||||||
|
│ 365593602 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:191'] │
|
||||||
|
│ 365593866 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365592528 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365591003 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:477'] │
|
||||||
|
│ 365590479 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:442'] │
|
||||||
|
│ 365590600 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:457'] │
|
||||||
|
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365607098 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365590571 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:451'] │
|
||||||
|
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365607098 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365590571 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:451'] │
|
||||||
|
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365607098 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365590571 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:451'] │
|
||||||
|
│ 365598941 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:0'] │
|
||||||
|
│ 365597289 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:807'] │
|
||||||
|
│ 365599840 │ ['./build_normal_debug/./src/Common/Dwarf.cpp:1118'] │
|
||||||
|
│ 531058145 │ ['./build_normal_debug/./src/Functions/addressToLineWithInlines.cpp:152'] │
|
||||||
|
│ 531055181 │ ['./src/Functions/addressToLineWithInlines.cpp:98','./build_normal_debug/./src/Functions/addressToLineWithInlines.cpp:176:DB::(anonymous namespace)::FunctionAddressToLineWithInlines::implCached(unsigned long) const'] │
|
||||||
|
│ 422333613 │ ['./build_normal_debug/./src/Functions/IFunctionAdaptors.h:21'] │
|
||||||
|
│ 586866022 │ ['./build_normal_debug/./src/Functions/IFunction.cpp:216'] │
|
||||||
|
│ 586869053 │ ['./build_normal_debug/./src/Functions/IFunction.cpp:264'] │
|
||||||
|
│ 586873237 │ ['./build_normal_debug/./src/Functions/IFunction.cpp:334'] │
|
||||||
|
│ 597901620 │ ['./build_normal_debug/./src/Interpreters/ExpressionActions.cpp:601'] │
|
||||||
|
│ 597898534 │ ['./build_normal_debug/./src/Interpreters/ExpressionActions.cpp:718'] │
|
||||||
|
│ 630442912 │ ['./build_normal_debug/./src/Processors/Transforms/ExpressionTransform.cpp:23'] │
|
||||||
|
│ 546354050 │ ['./build_normal_debug/./src/Processors/ISimpleTransform.h:38'] │
|
||||||
|
│ 626026993 │ ['./build_normal_debug/./src/Processors/ISimpleTransform.cpp:89'] │
|
||||||
|
│ 626294022 │ ['./build_normal_debug/./src/Processors/Executors/ExecutionThreadContext.cpp:45'] │
|
||||||
|
│ 626293730 │ ['./build_normal_debug/./src/Processors/Executors/ExecutionThreadContext.cpp:63'] │
|
||||||
|
│ 626169525 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:213'] │
|
||||||
|
│ 626170308 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:178'] │
|
||||||
|
│ 626166348 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:329'] │
|
||||||
|
│ 626163461 │ ['./build_normal_debug/./src/Processors/Executors/PipelineExecutor.cpp:84'] │
|
||||||
|
│ 626323536 │ ['./build_normal_debug/./src/Processors/Executors/PullingAsyncPipelineExecutor.cpp:85'] │
|
||||||
|
│ 626323277 │ ['./build_normal_debug/./src/Processors/Executors/PullingAsyncPipelineExecutor.cpp:112'] │
|
||||||
|
│ 626323133 │ ['./build_normal_debug/./contrib/libcxx/include/type_traits:3682'] │
|
||||||
|
│ 626323041 │ ['./build_normal_debug/./contrib/libcxx/include/tuple:1415'] │
|
||||||
|
└───────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
## addressToSymbol {#addresstosymbol}
|
## addressToSymbol {#addresstosymbol}
|
||||||
|
|
||||||
Converts virtual memory address inside ClickHouse server process to the symbol from ClickHouse object files.
|
Converts virtual memory address inside ClickHouse server process to the symbol from ClickHouse object files.
|
||||||
|
@ -22,7 +22,7 @@ tuple(x, y, …)

## tupleElement {#tupleelement}

A function that allows getting a column from a tuple.
‘N’ is the column index, starting from 1. ‘N’ must be a constant. ‘N’ must be a strictly positive integer no greater than the size of the tuple.
There is no cost to execute the function.

The function implements the operator `x.N`.
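A quick illustrative query (the tuple literal is just an example value):

``` sql
-- Returns the second element of the tuple: 'a'
SELECT tupleElement((1, 'a', 2.5), 2) AS second_element;
```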
@ -216,6 +216,17 @@ This is more optimal than using the normal IN. However, keep the following point

It also makes sense to specify a local table in the `GLOBAL IN` clause, in case this local table is only available on the requestor server and you want to use data from it on remote servers.

### Distributed Subqueries and max_rows_in_set

You can use [`max_rows_in_set`](../../operations/settings/query-complexity.md#max-rows-in-set) and [`max_bytes_in_set`](../../operations/settings/query-complexity.md#max-rows-in-set) to control how much data is transferred during distributed queries.

This is especially important if the `GLOBAL IN` query returns a large amount of data. Consider the following SQL:

```sql
select * from table1 where col1 global in (select col1 from table2 where <some_predicate>)
```

If `some_predicate` is not selective enough, it will return a large amount of data and cause performance issues. In such cases, it is wise to limit the data transfer over the network. Also, note that [`set_overflow_mode`](../../operations/settings/query-complexity.md#set_overflow_mode) is set to `throw` (by default), meaning that an exception is raised when these thresholds are met.
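A hedged sketch of capping the transferred set before running such a query (the limit values below are arbitrary examples, not recommendations):

```sql
-- With set_overflow_mode = 'throw' (the default), exceeding these bounds raises an exception.
SET max_rows_in_set = 10000000, max_bytes_in_set = 1000000000;
```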
### Distributed Subqueries and max_parallel_replicas {#max_parallel_replica-subqueries}

When max_parallel_replicas is greater than 1, distributed queries are further transformed. For example, the following:
@ -197,12 +197,13 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

## MATERIALIZE COLUMN {#materialize-column}

Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
It is used when it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly on `SELECT` execution turns out to be expensive.

Syntax:

```sql
ALTER TABLE table MATERIALIZE COLUMN col;
```

**Example**

@ -211,20 +212,34 @@ ALTER TABLE table MATERIALIZE COLUMN col [FINAL];
DROP TABLE IF EXISTS tmp;
SET mutations_sync = 2;
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 5;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);

ALTER TABLE tmp MATERIALIZE COLUMN s;

SELECT groupArray(x), groupArray(s) FROM (select x,s from tmp order by x);

┌─groupArray(x)─┬─groupArray(s)─────────┐
│ [0,1,2,3,4]   │ ['0','1','2','3','4'] │
└───────────────┴───────────────────────┘

ALTER TABLE tmp MODIFY COLUMN s String MATERIALIZED toString(round(100/x));

INSERT INTO tmp SELECT * FROM system.numbers LIMIT 5,5;

SELECT groupArray(x), groupArray(s) FROM tmp;

┌─groupArray(x)─────────┬─groupArray(s)──────────────────────────────────┐
│ [0,1,2,3,4,5,6,7,8,9] │ ['0','1','2','3','4','20','17','14','12','11'] │
└───────────────────────┴────────────────────────────────────────────────┘

ALTER TABLE tmp MATERIALIZE COLUMN s;

SELECT groupArray(x), groupArray(s) FROM tmp;

┌─groupArray(x)─────────┬─groupArray(s)─────────────────────────────────────────┐
│ [0,1,2,3,4,5,6,7,8,9] │ ['inf','100','50','33','25','20','17','14','12','11'] │
└───────────────────────┴─────────────────────────────────────────────────────────┘
```

**See Also**
@ -172,6 +172,7 @@ Hierarchy of privileges:
- `SYSTEM FLUSH LOGS`
- [INTROSPECTION](#grant-introspection)
    - `addressToLine`
    - `addressToLineWithInlines`
    - `addressToSymbol`
    - `demangle`
- [SOURCES](#grant-sources)

@ -430,6 +431,7 @@ Allows using [introspection](../../operations/optimizing-performance/sampling-qu

- `INTROSPECTION`. Level: `GROUP`. Aliases: `INTROSPECTION FUNCTIONS`
    - `addressToLine`. Level: `GLOBAL`
    - `addressToLineWithInlines`. Level: `GLOBAL`
    - `addressToSymbol`. Level: `GLOBAL`
    - `demangle`. Level: `GLOBAL`
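For instance, the whole group can be granted at once (the user name below is hypothetical):

```sql
GRANT INTROSPECTION ON *.* TO john;
```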
@ -285,7 +285,7 @@ ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_

`WITH FILL` can be applied for fields with Numeric (all kinds of float, decimal, int) or Date/DateTime types. When applied for `String` fields, missed values are filled with empty strings.
When `FROM const_expr` is not defined, the filling sequence uses the minimal `expr` field value from `ORDER BY`.
When `TO const_expr` is not defined, the filling sequence uses the maximum `expr` field value from `ORDER BY`.
When `STEP const_numeric_expr` is defined, `const_numeric_expr` is interpreted as is for numeric types, as days for the Date type, and as seconds for the DateTime type. It also supports the [INTERVAL](https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval/) data type representing time and date intervals.
When `STEP const_numeric_expr` is omitted, the filling sequence uses `1.0` for numeric types, `1 day` for the Date type and `1 second` for the DateTime type.

Example of a query without `WITH FILL`:

@ -402,4 +402,85 @@ Result:
└────────────┴────────────┴──────────┘
```

The following query uses an `INTERVAL` of 1 day as the step for filling column `d1`:

``` sql
SELECT
    toDate((number * 10) * 86400) AS d1,
    toDate(number * 86400) AS d2,
    'original' AS source
FROM numbers(10)
WHERE (number % 3) = 1
ORDER BY
    d1 WITH FILL STEP INTERVAL 1 DAY,
    d2 WITH FILL;
```

Result:
```
┌─────────d1─┬─────────d2─┬─source───┐
│ 1970-01-11 │ 1970-01-02 │ original │
│ 1970-01-12 │ 1970-01-01 │          │
│ 1970-01-13 │ 1970-01-01 │          │
│ 1970-01-14 │ 1970-01-01 │          │
│ 1970-01-15 │ 1970-01-01 │          │
│ 1970-01-16 │ 1970-01-01 │          │
│ 1970-01-17 │ 1970-01-01 │          │
│ 1970-01-18 │ 1970-01-01 │          │
│ 1970-01-19 │ 1970-01-01 │          │
│ 1970-01-20 │ 1970-01-01 │          │
│ 1970-01-21 │ 1970-01-01 │          │
│ 1970-01-22 │ 1970-01-01 │          │
│ 1970-01-23 │ 1970-01-01 │          │
│ 1970-01-24 │ 1970-01-01 │          │
│ 1970-01-25 │ 1970-01-01 │          │
│ 1970-01-26 │ 1970-01-01 │          │
│ 1970-01-27 │ 1970-01-01 │          │
│ 1970-01-28 │ 1970-01-01 │          │
│ 1970-01-29 │ 1970-01-01 │          │
│ 1970-01-30 │ 1970-01-01 │          │
│ 1970-01-31 │ 1970-01-01 │          │
│ 1970-02-01 │ 1970-01-01 │          │
│ 1970-02-02 │ 1970-01-01 │          │
│ 1970-02-03 │ 1970-01-01 │          │
│ 1970-02-04 │ 1970-01-01 │          │
│ 1970-02-05 │ 1970-01-01 │          │
│ 1970-02-06 │ 1970-01-01 │          │
│ 1970-02-07 │ 1970-01-01 │          │
│ 1970-02-08 │ 1970-01-01 │          │
│ 1970-02-09 │ 1970-01-01 │          │
│ 1970-02-10 │ 1970-01-05 │ original │
│ 1970-02-11 │ 1970-01-01 │          │
│ 1970-02-12 │ 1970-01-01 │          │
│ 1970-02-13 │ 1970-01-01 │          │
│ 1970-02-14 │ 1970-01-01 │          │
│ 1970-02-15 │ 1970-01-01 │          │
│ 1970-02-16 │ 1970-01-01 │          │
│ 1970-02-17 │ 1970-01-01 │          │
│ 1970-02-18 │ 1970-01-01 │          │
│ 1970-02-19 │ 1970-01-01 │          │
│ 1970-02-20 │ 1970-01-01 │          │
│ 1970-02-21 │ 1970-01-01 │          │
│ 1970-02-22 │ 1970-01-01 │          │
│ 1970-02-23 │ 1970-01-01 │          │
│ 1970-02-24 │ 1970-01-01 │          │
│ 1970-02-25 │ 1970-01-01 │          │
│ 1970-02-26 │ 1970-01-01 │          │
│ 1970-02-27 │ 1970-01-01 │          │
│ 1970-02-28 │ 1970-01-01 │          │
│ 1970-03-01 │ 1970-01-01 │          │
│ 1970-03-02 │ 1970-01-01 │          │
│ 1970-03-03 │ 1970-01-01 │          │
│ 1970-03-04 │ 1970-01-01 │          │
│ 1970-03-05 │ 1970-01-01 │          │
│ 1970-03-06 │ 1970-01-01 │          │
│ 1970-03-07 │ 1970-01-01 │          │
│ 1970-03-08 │ 1970-01-01 │          │
│ 1970-03-09 │ 1970-01-01 │          │
│ 1970-03-10 │ 1970-01-01 │          │
│ 1970-03-11 │ 1970-01-01 │          │
│ 1970-03-12 │ 1970-01-08 │ original │
└────────────┴────────────┴──────────┘
```

[Original article](https://clickhouse.com/docs/en/sql-reference/statements/select/order-by/) <!--hide-->
@ -72,7 +72,7 @@ Reloads all [CatBoost](../../guides/apply-catboost-model.md#applying-catboost-mo

**Syntax**

```sql
SYSTEM RELOAD MODELS [ON CLUSTER cluster_name]
```

## RELOAD MODEL {#query_language-system-reload-model}

@ -82,7 +82,7 @@ Completely reloads a CatBoost model `model_name` if the configuration was update

**Syntax**

```sql
SYSTEM RELOAD MODEL [ON CLUSTER cluster_name] <model_name>
```

## RELOAD FUNCTIONS {#query_language-system-reload-functions}

@ -92,8 +92,8 @@ Reloads all registered [executable user defined functions](../functions/index.md

**Syntax**

```sql
RELOAD FUNCTIONS [ON CLUSTER cluster_name]
RELOAD FUNCTION [ON CLUSTER cluster_name] function_name
```

## DROP DNS CACHE {#query_language-system-drop-dns-cache}
@ -3,14 +3,14 @@ toc_priority: 53
toc_title: USE
---

# USE Statement {#use}

``` sql
USE db
```

Lets you set the current database for the session.

The current database is used for searching for tables if the database is not explicitly defined in the query with a dot before the table name.

This query can’t be made when using the HTTP protocol, since there is no concept of a session.
@ -30,7 +30,7 @@ There may be any number of space symbols between syntactical constructions (incl

ClickHouse supports both SQL-style and C-style comments, as illustrated right after this list:

- SQL-style comments start with `--`, `#!` or `# ` and continue to the end of the line; a space after `--` and `#!` can be omitted.
- C-style comments span from `/*` to `*/` and can be multiline; spaces are not required either.
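A brief sketch of both styles (the query itself is just a placeholder):

``` sql
SELECT 1 -- this trailing text is ignored
/* a C-style comment
   can span several lines */
```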

## Keywords {#syntax-keywords}
@ -5,6 +5,6 @@ toc_title: Roadmap

# Roadmap {#roadmap}

The roadmap for the year 2022 is published for open discussion [here](https://github.com/ClickHouse/ClickHouse/issues/32513).

{## [Original article](https://clickhouse.com/docs/en/roadmap/) ##}
BIN docs/ko/images/column-oriented.gif (new binary file, not shown; 43 KiB)
1 docs/ko/images/logo.svg (new file)
@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="54" height="48" markdown="1" viewBox="0 0 9 8"><style>.o{fill:#fc0}.r{fill:red}</style><path d="M0,7 h1 v1 h-1 z" class="r"/><path d="M0,0 h1 v7 h-1 z" class="o"/><path d="M2,0 h1 v8 h-1 z" class="o"/><path d="M4,0 h1 v8 h-1 z" class="o"/><path d="M6,0 h1 v8 h-1 z" class="o"/><path d="M8,3.25 h1 v1.5 h-1 z" class="o"/></svg>
BIN docs/ko/images/play.png (new binary file, not shown; 26 KiB)
BIN docs/ko/images/row-oriented.gif (new binary file, not shown; 38 KiB)
94 docs/ko/index.md (new file)
@ -0,0 +1,94 @@
---
toc_priority: 0
toc_title: Table of Contents
---

# What Is ClickHouse? {#what-is-clickhouse}

ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing (OLAP) of queries.

In a "normal" row-oriented DBMS, data is stored in the following order:

| row | WatchID | JavaEnable | Title | GoodEvent | EventTime |
|-----|-------------|------------|--------------------|-----------|---------------------|
| #0 | 89354350662 | 1 | Investor Relations | 1 | 2016-05-18 05:19:20 |
| #1 | 90329509958 | 0 | Contact us | 1 | 2016-05-18 08:10:20 |
| #2 | 89953706054 | 1 | Mission | 1 | 2016-05-18 07:38:00 |
| #N | … | … | … | … | … |

In other words, all the values related to a row are physically stored next to each other.

Examples of row-oriented DBMSs are MySQL, Postgres, and MS SQL Server.

In a column-oriented DBMS, data is stored like this:

| Row: | #0 | #1 | #2 | #N |
|-------------|---------------------|---------------------|---------------------|-----|
| WatchID: | 89354350662 | 90329509958 | 89953706054 | … |
| JavaEnable: | 1 | 0 | 1 | … |
| Title: | Investor Relations | Contact us | Mission | … |
| GoodEvent: | 1 | 1 | 1 | … |
| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … |

These examples only show the order in which data is arranged. Values from different columns are stored separately, and data from the same column is stored together.

Examples of column-oriented DBMSs: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+.

Different orders for storing data are better suited to different scenarios. The data access scenario covers what queries are made, how often, and in what proportion; how much data is read for each type of query (rows, columns, and bytes); the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used and how isolated they are; requirements for data replication and logical integrity; and requirements for latency and throughput for each type of query, and so on.

The higher the load on the system, the more important it is to customize the system setup to match the requirements of the usage scenario, and the more fine-grained this customization becomes. No system is equally well suited to significantly different scenarios. If a system adapts to a wide set of scenarios under a high load, it will handle all of them poorly, or will work well for only one or a few of the possible scenarios.

## Key Properties of the OLAP Scenario {#key-properties-of-olap-scenario}

- The vast majority of requests are for read access.
- Data is updated in fairly large batches (> 1000 rows), not by single rows, or it is not updated at all.
- Data is added to the DB but is not modified.
- For reads, quite a large number of rows are extracted from the DB, but only a small subset of columns.
- Tables are "wide," meaning they contain a large number of columns.
- Queries are relatively rare (usually hundreds of queries per server per second or fewer).
- For simple queries, latencies around 50 ms are allowed.
- Column values are fairly small: numbers and short strings (for example, 60 bytes per URL).
- High throughput is required when processing a single query (up to billions of rows per second per server).
- Transactions are not necessary.
- Requirements for data consistency are low.
- There is one large table per query; all tables except one are small.
- A query result is significantly smaller than the source data. In other words, data is filtered or aggregated, so the result fits in a single server's RAM.

It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or key-value access). So it does not make sense to use an OLTP or key-value DB for processing analytical queries if you want decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases.

## Why Column-Oriented Databases Work Better in the OLAP Scenario {#why-column-oriented-databases-work-better-in-the-olap-scenario}

Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster at processing most queries. The reasons are explained in detail below, but the fact is easier to demonstrate visually.

**Row-oriented DBMS**

![Row-oriented](images/row-oriented.gif#)

**Column-oriented DBMS**

![Column-oriented](images/column-oriented.gif#)

See the difference?

### Input/Output {#inputoutput}

1. For an analytical query, only a small number of table columns need to be read. In a column-oriented database, you can read just the data you need. For example, if you need 5 columns out of 100, you can expect a 20-fold reduction in I/O.
2. Since data is read in packets, it is easier to compress. Data in columns is also easier to compress. This further reduces the I/O volume.
3. Due to the reduced I/O, more data fits in the system cache.

For example, the query "count the number of records for each advertising platform" requires reading one "advertising platform ID" column, which takes up 1 byte uncompressed. If most of the traffic was not from advertising platforms, you can expect at least 10-fold compression of this column. With a fast compression algorithm, data can be decompressed at a rate of at least several gigabytes of uncompressed data per second. In other words, this query can be processed at a speed of roughly several billion rows per second on a single server. This speed is actually achieved in practice.

### CPU {#cpu}

Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you do not do this, with any half-decent disk subsystem, the query interpreter inevitably stalls the CPU. It makes sense both to store data in columns and to process it, when possible, by columns.

There are two ways to do this:

1. A vector engine. All operations are written for vectors instead of for separate values. This means you do not need to call operations very often, and dispatching costs are negligible. Operation code contains an optimized internal cycle.
2. Code generation. The code generated for the query has all the indirect calls in it.

This is not done in "normal" databases, because it does not make sense when running simple queries. However, there are exceptions. For example, MemSQL uses code generation to reduce latency when processing SQL queries. (For comparison, analytical DBMSs require optimization of throughput, not latency.)

For CPU efficiency, the query language must be declarative (SQL or MDX), or at least a vector (J, K). The query should contain only implicit loops, allowing for optimization.

{## [Original article](https://clickhouse.com/docs/en/) ##}
@ -2,8 +2,13 @@
toc_priority: 65
toc_title: Building on Mac OS X
---

# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}

!!! info "You do not need to build ClickHouse yourself"
    You can install a pre-built ClickHouse as described in the [Quick Start](https://clickhouse.com/#quick-start).
    Follow the installation instructions for `macOS (Intel)` or `macOS (Apple Silicon)`.

The build should work on x86_64 (Intel) on macOS 10.15 (Catalina) and higher with the latest version of Xcode's native AppleClang, Homebrew's vanilla Clang, or GCC compilers.

## Installing Homebrew {#install-homebrew}
|
|||||||
|
|
||||||
``` text
|
``` text
|
||||||
pattern
|
pattern
|
||||||
|
rule_type
|
||||||
regexp
|
regexp
|
||||||
function
|
function
|
||||||
pattern
|
pattern
|
||||||
|
rule_type
|
||||||
regexp
|
regexp
|
||||||
age + precision
|
age + precision
|
||||||
...
|
...
|
||||||
pattern
|
pattern
|
||||||
|
rule_type
|
||||||
regexp
|
regexp
|
||||||
function
|
function
|
||||||
age + precision
|
age + precision
|
||||||
@ -129,12 +132,20 @@ default
|
|||||||
|
|
||||||

Fields for the `pattern` and `default` sections:

- `rule_type` – the type of the rule (applied only to metrics of the specified kind); used to separate the rules for plain and tagged metrics. Optional field. Default value: `all`.

    If only one kind of metrics is used, or if rule-matching performance is not critical, the field can be omitted; in that case only one set of rules is built. Otherwise, if a non-default value is specified for at least one rule, two independent sets of rules are created: one for plain metrics (classic root.branch.leaf) and one for tagged metrics (root.branch.leaf;tag1=value1).

    The default rules are included in both sets.

    Possible values:

    - `all` (default) – a universal rule; also used when the field is not set.
    - `plain` – a rule for plain metrics (without tags). The `regexp` field is treated as a regular expression.
    - `tagged` – a rule for tagged metrics (a metric is stored in the database in the format `someName?tag1=value1&tag2=value2&tag3=value3`). The regular expression must be sorted by tag names, with the value of the `__name__` tag first, if present. The `regexp` field is treated as a regular expression.
    - `tag_list` – a rule for tagged metrics; a simple DSL that makes it easier to write the regular expression, using the Graphite tag format `someName;tag1=value1;tag2=value2`, `someName` or `tag1=value1;tag2=value2`. The `regexp` field is translated into a `tagged` rule. Sorting by tag names is not required; it is done automatically. A tag value (but not a tag name) may itself be a regular expression (for example, `env=(dev|staging)`).
- `regexp` – a pattern for the metric name (a regular expression or a DSL).
- `age` – the minimum age of the data, in seconds.
- `precision` – how precisely to define the age of the data, in seconds. Must be a divisor of 86400 (the number of seconds in a day).
- `function` – the name of the aggregate function to apply to data whose age falls within the range `[age, age + precision]`. Accepted functions: min / max / any / avg. The average is computed approximately, as the average of the averages.

### Configuration example without rule type separation {#configuration-example}

``` xml
<graphite_rollup>
@ -169,6 +180,80 @@ default
</graphite_rollup>
```

### Configuration example with rule type separation {#configuration-typed-example}

``` xml
<graphite_rollup>
    <version_column_name>Version</version_column_name>
    <pattern>
        <rule_type>plain</rule_type>
        <regexp>click_cost</regexp>
        <function>any</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tagged</rule_type>
        <regexp>^((.*)|.)min\?</regexp>
        <function>min</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tagged</rule_type>
        <regexp><![CDATA[^someName\?(.*&)*tag1=value1(&|$)]]></regexp>
        <function>min</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tag_list</rule_type>
        <regexp>someName;tag2=value2</regexp>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <default>
        <function>max</function>
        <retention>
            <age>0</age>
            <precision>60</precision>
        </retention>
        <retention>
            <age>3600</age>
            <precision>300</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>3600</precision>
        </retention>
    </default>
</graphite_rollup>
```

!!! warning "Warning"
    Data rollup is performed during merges. Merges are usually not started for old partitions, so to roll up old data you need to trigger an unscheduled merge with [optimize](../../../sql-reference/statements/optimize.md), or use additional tools such as [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).
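
For instance, a rollup of a single old partition can be forced like this (a sketch; the table name `graphite.metrics` and the partition ID are placeholders):

```sql
-- force an unscheduled merge so the rollup rules are applied to this partition
OPTIMIZE TABLE graphite.metrics PARTITION 202201 FINAL;
```
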
@ -872,3 +872,13 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
```

If a disk is configured as `cold`, data is moved to S3 when a TTL rule fires, or when the free space on the local disk drops below a threshold defined as `move_factor * disk_size`.
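
As a quick sanity check, the effective `move_factor` of a policy can be inspected through the `system.storage_policies` table (a sketch; the exact set of columns may vary between ClickHouse versions):

```sql
-- show the volumes of the policy and the free-space ratio that triggers background moves
SELECT policy_name, volume_name, disks, move_factor
FROM system.storage_policies
WHERE policy_name = 'moving_from_ssd_to_hdd';
```
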

## Virtual columns {#virtual-columns}

- `_part` — the name of the data part.
- `_part_index` — the sequential index of the part in the query result.
- `_partition_id` — the name of the partition.
- `_part_uuid` — the unique identifier of the part (if the MergeTree setting `assign_part_uuids` is enabled).
- `_partition_value` — the values (a tuple) of the `partition by` expression.
- `_sample_factor` — the sampling factor (from the query).
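
For example, the part-related columns can be queried like ordinary columns (a sketch; `my_table` is a placeholder name):

```sql
-- count rows per data part and partition using the virtual columns listed above
SELECT _part, _partition_id, count() AS rows
FROM my_table
GROUP BY _part, _partition_id
ORDER BY _part;
```
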
@ -48,10 +48,8 @@ CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10
If the type of one of the columns in the Buffer table and in the destination table does not match, an error message is written to the server log and the buffer is cleared.
The same happens if the destination table does not exist when the buffer is flushed.

If you need to run ALTER both for the destination table and for the Buffer table, the recommendation is to drop the Buffer table first, run ALTER on the destination table, and then create the Buffer table again.
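
A sketch of that sequence, using the `merge.hits` / `merge.hits_buffer` pair from the example above (the added column and the Buffer parameter values are illustrative):

```sql
-- 1. drop the Buffer table so no more blocks are routed through it
DROP TABLE IF EXISTS merge.hits_buffer;

-- 2. alter the destination table
ALTER TABLE merge.hits ADD COLUMN browser String;

-- 3. recreate the Buffer table with the same engine parameters
CREATE TABLE merge.hits_buffer AS merge.hits
ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000);
```
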
!!! attention "Attention"
    In releases before October 26, 2021, running ALTER on a Buffer table breaks the block structure and causes an error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117) and [#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)), so dropping the buffer and recreating it is the only migration option for this engine. Before running ALTER on a Buffer table, make sure this bug is fixed in your version.

If the server is restarted abnormally, the data in the buffer is lost.

@ -1736,6 +1736,48 @@ ClickHouse генерирует исключение:
That is, if an `INSERT` into the main table was skipped (deduplicated), then nothing is inserted into the materialized views either. This behavior exists so that materialized views that aggressively aggregate the data of the source `INSERT`s keep working, even when the blocks inserted into the materialized views end up identical for different `INSERT`s into the main table.

At the same time, this "breaks" the idempotency of inserts into materialized views. That is, if an `INSERT` succeeded in the main table but failed in the materialized view's table (for example, because of a network failure while communicating with ZooKeeper), the client gets an error and retries the `INSERT`. But the materialized view does not receive the data, because deduplication triggers on the main table. The setting `deduplicate_blocks_in_dependent_materialized_views` changes this: on the retried `INSERT`, deduplication is performed on the materialized view's table as well, and the retry inserts into the materialized view the data that was lost because of the failure of the first `INSERT`.
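
A minimal sketch of the retry scenario (the table name is a placeholder; the setting is the one described above):

```sql
-- make the retried INSERT check deduplication on the materialized view's table as well
SET deduplicate_blocks_in_dependent_materialized_views = 1;

-- the client re-sends exactly the same block after the first attempt failed;
-- the main table deduplicates it, while the materialized view still receives it
INSERT INTO main_table VALUES (1, 'retried block');
```
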

## insert_deduplication_token {#insert_deduplication_token}

This setting lets the user provide their own deduplication semantics in MergeTree/ReplicatedMergeTree.
For example, by providing a unique value of this setting in each INSERT statement, the user can avoid deduplication of identical inserted data.

Possible values:

- Any string

Default value: empty string (disabled).

`insert_deduplication_token` is used for deduplication _only_ when the value is not empty.

Example:

```sql
CREATE TABLE test_table
( A Int64 )
ENGINE = MergeTree
ORDER BY A
SETTINGS non_replicated_deduplication_window = 100;

INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (1);

-- the next insert will not be deduplicated because insert_deduplication_token is different
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test1' (1);

-- the next insert will be deduplicated because insert_deduplication_token
-- is the same as in one of the previous inserts
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (2);

SELECT * FROM test_table

┌─A─┐
│ 1 │
└───┘
┌─A─┐
│ 1 │
└───┘
```

## count_distinct_implementation {#settings-count_distinct_implementation}

Specifies which of the `uniq*` functions is used to execute the [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count) construction.
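
For instance, to make `COUNT(DISTINCT …)` use the exact implementation for the current session (a sketch; the table and column are placeholders):

```sql
SET count_distinct_implementation = 'uniqExact';
SELECT count(DISTINCT user_id) FROM visits;  -- executed as uniqExact(user_id)
```
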
@ -2119,7 +2161,7 @@ ClickHouse генерирует исключение:

- 1 — parallel parsing is enabled.
- 0 — parallel parsing is disabled.

Default value: `1`.

## output_format_parallel_formatting {#output-format-parallel-formatting}

@ -2130,7 +2172,7 @@ ClickHouse генерирует исключение:

- 1 — parallel formatting is enabled.
- 0 — parallel formatting is disabled.

Default value: `1`.

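Both settings can also be toggled per session; a sketch of disabling them for a single heavy import/export, for example to reduce memory usage:

```sql
SET input_format_parallel_parsing = 0;
SET output_format_parallel_formatting = 0;
-- subsequent INSERT ... FORMAT / SELECT ... FORMAT queries in this session
-- parse and format data in a single thread
```
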
## min_chunk_bytes_for_parallel_parsing {#min-chunk-bytes-for-parallel-parsing}

@ -197,12 +197,13 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

## MATERIALIZE COLUMN {#materialize-column}

Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
It is used when it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly at `SELECT` time can be noticeably expensive.

Syntax:

```sql
ALTER TABLE table MATERIALIZE COLUMN col;
```

**Example**

@ -211,21 +212,39 @@ ALTER TABLE table MATERIALIZE COLUMN col [FINAL];
```sql
DROP TABLE IF EXISTS tmp;
SET mutations_sync = 2;
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 5;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);

ALTER TABLE tmp MATERIALIZE COLUMN s;

SELECT groupArray(x), groupArray(s) FROM (select x,s from tmp order by x);

┌─groupArray(x)─┬─groupArray(s)─────────┐
│ [0,1,2,3,4]   │ ['0','1','2','3','4'] │
└───────────────┴───────────────────────┘

ALTER TABLE tmp MODIFY COLUMN s String MATERIALIZED toString(round(100/x));

INSERT INTO tmp SELECT * FROM system.numbers LIMIT 5,5;

SELECT groupArray(x), groupArray(s) FROM tmp;

┌─groupArray(x)─────────┬─groupArray(s)──────────────────────────────────┐
│ [0,1,2,3,4,5,6,7,8,9] │ ['0','1','2','3','4','20','17','14','12','11'] │
└───────────────────────┴────────────────────────────────────────────────┘

ALTER TABLE tmp MATERIALIZE COLUMN s;

SELECT groupArray(x), groupArray(s) FROM tmp;

┌─groupArray(x)─────────┬─groupArray(s)─────────────────────────────────────────┐
│ [0,1,2,3,4,5,6,7,8,9] │ ['inf','100','50','33','25','20','17','14','12','11'] │
└───────────────────────┴───────────────────────────────────────────────────────┘
```

**See also**

- [MATERIALIZED](../../statements/create/table.md#materialized).

## Limitations of the ALTER query {#ogranicheniia-zaprosa-alter}

@ -28,7 +28,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')

## Comments {#comments}

SQL-style and C-style comments are supported.
SQL-style comments: from `--`, `#!` or `# ` to the end of the line. The space after `--` and `#!` is optional.
C-style comments: from `/*` to `*/`. Such comments can span multiple lines. Spaces are optional here as well.
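
A small sketch showing both comment styles in queries:

```sql
SELECT 1 AS x; -- SQL-style comment up to the end of the line
SELECT 2 AS y; #! also a single-line comment
SELECT /* C-style comment,
          possibly spanning several lines */ 3 AS z;
```
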

## Keywords {#syntax-keywords}

@ -90,7 +90,10 @@ def concatenate(lang, docs_path, single_page_file, nav):
        line)

# If failed to replace the relative link, print to log
# But with some exceptions:
# - "../src/" -- for cmake-in-clickhouse.md (link to sources)
# - "../usr/share" -- changelog entry that has "../usr/share/zoneinfo"
if '../' in line and (not '../usr/share' in line) and (not '../src/' in line):
    logging.info('Failed to resolve relative link:')
    logging.info(path)
    logging.info(line)

@ -14,7 +14,7 @@ toc_title: Introduction

- [MySQL](../../engines/database-engines/mysql.md)

- [MaterializeMySQL](../../engines/database-engines/materialized-mysql.md)

- [Lazy](../../engines/database-engines/lazy.md)

@ -26,4 +26,6 @@ toc_title: Introduction

- [Replicated](../../engines/database-engines/replicated.md)

- [SQLite](../../engines/database-engines/sqlite.md)

[Source article](https://clickhouse.com/docs/en/database_engines/) <!--hide-->
Some files were not shown because too many files have changed in this diff.