Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-10 01:25:21 +00:00

Commit ccd78e3838: Merge remote-tracking branch 'upstream/master' into HEAD
.github/workflows/backport.yml (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
name: CherryPick
concurrency:
  group: cherry-pick
on: # yamllint disable-line rule:truthy
  schedule:
    - cron: '0 */3 * * *'
jobs:
  CherryPick:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Cherry pick
        env:
          TEMP_PATH: ${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          REPO_OWNER: "ClickHouse"
          REPO_NAME: "ClickHouse"
          REPO_TEAM: "core"
        run: |
          sudo pip install GitPython
          cd $GITHUB_WORKSPACE/tests/ci
          python3 cherry_pick.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
.github/workflows/cancel.yml (vendored, 2 lines changed)
@@ -1,7 +1,7 @@
 name: Cancel
 on: # yamllint disable-line rule:truthy
   workflow_run:
-    workflows: ["CIGithubActions"]
+    workflows: ["CIGithubActions", "ReleaseCI"]
     types:
       - requested
 jobs:
.github/workflows/main.yml (vendored, 1233 lines changed): file diff suppressed because it is too large
.github/workflows/master.yml (vendored, new file, 1384 lines): file diff suppressed because it is too large
.github/workflows/release_branches.yml (vendored, new file, 933 lines)
@@ -0,0 +1,933 @@
name: ReleaseCI
on: # yamllint disable-line rule:truthy
  push:
    branches:
      - '21.**'
      - '22.**'
      - '23.**'
      - '24.**'
      - 'backport/**'
jobs:
  DockerHubPush:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docker_images_check/changed_images.json
  CompatibilityCheck:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: CompatibilityCheck
        env:
          TEMP_PATH: ${{runner.temp}}/compatibility_check
          REPO_COPY: ${{runner.temp}}/compatibility_check/ClickHouse
          REPORTS_PATH: ${{runner.temp}}/reports_dir
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 compatibility_check.py 0
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
  BuilderDebRelease:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 0
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebAsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 3
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebUBsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 4
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebTsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 5
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebMsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 6
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebDebug:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 7
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
  BuilderReport:
    needs:
      - BuilderDebRelease
      - BuilderDebAsan
      - BuilderDebTsan
      - BuilderDebUBsan
      - BuilderDebMsan
      - BuilderDebDebug
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Report Builder
        env:
          TEMP_PATH: ${{runner.temp}}/report_check
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'ClickHouse build check (actions)'
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cd $GITHUB_WORKSPACE/tests/ci
          python3 build_report_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
  FunctionalStatelessTestRelease:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (release, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestTsan:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_tsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (thread, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestUBsan:
    needs: [BuilderDebUBsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_ubsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (ubsan, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_ubsan/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestMsan:
    needs: [BuilderDebMsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_memory
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (memory, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
  FunctionalStatefulTestRelease:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (release, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestTsan:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_tsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (thread, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_tsan/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestMsan:
    needs: [BuilderDebMsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_msan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (memory, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_msan/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestUBsan:
    needs: [BuilderDebUBsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_ubsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (ubsan, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_ubsan/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
  StressTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_thread
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (address, actions)'
          REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestTsan:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_thread
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (thread, actions)'
          REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestMsan:
    needs: [BuilderDebMsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_memory
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (memory, actions)'
          REPO_COPY: ${{runner.temp}}/stress_memory/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestUBsan:
    needs: [BuilderDebUBsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_undefined
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (undefined, actions)'
          REPO_COPY: ${{runner.temp}}/stress_undefined/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
  IntegrationTestsAsan:
    needs: [BuilderDebAsan, FunctionalStatelessTestAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Integration test
        env:
          TEMP_PATH: ${{runner.temp}}/integration_tests_asan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Integration tests (asan, actions)'
          REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  IntegrationTestsTsan:
    needs: [BuilderDebTsan, FunctionalStatelessTestTsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Integration test
        env:
          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Integration tests (thread, actions)'
          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  IntegrationTestsRelease:
    needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Integration test
        env:
          TEMP_PATH: ${{runner.temp}}/integration_tests_release
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Integration tests (release, actions)'
          REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FinishCheck:
    needs:
      - DockerHubPush
      - BuilderReport
      - FunctionalStatelessTestDebug
      - FunctionalStatelessTestRelease
      - FunctionalStatelessTestAsan
      - FunctionalStatelessTestTsan
      - FunctionalStatelessTestMsan
      - FunctionalStatelessTestUBsan
      - FunctionalStatefulTestDebug
      - FunctionalStatefulTestRelease
      - FunctionalStatefulTestAsan
      - FunctionalStatefulTestTsan
      - FunctionalStatefulTestMsan
      - FunctionalStatefulTestUBsan
      - StressTestDebug
      - StressTestAsan
      - StressTestTsan
      - StressTestMsan
      - StressTestUBsan
      - IntegrationTestsAsan
      - IntegrationTestsRelease
      - IntegrationTestsTsan
      - CompatibilityCheck
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Finish label
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 finish_check.py
CHANGELOG.md

@@ -1,4 +1,4 @@
-### ClickHouse release v21.11, 2021-11-07
+### ClickHouse release v21.11, 2021-11-09
 
 #### Backward Incompatible Change
 
@@ -17,7 +17,7 @@
 * Support `EXISTS (subquery)`. Closes [#6852](https://github.com/ClickHouse/ClickHouse/issues/6852). [#29731](https://github.com/ClickHouse/ClickHouse/pull/29731) ([Kseniia Sumarokova](https://github.com/kssenii)).
 * Session logging for audit. Logging all successful and failed login and logout events to a new `system.session_log` table. [#22415](https://github.com/ClickHouse/ClickHouse/pull/22415) ([Vasily Nemkov](https://github.com/Enmk)) ([Vitaly Baranov](https://github.com/vitlibar)).
 * Support multidimensional cosine distance and euclidean distance functions; L1, L2, Lp, Linf distances and norms. Scalar product on tuples and various arithmetic operators on tuples. This fully closes [#4509](https://github.com/ClickHouse/ClickHouse/issues/4509) and even more. [#27933](https://github.com/ClickHouse/ClickHouse/pull/27933) ([Alexey Boykov](https://github.com/mathalex)).
-* Add support for compression and decompression for `INTO OUTPUT` and `FROM INFILE` (with autodetect or with additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
+* Add support for compression and decompression for `INTO OUTFILE` and `FROM INFILE` (with autodetect or with additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
 * Add CORS (Cross Origin Resource Sharing) support with HTTP `OPTIONS` request. This means Grafana will now work with serverless requests without kludges. Closes [#18693](https://github.com/ClickHouse/ClickHouse/issues/18693). [#29155](https://github.com/ClickHouse/ClickHouse/pull/29155) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
 * Queries with JOIN ON now support disjunctions (OR). [#21320](https://github.com/ClickHouse/ClickHouse/pull/21320) ([Ilya Golshtein](https://github.com/ilejn)).
 * Added function `tokens`. It allows splitting a string into tokens using non-alphanumeric ASCII characters as separators. [#29981](https://github.com/ClickHouse/ClickHouse/pull/29981) ([Maksim Kita](https://github.com/kitaisreal)). Added function `ngrams` to extract ngrams from text. Closes [#29699](https://github.com/ClickHouse/ClickHouse/issues/29699). [#29738](https://github.com/ClickHouse/ClickHouse/pull/29738) ([Maksim Kita](https://github.com/kitaisreal)).
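For reference (not part of the changelog itself), the distances and norms named in the entry above are the standard ones for vectors $x, y \in \mathbb{R}^n$:

    \[
    \|x\|_1 = \sum_{i=1}^{n} |x_i|, \qquad
    \|x\|_2 = \Big( \sum_{i=1}^{n} x_i^2 \Big)^{1/2}, \qquad
    \|x\|_p = \Big( \sum_{i=1}^{n} |x_i|^p \Big)^{1/p}, \qquad
    \|x\|_\infty = \max_i |x_i|
    \]

with the Euclidean distance $\|x - y\|_2$ and the cosine distance $1 - \langle x, y \rangle / (\|x\|_2 \, \|y\|_2)$.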
@@ -624,7 +624,7 @@ macro (add_executable target)
 # invoke built-in add_executable
 # explicitly acquire and interpose malloc symbols by clickhouse_malloc
 # if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on then provide memcpy symbol explicitly to neutralize thinlto's libcall generation.
-if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
+if (ARCH_AMD64 AND GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
     _add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
 else ()
     _add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
@@ -108,6 +108,11 @@ public:
     LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
     LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
 
+    time_t to_time_t(const DateLUTImpl & time_zone = DateLUT::instance()) const
+    {
+        return time_zone.makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second);
+    }
+
     std::string toString() const
     {
         std::string s{"0000-00-00 00:00:00"};
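The new to_time_t helper folds the broken-down local time back into a UNIX timestamp through the given DateLUTImpl, defaulting to the server time zone. A minimal usage sketch (hypothetical call site, not part of this commit; the include path is assumed):

    #include <base/LocalDateTime.h> // include path assumed
    
    void example()
    {
        LocalDateTime dt(2021, 11, 9, 12, 30, 0);
        time_t in_server_tz = dt.to_time_t();                   // default: server time zone
        time_t in_utc = dt.to_time_t(DateLUT::instance("UTC")); // explicit time zone
        (void)in_server_tz;
        (void)in_utc;
    }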
@@ -25,6 +25,16 @@ void trim(String & s)
     s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
 }
 
+std::string getEditor()
+{
+    const char * editor = std::getenv("EDITOR");
+
+    if (!editor || !*editor)
+        editor = "vim";
+
+    return editor;
+}
+
 /// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
 /// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
 /// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
@@ -123,6 +133,7 @@ ReplxxLineReader::ReplxxLineReader(
     Patterns delimiters_,
     replxx::Replxx::highlighter_callback_t highlighter_)
     : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)), highlighter(std::move(highlighter_))
+    , editor(getEditor())
 {
     using namespace std::placeholders;
     using Replxx = replxx::Replxx;
@@ -236,14 +247,13 @@ void ReplxxLineReader::addToHistory(const String & line)
         rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
 }
 
-int ReplxxLineReader::execute(const std::string & command)
+/// See comments in ShellCommand::executeImpl()
+/// (for the vfork via dlsym())
+int ReplxxLineReader::executeEditor(const std::string & path)
 {
-    std::vector<char> argv0("sh", &("sh"[3]));
-    std::vector<char> argv1("-c", &("-c"[3]));
-    std::vector<char> argv2(command.data(), command.data() + command.size() + 1);
-
-    const char * filename = "/bin/sh";
-    char * const argv[] = {argv0.data(), argv1.data(), argv2.data(), nullptr};
+    std::vector<char> argv0(editor.data(), editor.data() + editor.size() + 1);
+    std::vector<char> argv1(path.data(), path.data() + path.size() + 1);
+    char * const argv[] = {argv0.data(), argv1.data(), nullptr};
 
     static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
     if (!real_vfork)
@@ -260,6 +270,7 @@
         return -1;
     }
 
+    /// Child
     if (0 == pid)
     {
         sigset_t mask;
@@ -267,16 +278,26 @@
         sigprocmask(0, nullptr, &mask);
         sigprocmask(SIG_UNBLOCK, &mask, nullptr);
 
-        execv(filename, argv);
+        execvp(editor.c_str(), argv);
+        rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str());
         _exit(-1);
     }
 
     int status = 0;
-    if (-1 == waitpid(pid, &status, 0))
+    do
     {
-        rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
-        return -1;
-    }
+        int exited_pid = waitpid(pid, &status, 0);
+        if (exited_pid == -1)
+        {
+            if (errno == EINTR)
+                continue;
+
+            rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
+            return -1;
+        }
+        else
+            break;
+    } while (true);
     return status;
 }
@@ -290,10 +311,6 @@ void ReplxxLineReader::openEditor()
         return;
     }
 
-    const char * editor = std::getenv("EDITOR");
-    if (!editor || !*editor)
-        editor = "vim";
-
     replxx::Replxx::State state(rx.get_state());
 
     size_t bytes_written = 0;
@@ -316,7 +333,7 @@ void ReplxxLineReader::openEditor()
         return;
     }
 
-    if (0 == execute(fmt::format("{} {}", editor, filename)))
+    if (0 == executeEditor(filename))
     {
         try
         {
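The replacement wait loop in executeEditor is the standard EINTR idiom: waitpid() can be interrupted by a signal before the child editor exits, in which case it fails with errno == EINTR and should simply be retried rather than treated as an error. The same idiom as a self-contained sketch (hypothetical helper, not part of this commit):

    #include <sys/wait.h>
    #include <cerrno>

    /// Wait for a child process, retrying when interrupted by a signal.
    /// Returns the raw wait status, or -1 on a genuine error.
    static int waitForChild(pid_t pid)
    {
        int status = 0;
        while (waitpid(pid, &status, 0) == -1)
        {
            if (errno != EINTR)
                return -1; /// real failure; EINTR just means "try again"
        }
        return status;
    }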
@@ -22,7 +22,7 @@ public:
 private:
     InputStatus readOneLine(const String & prompt) override;
     void addToHistory(const String & line) override;
-    int execute(const std::string & command);
+    int executeEditor(const std::string & path);
     void openEditor();
 
     replxx::Replxx rx;
@@ -31,4 +31,6 @@ private:
     // used to call flock() to synchronize multiple clients using same history file
     int history_file_fd = -1;
     bool bracketed_paste_enabled = false;
+
+    std::string editor;
 };
@@ -28,8 +28,8 @@
 #define NO_INLINE __attribute__((__noinline__))
 #define MAY_ALIAS __attribute__((__may_alias__))
 
-#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__)
-#    error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress)"
+#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__) && !(defined(__riscv) && (__riscv_xlen == 64))
+#    error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress) and RISC-V 64 (experimental)"
 #endif
 
 /// Check for presence of address sanitizer
@@ -16,17 +16,6 @@
 */
 uint64_t getMemoryAmountOrZero()
 {
-#if defined(OS_LINUX)
-    // Try to lookup at the Cgroup limit
-    std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
-    if (cgroup_limit.is_open())
-    {
-        uint64_t amount = 0; // in case of read error
-        cgroup_limit >> amount;
-        return amount;
-    }
-#endif
-
     int64_t num_pages = sysconf(_SC_PHYS_PAGES);
     if (num_pages <= 0)
         return 0;
@@ -35,7 +24,22 @@ uint64_t getMemoryAmountOrZero()
     if (page_size <= 0)
         return 0;
 
-    return num_pages * page_size;
+    uint64_t memory_amount = num_pages * page_size;
+
+#if defined(OS_LINUX)
+    // Try to lookup at the Cgroup limit
+    std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
+    if (cgroup_limit.is_open())
+    {
+        uint64_t memory_limit = 0; // in case of read error
+        cgroup_limit >> memory_limit;
+        if (memory_limit > 0 && memory_limit < memory_amount)
+            memory_amount = memory_limit;
+    }
+#endif
+
+    return memory_amount;
 }
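The reordering matters: in a container without a configured limit, memory.limit_in_bytes holds a very large sentinel value, so returning it directly (the old code path) could wildly overstate available memory. The new code computes the physical amount first and only honors the cgroup value when it is a real, smaller limit. The capping rule in isolation (hypothetical helper, not part of this commit):

    #include <cstdint>

    /// Pick the effective memory amount given the physical size and a cgroup
    /// limit read from memory.limit_in_bytes (0 meaning "read failed").
    uint64_t effectiveMemoryAmount(uint64_t physical_bytes, uint64_t cgroup_limit_bytes)
    {
        if (cgroup_limit_bytes > 0 && cgroup_limit_bytes < physical_bytes)
            return cgroup_limit_bytes; /// honor a real, tighter container limit
        return physical_bytes;         /// no limit, unlimited sentinel, or read error
    }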
@@ -63,6 +63,9 @@
 #include <Common/Elf.h>
 #include <filesystem>
 
+#include <loggers/OwnFormattingChannel.h>
+#include <loggers/OwnPatternFormatter.h>
+
 #include <Common/config_version.h>
 
 #if defined(OS_DARWIN)
@@ -675,6 +678,34 @@ void BaseDaemon::initialize(Application & self)
     if ((!log_path.empty() && is_daemon) || config().has("logger.stderr"))
     {
         std::string stderr_path = config().getString("logger.stderr", log_path + "/stderr.log");
+
+        /// Check that stderr is writable before freopen(),
+        /// since freopen() will make stderr invalid on error,
+        /// and logging to stderr will be broken,
+        /// so the following code (that is used in every program) will not write anything:
+        ///
+        /// int main(int argc, char ** argv)
+        /// {
+        ///     try
+        ///     {
+        ///         DB::SomeApp app;
+        ///         return app.run(argc, argv);
+        ///     }
+        ///     catch (...)
+        ///     {
+        ///         std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
+        ///         return 1;
+        ///     }
+        /// }
+        if (access(stderr_path.c_str(), W_OK))
+        {
+            int fd;
+            if ((fd = creat(stderr_path.c_str(), 0600)) == -1 && errno != EEXIST)
+                throw Poco::OpenFileException("File " + stderr_path + " (logger.stderr) is not writable");
+            if (fd != -1)
+                ::close(fd);
+        }
+
         if (!freopen(stderr_path.c_str(), "a+", stderr))
             throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);
 
@@ -973,6 +1004,14 @@ void BaseDaemon::setupWatchdog()
         memcpy(argv0, new_process_name, std::min(strlen(new_process_name), original_process_name.size()));
     }
 
+    /// If streaming compression of logs is used then we write watchdog logs to cerr
+    if (config().getRawString("logger.stream_compress", "false") == "true")
+    {
+        Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
+        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
+        logger().setChannel(log);
+    }
+
     logger().information(fmt::format("Will watch for the process with pid {}", pid));
 
     /// Forward signals to the child process.
@@ -62,7 +62,13 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
     if (!log_path.empty())
     {
         createDirectory(log_path);
-        std::cerr << "Logging " << log_level_string << " to " << log_path << std::endl;
+
+        std::string ext;
+        if (config.getRawString("logger.stream_compress", "false") == "true")
+            ext = ".lz4";
+
+        std::cerr << "Logging " << log_level_string << " to " << log_path << ext << std::endl;
 
         auto log_level = Poco::Logger::parseLevel(log_level_string);
         if (log_level > max_log_level)
         {
@@ -75,6 +81,7 @@
         log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
         log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
         log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
+        log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false"));
         log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1"));
         log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
         log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
@@ -100,13 +107,18 @@
             max_log_level = errorlog_level;
         }
 
-        std::cerr << "Logging errors to " << errorlog_path << std::endl;
+        std::string ext;
+        if (config.getRawString("logger.stream_compress", "false") == "true")
+            ext = ".lz4";
+
+        std::cerr << "Logging errors to " << errorlog_path << ext << std::endl;
 
         error_log_file = new Poco::FileChannel;
         error_log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(errorlog_path));
         error_log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
         error_log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
         error_log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
+        error_log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false"));
         error_log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1"));
         error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
         error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
@@ -16,3 +16,7 @@ endif ()
 if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
     set (ARCH_PPC64LE 1)
 endif ()
+if (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64")
+    set (ARCH_RISCV64 1)
+endif ()
@@ -20,10 +20,10 @@ set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
 set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
 set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
 
-set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
+set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "" FORCE)
 
-set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
-set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
+set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
+set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=bfd")
 
 set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
 set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
contrib/boost (vendored submodule, 2 lines changed)
@@ -1 +1 @@
-Subproject commit 79358a3106aab6af464430ed67c7efafebf5cd6f
+Subproject commit fcb058e1459ac273ecfe7cdf72791cb1479115af
@@ -196,6 +196,12 @@ if (NOT EXTERNAL_BOOST_FOUND)
         "${LIBRARY_DIR}/libs/context/src/asm/make_ppc64_sysv_elf_gas.S"
         "${LIBRARY_DIR}/libs/context/src/asm/ontop_ppc64_sysv_elf_gas.S"
     )
+elseif (ARCH_RISCV64)
+    set (SRCS_CONTEXT ${SRCS_CONTEXT}
+        "${LIBRARY_DIR}/libs/context/src/asm/jump_riscv64_sysv_elf_gas.S"
+        "${LIBRARY_DIR}/libs/context/src/asm/make_riscv64_sysv_elf_gas.S"
+        "${LIBRARY_DIR}/libs/context/src/asm/ontop_riscv64_sysv_elf_gas.S"
+    )
 elseif(OS_DARWIN)
     set (SRCS_CONTEXT ${SRCS_CONTEXT}
         "${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S"
@@ -1,5 +1,5 @@
 if (SANITIZE OR NOT (
-    ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE)) OR
+    ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
     (OS_DARWIN AND (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" OR CMAKE_BUILD_TYPE STREQUAL "Debug"))
 ))
     if (ENABLE_JEMALLOC)
@@ -112,6 +112,8 @@ elseif (ARCH_ARM)
     set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_aarch64")
 elseif (ARCH_PPC64LE)
     set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
+elseif (ARCH_RISCV64)
+    set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
 else ()
     message (FATAL_ERROR "internal jemalloc: This arch is not supported")
 endif ()
contrib/jemalloc-cmake/include_linux_riscv64/README (new file, 8 lines)
@@ -0,0 +1,8 @@
Here are pre-generated files from jemalloc on Linux RISC-V.
You can obtain these files by running ./autogen.sh inside the jemalloc source directory.

Added #define _GNU_SOURCE
Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not.
Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non-standard.
Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5.
Added JEMALLOC_CONFIG_MALLOC_CONF substitution.
@@ -0,0 +1,367 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0

/*
 * Number of significant bits in virtual addresses. This may be less than the
 * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
 * bits are the same as bit 47.
 */
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
/* #undef JEMALLOC_OS_UNFAIR_LOCK */

/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL

/*
 * Defined if secure_getenv(3) is available.
 */
// #define JEMALLOC_HAVE_SECURE_GETENV

/*
 * Defined if issetugid(2) is available.
 */
/* #undef JEMALLOC_HAVE_ISSETUGID */

/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1

/*
 * Defined if mach_absolute_time() is available.
 */
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
 * Defined if _malloc_thread_cleanup() exists. At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library. Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
#define JEMALLOC_THREADED_INIT

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
/* #undef JEMALLOC_MUTEX_INIT_CB */

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
/* #undef JEMALLOC_DEBUG */

/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */

/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */

/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
 * segment (DSS).
 */
#define JEMALLOC_DSS

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL

/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/*
 * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
 * explicit huge page support are separately configured.
 */
#define LG_HUGEPAGE 29

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
#define JEMALLOC_RETAIN

/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS

/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

/*
 * ffs*() functions to use for bitmapping. Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities. We make this a configure option to
 * avoid taking extra branches everywhere.
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
/* #undef JEMALLOC_ZONE */

/*
 * Methods for determining whether the OS overcommits.
 * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 *                                         /proc/sys/vm.overcommit_memory file.
 * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
 */
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY

/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
#define JEMALLOC_HAVE_MADVISE_HUGE

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
#define JEMALLOC_MADVISE_DONTDUMP

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3

/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#define JEMALLOC_GLIBC_MALLOC_HOOK

/* glibc memalign hook. */
#define JEMALLOC_GLIBC_MEMALIGN_HOOK

/* pthread support */
#define JEMALLOC_HAVE_PTHREAD

/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM

/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP

/* GNU specific sched_getcpu support */
#define JEMALLOC_HAVE_SCHED_GETCPU

/* GNU specific sched_setaffinity support */
#define JEMALLOC_HAVE_SCHED_SETAFFINITY

/*
 * If defined, all the features necessary for background threads are present.
 */
#define JEMALLOC_BACKGROUND_THREAD 1

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
2
contrib/libhdfs3
vendored
@@ -1 +1 @@
Subproject commit 082e55f17d1c58bf124290fb044fea40e985ec11
Subproject commit a8c37ee001af1ae88e5dfa637ae5b31b087c96d3
@@ -66,7 +66,7 @@
#cmakedefine WITH_SASL_OAUTHBEARER 1
#cmakedefine WITH_SASL_CYRUS 1
// crc32chw
#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__))
#if !defined(__PPC__) && !defined(__riscv) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__))
#define WITH_CRC32C_HW 1
#endif
// regex
2
contrib/poco
vendored
@@ -1 +1 @@
Subproject commit 39fd359765a3a77b46d94ec3c5def3c7802a920f
Subproject commit 258b9ba6cd245ff88e9346f75c43464c403f329d
@@ -51,6 +51,7 @@ if (USE_INTERNAL_POCO_LIBRARY)
    "${LIBRARY_DIR}/Foundation/src/Channel.cpp"
    "${LIBRARY_DIR}/Foundation/src/Checksum.cpp"
    "${LIBRARY_DIR}/Foundation/src/Clock.cpp"
    "${LIBRARY_DIR}/Foundation/src/CompressedLogFile.cpp"
    "${LIBRARY_DIR}/Foundation/src/Condition.cpp"
    "${LIBRARY_DIR}/Foundation/src/Configurable.cpp"
    "${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp"
@@ -222,7 +223,7 @@ if (USE_INTERNAL_POCO_LIBRARY)
        POCO_OS_FAMILY_UNIX
    )
    target_include_directories (_poco_foundation SYSTEM PUBLIC "${LIBRARY_DIR}/Foundation/include")
    target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES})
    target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES} lz4)
else ()
    add_library (Poco::Foundation UNKNOWN IMPORTED GLOBAL)
2
contrib/rocksdb
vendored
@@ -1 +1 @@
Subproject commit 296c1b8b95fd448b8097a1b2cc9f704ff4a73a2c
Subproject commit e7c2b2f7bcf3b4b33892a1a6d25c32a93edfbdb9
2
contrib/sentry-native
vendored
@@ -1 +1 @@
Subproject commit 94644e92f0a3ff14bd35ed902a8622a2d15f7be4
Subproject commit f431047ac8da13179c488018dddf1c0d0771a997
2
contrib/sysroot
vendored
@@ -1 +1 @@
Subproject commit 6172893931e19b028f9cabb7095a44361be863df
Subproject commit 1a64956aa7c280448be6526251bb2b8e6d380ab1
@@ -81,11 +81,11 @@ then
fi

# Also build fuzzers if any sanitizer specified
if [ -n "$SANITIZER" ]
then
    # Currently we are in build/build_docker directory
    ../docker/packager/other/fuzzer.sh
fi
# if [ -n "$SANITIZER" ]
# then
#     # Currently we are in build/build_docker directory
#     ../docker/packager/other/fuzzer.sh
# fi

ccache --show-config ||:
ccache --show-stats ||:
@@ -31,15 +31,15 @@ then
fi

# Also build fuzzers if any sanitizer specified
if [ -n "$SANITIZER" ]
then
    # The script assumes that we are in the build directory.
    mkdir -p build/build_docker
    cd build/build_docker
    # Launching build script
    ../docker/packager/other/fuzzer.sh
    cd
fi
# if [ -n "$SANITIZER" ]
# then
#     # The script assumes that we are in the build directory.
#     mkdir -p build/build_docker
#     cd build/build_docker
#     # Launching build script
#     ../docker/packager/other/fuzzer.sh
#     cd
# fi

ccache --show-config ||:
ccache --show-stats ||:
@@ -256,6 +256,12 @@ continue
        task_exit_code=0
        echo "success" > status.txt
        echo "OK" > description.txt
    elif [ "$fuzzer_exit_code" == "137" ]
    then
        # Killed.
        task_exit_code=$fuzzer_exit_code
        echo "failure" > status.txt
        echo "Killed" > description.txt
    else
        # The server was alive, but the fuzzer returned some error. This might
        # be some client-side error detected by fuzzing, or a problem in the
@@ -19,6 +19,7 @@ RUN apt-get update \
    sqlite3 \
    curl \
    tar \
    lz4 \
    krb5-user \
    iproute2 \
    lsof \
@@ -60,7 +60,7 @@ RUN dockerd --version; docker --version
RUN python3 -m pip install \
    PyMySQL \
    aerospike==4.0.0 \
    avro \
    avro==1.10.2 \
    cassandra-driver \
    confluent-kafka==1.5.0 \
    dict2xml \
@@ -2,7 +2,7 @@ version: '2.3'
services:
  postgres1:
    image: postgres
    command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
    command: ["postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
    restart: always
    expose:
      - ${POSTGRES_PORT}
@@ -11,7 +11,6 @@ services:
      interval: 10s
      timeout: 5s
      retries: 5
    command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2"]
    networks:
      default:
        aliases:
@@ -19,6 +19,31 @@
<opentelemetry_span_log remove="remove"/>
<session_log remove="remove"/>

<!-- Performance tests do not use real block devices;
     instead they store everything in memory.

     So, to avoid extra memory references, switch *_log to the Memory engine. -->
<query_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</query_log>
<query_thread_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</query_thread_log>
<trace_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</trace_log>
<metric_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</metric_log>
<asynchronous_metric_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</asynchronous_metric_log>

<uncompressed_cache_size>1000000000</uncompressed_cache_size>

<asynchronous_metrics_update_period_s>10</asynchronous_metrics_update_period_s>
@@ -37,6 +37,12 @@ function configure()
    # install test configs
    /usr/share/clickhouse-test/config/install.sh

    # avoid too slow startup
    sudo cat /etc/clickhouse-server/config.d/keeper_port.xml | sed "s|<snapshot_distance>100000</snapshot_distance>|<snapshot_distance>10000</snapshot_distance>|" > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
    sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
    sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
    sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml

    # for clickhouse-server (via service)
    echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
    # for clickhouse-client
@@ -34,7 +34,7 @@ then
    if [ "${ARCH}" = "x86_64" ]
    then
        DIR="macos"
    elif [ "${ARCH}" = "aarch64" ]
    elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ]
    then
        DIR="macos-aarch64"
    fi
@@ -1,5 +1,5 @@
---
toc_priority: 71
toc_priority: 72
toc_title: Source Code Browser
---
@@ -9,14 +9,14 @@ This is for the case when you have Linux machine and want to use it to build `cl

The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first.

## Install Clang-8 {#install-clang-8}
## Install Clang-13

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, the commands for Bionic are:

``` bash
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list
sudo apt-get install clang-8
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-13 main" >> /etc/apt/sources.list
sudo apt-get install clang-13
```

## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
@@ -25,6 +25,7 @@ Let’s remember the path where we install `cctools` as ${CCTOOLS}

``` bash
mkdir ${CCTOOLS}
cd ${CCTOOLS}

git clone https://github.com/tpoechtrager/apple-libtapi.git
cd apple-libtapi
@@ -34,7 +35,7 @@ cd ..

git clone https://github.com/tpoechtrager/cctools-port.git
cd cctools-port/cctools
./configure --prefix=${CCTOOLS} --with-libtapi=${CCTOOLS} --target=x86_64-apple-darwin
./configure --prefix=$(readlink -f ${CCTOOLS}) --with-libtapi=$(readlink -f ${CCTOOLS}) --target=x86_64-apple-darwin
make install
```

@@ -51,12 +52,10 @@ tar xJf MacOSX10.15.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --s

``` bash
cd ClickHouse
mkdir build-osx
CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake \
    -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \
    -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \
    -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld
ninja -C build-osx
mkdir build-darwin
cd build-darwin
CC=clang-13 CXX=clang++-13 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/aarch64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/aarch64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/aarch64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/aarch64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
ninja
```

The resulting binary will have a Mach-O executable format and can’t be run on Linux.
30
docs/en/development/build-cross-riscv.md
Normal file
@@ -0,0 +1,30 @@
---
toc_priority: 68
toc_title: Build on Linux for RISC-V 64
---

# How to Build ClickHouse on Linux for RISC-V 64 Architecture {#how-to-build-clickhouse-on-linux-for-risc-v-64-architecture}

As of writing (11.11.2021), building for RISC-V is considered to be highly experimental. Not all features can be enabled.

This is for the case when you have a Linux machine and want to use it to build the `clickhouse` binary that will run on another Linux machine with RISC-V 64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.

The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.

## Install Clang-13

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup, or run
``` bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

## Build ClickHouse {#build-clickhouse}

``` bash
cd ClickHouse
mkdir build-riscv64
CC=clang-13 CXX=clang++-13 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_INTERNAL_PARQUET_LIBRARY=OFF -DENABLE_ORC=OFF -DUSE_INTERNAL_ORC_LIBRARY=OFF -DUSE_UNWIND=OFF -DUSE_INTERNAL_PROTOBUF_LIBRARY=ON -DENABLE_GRPC=OFF -DUSE_INTERNAL_GRPC_LIBRARY=OFF -DENABLE_HDFS=OFF -DUSE_INTERNAL_HDFS3_LIBRARY=OFF -DENABLE_MYSQL=OFF -DUSE_INTERNAL_MYSQL_LIBRARY=OFF
ninja -C build-riscv64
```

The resulting binary will run only on Linux with the RISC-V 64 CPU architecture.
@@ -1,5 +1,5 @@
---
toc_priority: 70
toc_priority: 71
toc_title: Third-Party Libraries Used
---
@@ -37,7 +37,7 @@ Next, you need to download the source files onto your working machine. This is c

In the command line terminal run:

    git clone git@github.com:your_github_username/ClickHouse.git
    git clone --recursive git@github.com:your_github_username/ClickHouse.git
    cd ClickHouse

Note: please, substitute *your_github_username* with what is appropriate!
@@ -65,7 +65,7 @@ It generally means that the SSH keys for connecting to GitHub are missing. These

You can also clone the repository via the HTTPS protocol:

    git clone https://github.com/ClickHouse/ClickHouse.git
    git clone --recursive https://github.com/ClickHouse/ClickHouse.git

This, however, will not let you send your changes to the server. You can still use it temporarily and add the SSH keys later, replacing the remote address of the repository with the `git remote` command.

@@ -241,7 +241,7 @@ Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib

Writing tests: https://clickhouse.com/docs/en/development/tests/

List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22
List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest

## Test Data {#test-data}
@@ -1,5 +1,5 @@
---
toc_priority: 68
toc_priority: 69
toc_title: C++ Guide
---
@@ -1,5 +1,5 @@
---
toc_priority: 69
toc_priority: 70
toc_title: Testing
---
@@ -11,7 +11,8 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
[SETTINGS ...]
```

**Engine parameters**
@@ -23,21 +24,13 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi

**Example**

1. Set up the `s3_engine_table` table:

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
```
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip')
SETTINGS input_format_with_names_use_header = 0;

2. Fill file:

``` sql
INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
```

3. Query the data:

``` sql
SELECT * FROM s3_engine_table LIMIT 2;
```
@@ -73,57 +66,54 @@ For more information about virtual columns see [here](../../../engines/table-eng

Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.

**Example**
!!! warning "Warning"
    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.

1. Suppose we have several files in CSV format with the following URIs on S3:

- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'

There are several ways to make a table consisting of all six files:

The first way:

``` sql
CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
```

Another way:

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
```

Table consists of all the files in both directories (all files should satisfy format and schema described in query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
```

If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.

**Example**
**Example with wildcards 1**

Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:

``` sql
CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
CREATE TABLE big_table (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV');
```

## Virtual Columns {#virtual-columns}
**Example with wildcards 2**

- `_path` — Path to the file.
- `_file` — Name of the file.
Suppose we have several files in CSV format with the following URIs on S3:

**See Also**
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv'
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv'

- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)

## S3-related settings {#settings}
There are several ways to make a table consisting of all six files:

1. Specify the range of file postfixes:

``` sql
CREATE TABLE table_with_range (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV');
```

2. Take all files with `some_file_` prefix (there should be no extra files with such prefix in both folders):

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV');
```

3. Take all the files in both folders (all files should satisfy format and schema described in query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV');
```

## S3-related Settings {#settings}

The following settings can be set before query execution or placed into the configuration file.

@@ -165,49 +155,6 @@ The following settings can be specified in configuration file for given endpoint
    </s3>
```

## Usage {#usage-examples}

Suppose we have several files in CSV format with the following URIs on S3:

- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'

1. There are several ways to make a table consisting of all six files:

``` sql
CREATE TABLE table_with_range (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
```

2. Another way:

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
```

3. Table consists of all the files in both directories (all files should satisfy format and schema described in query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
```

!!! warning "Warning"
    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.

4. Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:

``` sql
CREATE TABLE big_table (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
```

## See also

- [s3 table function](../../../sql-reference/table-functions/s3.md)
@@ -381,8 +381,11 @@ We ran queries using a client located in a Yandex datacenter in Finland on a clu

| servers | Q1 | Q2 | Q3 | Q4 |
|---------|-------|-------|-------|-------|
| 1 | 0.490 | 1.224 | 2.104 | 3.593 |
| 3 | 0.212 | 0.438 | 0.733 | 1.241 |
| 140 | 0.028 | 0.043 | 0.051 | 0.072 |
| 1, E5-2650v2 | 0.490 | 1.224 | 2.104 | 3.593 |
| 3, E5-2650v2 | 0.212 | 0.438 | 0.733 | 1.241 |
| 1, AWS c5n.4xlarge | 0.249 | 1.279 | 1.738 | 3.527 |
| 1, AWS c5n.9xlarge | 0.130 | 0.584 | 0.777 | 1.811 |
| 3, AWS c5n.9xlarge | 0.057 | 0.231 | 0.285 | 0.641 |
| 140, E5-2650v2 | 0.028 | 0.043 | 0.051 | 0.072 |

[Original article](https://clickhouse.com/docs/en/getting_started/example_datasets/nyc_taxi/) <!--hide-->
@@ -177,6 +177,9 @@ This format is also available under the name `TSVRaw`.
## TabSeparatedWithNames {#tabseparatedwithnames}

Differs from the `TabSeparated` format in that the column names are written in the first row.

During parsing, the first row is expected to contain the column names. You can use column names to determine their position and to check their correctness.

If the setting [input_format_with_names_use_header](../operations/settings/settings.md#settings-input_format_with_names_use_header) is set to 1,
the columns from the input data will be mapped to the columns of the table by their names; columns with unknown names will be skipped if the setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.
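To make the behavior above concrete, here is a minimal sketch (the query and literals are illustrative, not from the original document): producing the format shows the header row, and the two settings referenced above control how such a header is consumed on input.

``` sql
-- Emit a header row, then the data row.
SELECT 1 AS id, 'Alice' AS name
FORMAT TabSeparatedWithNames;
-- id	name
-- 1	Alice

-- When importing, map input columns to table columns by name
-- and skip columns with unknown names (the settings described above).
SET input_format_with_names_use_header = 1;
SET input_format_skip_unknown_fields = 1;
```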
|
@ -432,7 +432,7 @@ Example:
|
||||
<http_handlers>
|
||||
<rule>
|
||||
<url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
|
||||
<method>GET</method>
|
||||
<methods>GET</methods>
|
||||
<headers>
|
||||
<XXX>TEST_HEADER_VALUE</XXX>
|
||||
<PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
|
||||
@ -639,4 +639,4 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
|
||||
<
|
||||
<html><body>Relative Path File</body></html>
|
||||
* Connection #0 to host localhost left intact
|
||||
```
|
||||
```
|
||||
|
@@ -35,6 +35,8 @@ toc_title: Client Libraries
- NodeJs
    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
    - [node-clickhouse](https://github.com/apla/node-clickhouse)
    - [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
    - [clickhouse-client](https://github.com/depyronick/clickhouse-client)
- Perl
    - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
    - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
@@ -78,7 +78,8 @@ toc_title: Adopters
| <a href="https://ippon.tech" class="favicon">Ippon Technologies</a> | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) |
| <a href="https://www.ivi.ru/" class="favicon">Ivi</a> | Online Cinema | Analytics, Monitoring | — | — | [Article in Russian, Jan 2018](https://habr.com/en/company/ivi/blog/347408/) |
| <a href="https://jinshuju.net" class="favicon">Jinshuju 金数据</a> | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
| <a href="https://jitsu.com" class="favicon">Jitsu</a> | Cloud Software | Data Pipeline | — | — | [Documentation](https://jitsu.com/docs/destinations-configuration/clickhouse-destination), [Hacker News](https://news.ycombinator.com/item?id=29106082) |
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020](https://tv.kakao.com/channel/3693125/cliplink/414129353), [if(kakao)2021](https://if.kakao.com/session/24) |
| <a href="https://www.kodiakdata.com/" class="favicon">Kodiak Data</a> | Clouds | Main product | — | — | [Slides in English, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
| <a href="https://kontur.ru" class="favicon">Kontur</a> | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
| <a href="https://www.kuaishou.com/" class="favicon">Kuaishou</a> | Video | — | — | — | [ClickHouse Meetup, October 2018](https://clickhouse.com/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/) |
@@ -101,11 +102,13 @@ toc_title: Adopters
| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
| <a href="https://ok.ru" class="favicon">Ok.ru</a> | Social Network | — | 72 servers | 810 TB compressed, 50bn rows/day, 1.5 TB/day | [SmartData conference, Oct 2021](https://assets.ctfassets.net/oxjq45e8ilak/4JPHkbJenLgZhBGGyyonFP/57472ec6987003ec4078d0941740703b/____________________ClickHouse_______________________.pdf) |
| <a href="https://omnicomm.ru/" class="favicon">Omnicomm</a> | Transportation Monitoring | — | — | — | [Facebook post, Oct 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) |
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitoring and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
| <a href="https://www.opentargets.org/" class="favicon">Open Targets</a> | Genome Research | Genome Search | — | — | [Tweet, Oct 2021](https://twitter.com/OpenTargets/status/1452570865342758913?s=20), [Blog](https://blog.opentargets.org/graphql/) |
| <a href="https://corp.ozon.com/" class="favicon">OZON</a> | E-commerce | — | — | — | [Official website](https://job.ozon.ru/vacancy/razrabotchik-clickhouse-ekspluatatsiya-40991870/) |
| <a href="https://panelbear.com/" class="favicon">Panelbear</a> | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) |
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
| <a href="https://www.percona.com/" class="favicon">Percona</a> | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) |
| <a href="https://piwik.pro/" class="favicon">Piwik PRO</a> | Web Analytics | Main Product | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) |
| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
| <a href="https://posthog.com/" class="favicon">PostHog</a> | Product Analytics | Main Product | — | — | [Release Notes, Oct 2020](https://posthog.com/blog/the-posthog-array-1-15-0) |
| <a href="https://postmates.com/" class="favicon">Postmates</a> | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |
@@ -172,5 +175,7 @@ toc_title: Adopters
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
| <a href="https://www.deepl.com/" class="favicon">Deepl</a> | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) |
| <a href="https://vercel.com/" class="favicon">Vercel</a> | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 |
| <a href="https://www.your-analytics.org/" class="favicon">YourAnalytics</a> | Web Analytics | — | — | — | [Tweet, Nov 2021](https://twitter.com/mikenikles/status/1460860140249235461) |

[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->
@@ -107,7 +107,7 @@ Loading key from the environment variable:

```xml
<encryption_codecs>
    <aes_128_gcm_siv>
        <key_hex from_env="KEY"></key_hex>
        <key_hex from_env="ENVVAR"></key_hex>
    </aes_128_gcm_siv>
</encryption_codecs>
```
@@ -120,7 +120,7 @@ Each of these methods can be applied for multiple keys:

<encryption_codecs>
    <aes_128_gcm_siv>
        <key_hex id="0">00112233445566778899aabbccddeeff</key_hex>
        <key_hex id="1" from_env=".."></key_hex>
        <key_hex id="1" from_env="ENVVAR"></key_hex>
        <current_key_id>1</current_key_id>
    </aes_128_gcm_siv>
</encryption_codecs>
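For context, the keys configured above are consumed by the column-level encryption codecs; a minimal sketch, assuming a hypothetical table name (`encrypted_demo`):

``` sql
-- Hypothetical table: the 'secret' column is encrypted on disk with the
-- AES_128_GCM_SIV codec, using the keys from <encryption_codecs> above.
CREATE TABLE encrypted_demo
(
    id UInt64,
    secret String CODEC(AES_128_GCM_SIV)
)
ENGINE = MergeTree
ORDER BY id;
```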
@@ -763,6 +763,30 @@ Default value: 10000.

``` xml
<max_thread_pool_size>12000</max_thread_pool_size>
```

## max_thread_pool_free_size {#max-thread-pool-free-size}

The number of threads that are always held in the Global Thread pool.

Default value: 1000.

**Example**

``` xml
<max_thread_pool_free_size>1200</max_thread_pool_free_size>
```

## thread_pool_queue_size {#thread-pool-queue-size}

The limit on the number of jobs that can be scheduled on the Global Thread pool. Increasing the queue size leads to larger memory usage. It is recommended to keep this value equal to `max_thread_pool_size`.

Default value: 10000.

**Example**

``` xml
<thread_pool_queue_size>12000</thread_pool_queue_size>
```

## merge_tree {#server_configuration_parameters-merge_tree}

Fine tuning for tables in the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md).
@@ -1436,4 +1460,53 @@ To add an LDAP server as a remote user directory of users that are not defined l
</ldap>
```

[Original article](https://clickhouse.com/docs/en/operations/server_configuration_parameters/settings/) <!--hide-->
## total_memory_profiler_step {#total-memory-profiler-step}

Sets the memory size (in bytes) for a stack trace at every peak allocation step. The data is stored in the [system.trace_log](../../operations/system-tables/trace_log.md) system table with `query_id` equal to an empty string.

Possible values:

- Positive integer.

Default value: `4194304`.

## total_memory_tracker_sample_probability {#total-memory-tracker-sample-probability}

Allows collecting random allocations and deallocations and writing them to the [system.trace_log](../../operations/system-tables/trace_log.md) system table with `trace_type` equal to `MemorySample`, with the specified probability. The probability applies to every allocation or deallocation, regardless of the size of the allocation. Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit (default value is `4` MiB). It can be lowered if [total_memory_profiler_step](#total-memory-profiler-step) is lowered. You can set `total_memory_profiler_step` equal to `1` for extra fine-grained sampling.

Possible values:

- Positive integer.
- 0 — Writing of random allocations and deallocations to the `system.trace_log` system table is disabled.

Default value: `0`.
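A quick way to check that sampling is producing data, as a minimal sketch (the empty `query_id` filter follows the `total_memory_profiler_step` description above):

``` sql
-- Count sampled allocations/deallocations recorded by the total memory tracker.
SELECT count()
FROM system.trace_log
WHERE trace_type = 'MemorySample'
  AND query_id = '';
```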
## mmap_cache_size {#mmap-cache-size}

Sets the cache size (in bytes) for mapped files. This setting allows avoiding frequent open/[mmap/munmap](https://en.wikipedia.org/wiki/Mmap)/close calls (which are very expensive due to the subsequent page faults) and reusing mappings from several threads and queries. The setting value is the number of mapped regions (usually equal to the number of mapped files). The amount of data in mapped files can be monitored in the [system.metrics](../../operations/system-tables/metrics.md) and [system.metric_log](../../operations/system-tables/metric_log.md) system tables by the `MMappedFiles` and `MMappedFileBytes` metrics, in [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md) and [system.asynchronous_metrics_log](../../operations/system-tables/asynchronous_metric_log.md) by the `MMapCacheCells` metric, and also in [system.events](../../operations/system-tables/events.md), [system.processes](../../operations/system-tables/processes.md), [system.query_log](../../operations/system-tables/query_log.md), [system.query_thread_log](../../operations/system-tables/query_thread_log.md), and [system.query_views_log](../../operations/system-tables/query_views_log.md) by the `CreatedReadBufferMMap`, `CreatedReadBufferMMapFailed`, `MMappedFileCacheHits`, and `MMappedFileCacheMisses` events. Note that the amount of data in mapped files does not consume memory directly and is not accounted for in query or server memory usage, because this memory can be discarded similar to the OS page cache. The cache is dropped (the files are closed) automatically on the removal of old parts in tables of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family; it can also be dropped manually by the `SYSTEM DROP MMAP CACHE` query.

Possible values:

- Positive integer.

Default value: `1000`.
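A minimal sketch of the monitoring and the manual drop mentioned above:

``` sql
-- Inspect the mmap cache through the metrics named above.
SELECT metric, value
FROM system.metrics
WHERE metric IN ('MMappedFiles', 'MMappedFileBytes');

-- Close the mapped files and drop the cache manually.
SYSTEM DROP MMAP CACHE;
```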
## compiled_expression_cache_size {#compiled-expression-cache-size}

Sets the cache size (in bytes) for [compiled expressions](../../operations/caches.md).

Possible values:

- Positive integer.

Default value: `134217728`.

## compiled_expression_cache_elements_size {#compiled_expression_cache_elements_size}

Sets the cache size (in elements) for [compiled expressions](../../operations/caches.md).

Possible values:

- Positive integer.

Default value: `10000`.
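The compiled expression cache can also be flushed at runtime, e.g. after changing the limits above; a minimal sketch:

``` sql
-- Clear all entries from the compiled expression cache.
SYSTEM DROP COMPILED EXPRESSION CACHE;
```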
@@ -343,3 +343,16 @@ Default value: `0`.
**Usage**

The value of the `min_bytes_to_rebalance_partition_over_jbod` setting should be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.

## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}

Enables or disables detaching a data part on a replica after a merge or a mutation, if it is not byte-identical to data parts on other replicas. If disabled, the data part is removed. Activate this setting if you want to analyze such parts later.

The setting is applicable to `MergeTree` tables with enabled [data replication](../../engines/table-engines/mergetree-family/replication.md).

Possible values:

- 0 — Parts are removed.
- 1 — Parts are detached.

Default value: `0`.
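Since this is a MergeTree-level setting, it can be changed per table; a minimal sketch, assuming a replicated table named `t` already exists:

``` sql
-- Hypothetical table name: keep non-byte-identical parts detached for analysis.
ALTER TABLE t MODIFY SETTING detach_not_byte_identical_parts = 1;
```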
@@ -1236,6 +1236,9 @@ Default value: `0`.

Could be used for throttling speed when replicating the data to add or replace new nodes.

!!! note "Note"
    60000000 bytes/s approximately corresponds to 457 Mbps (60000000 / 1024 / 1024 * 8).

## max_replicated_sends_network_bandwidth_for_server {#max_replicated_sends_network_bandwidth_for_server}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends for the server. Only has meaning at server startup. You can also limit the speed for a particular table with the [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth) setting.
@@ -1253,6 +1256,9 @@ Default value: `0`.

Could be used for throttling speed when replicating the data to add or replace new nodes.

!!! note "Note"
    60000000 bytes/s approximately corresponds to 457 Mbps (60000000 / 1024 / 1024 * 8).

## connect_timeout_with_failover_ms {#connect-timeout-with-failover-ms}

The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
@@ -4042,3 +4048,14 @@ Possible values:
- 0 — Timeout disabled.

Default value: `0`.

## min_bytes_to_use_mmap_io {#min-bytes-to-use-mmap-io}

This is an experimental setting. Sets the minimum amount of memory for reading large files without copying data from the kernel to userspace. The recommended threshold is about 64 MB, because [mmap/munmap](https://en.wikipedia.org/wiki/Mmap) is slow. It makes sense only for large files and helps only if the data resides in the page cache.

Possible values:

- Positive integer.
- 0 — Big files are read by copying data from the kernel to userspace only (mmap is not used).

Default value: `0`.
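A minimal sketch of enabling the setting at the threshold recommended above, for the current session only:

``` sql
-- Use mmap for reads of files larger than 64 MB (67108864 bytes).
SET min_bytes_to_use_mmap_io = 67108864;
```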
@@ -6,6 +6,7 @@ Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries.
- `name` ([String](../../sql-reference/data-types/string.md)) — [Dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Dictionary UUID.
- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — Dictionary status. Possible values:
    - `NOT_LOADED` — Dictionary was not loaded because it was not used.
    - `LOADED` — Dictionary loaded successfully.
@@ -15,9 +16,10 @@ Columns:
    - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now.
- `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
- `key` — [Key type](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key): Numeric Key ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) or Composite key ([String](../../sql-reference/data-types/string.md)) — form “(type 1, type 2, …, type n)”.
- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [key names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [key types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) that are provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
@@ -31,34 +33,56 @@ Columns:
- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
- `comment` ([String](../../sql-reference/data-types/string.md)) — Text of the comment for the dictionary.

**Example**

Configure the dictionary.
Configure the dictionary:

``` sql
CREATE DICTIONARY dictdb.dict
CREATE DICTIONARY dictionary_with_comment
(
    `key` Int64 DEFAULT -1,
    `value_default` String DEFAULT 'world',
    `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)'
    id UInt64,
    value String
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb'))
LIFETIME(MIN 0 MAX 1)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Make sure that the dictionary is loaded.

``` sql
SELECT * FROM system.dictionaries
SELECT * FROM system.dictionaries LIMIT 1 FORMAT Vertical;
```

``` text
┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┬──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐
│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┴───────────────────────┴────────────────┘
Row 1:
──────
database:                    default
name:                        dictionary_with_comment
uuid:                        4654d460-0d03-433a-8654-d4600d03d33a
status:                      NOT_LOADED
origin:                      4654d460-0d03-433a-8654-d4600d03d33a
type:
key.names:                   ['id']
key.types:                   ['UInt64']
attribute.names:             ['value']
attribute.types:             ['String']
bytes_allocated:             0
query_count:                 0
hit_rate:                    0
found_rate:                  0
element_count:               0
load_factor:                 0
source:
lifetime_min:                0
lifetime_max:                0
loading_start_time:          1970-01-01 00:00:00
last_successful_update_time: 1970-01-01 00:00:00
loading_duration:            0
last_exception:
comment:                     The temporary dictionary
```

[Original article](https://clickhouse.com/docs/en/operations/system-tables/dictionaries) <!--hide-->

77
docs/en/operations/system-tables/session_log.md
Normal file
@ -0,0 +1,77 @@

# system.session_log {#system_tables-session_log}

Contains information about all successful and failed login and logout events.

Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — Login/logout result. Possible values:
    - `LoginFailure` — Login error.
    - `LoginSuccess` — Successful login.
    - `Logout` — Logout from the system.
- `auth_id` ([UUID](../../sql-reference/data-types/uuid.md)) — Authentication ID, a UUID that is automatically generated each time a user logs in.
- `session_id` ([String](../../sql-reference/data-types/string.md)) — Session ID that is passed by the client via the [HTTP](../../interfaces/http.md) interface.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Login/logout date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Login/logout time.
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Login/logout starting time with microseconds precision.
- `user` ([String](../../sql-reference/data-types/string.md)) — User name.
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)) — The authentication type. Possible values:
    - `NO_PASSWORD`
    - `PLAINTEXT_PASSWORD`
    - `SHA256_PASSWORD`
    - `DOUBLE_SHA1_PASSWORD`
    - `LDAP`
    - `KERBEROS`
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in or out.
- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — The IP address that was used to log in or out.
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to log in or out.
- `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — The interface from which the login was initiated. Possible values:
    - `TCP`
    - `HTTP`
    - `gRPC`
    - `MySQL`
    - `PostgreSQL`
- `client_hostname` ([String](../../sql-reference/data-types/string.md)) — The hostname of the client machine where the [clickhouse-client](../../interfaces/cli.md) or another TCP client runs.
- `client_name` ([String](../../sql-reference/data-types/string.md)) — The `clickhouse-client` or another TCP client name.
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Revision of the `clickhouse-client` or another TCP client.
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The major version of the `clickhouse-client` or another TCP client.
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The minor version of the `clickhouse-client` or another TCP client.
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Patch component of the `clickhouse-client` or another TCP client version.
- `failure_reason` ([String](../../sql-reference/data-types/string.md)) — The exception message containing the reason for the login/logout failure.

**Example**

Query:

``` sql
SELECT * FROM system.session_log LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
type:                    LoginSuccess
auth_id:                 45e6bd83-b4aa-4a23-85e6-bd83b4aa1a23
session_id:
event_date:              2021-10-14
event_time:              2021-10-14 20:33:52
event_time_microseconds: 2021-10-14 20:33:52.104247
user:                    default
auth_type:               PLAINTEXT_PASSWORD
profiles:                ['default']
roles:                   []
settings:                [('load_balancing','random'),('max_memory_usage','10000000000')]
client_address:          ::ffff:127.0.0.1
client_port:             38490
interface:               TCP
client_hostname:
client_name:             ClickHouse client
client_revision:         54449
client_version_major:    21
client_version_minor:    10
client_version_patch:    0
failure_reason:
```

@ -6,7 +6,7 @@ Columns:

- `policy_name` ([String](../../sql-reference/data-types/string.md)) — Name of the storage policy.
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration; data fills the volumes according to this priority, i.e. during inserts and merges data is written to volumes with a lower priority (taking into account other rules: TTL, `max_data_part_size`, `move_factor`).
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — Disk names defined in the storage policy.
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of the configuration parameter, ClickHouse starts to move data to the next volume in order.
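
These columns can be inspected directly. A minimal query sketch (the policies shown depend entirely on your server configuration):

``` sql
SELECT policy_name, volume_name, volume_priority, disks, max_data_part_size, move_factor
FROM system.storage_policies
FORMAT Vertical;
```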

@ -69,12 +69,17 @@ Regardless of RAID use, always use replication for data security.

Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting.
For HDD, enable the write cache.

Make sure that [fstrim](https://en.wikipedia.org/wiki/Trim_(computing)) is enabled for NVMe and SSD disks in your OS (usually it's implemented using a cron job or a systemd service).

## File System {#file-system}

Ext4 is the most reliable option. Set the mount option `noatime`.
XFS should be avoided. It works mostly fine, but there are some reports about lower performance.
Most other file systems should also work fine.

Do not use compressed filesystems, because ClickHouse does compression on its own, and does it better.
It's not recommended to use encrypted filesystems, because you can use the built-in encryption in ClickHouse, which is better.

## Linux Kernel {#linux-kernel}

Don’t use an outdated Linux kernel.

@ -262,4 +267,8 @@ script

end script
```

## Antivirus software {#antivirus-software}

If you use antivirus software, configure it to skip folders with ClickHouse data files (`/var/lib/clickhouse`); otherwise performance may be reduced and you may experience unexpected errors during data ingestion and background merges.

{## [Original article](https://clickhouse.com/docs/en/operations/tips/) ##}

@ -10,6 +10,10 @@ The String type replaces the types VARCHAR, BLOB, CLOB, and others from other DB

When creating tables, numeric parameters for string fields can be set (e.g. `VARCHAR(255)`), but ClickHouse ignores them.

Aliases:

- `String` — `LONGTEXT`, `MEDIUMTEXT`, `TINYTEXT`, `TEXT`, `LONGBLOB`, `MEDIUMBLOB`, `TINYBLOB`, `BLOB`, `VARCHAR`, `CHAR`.
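
To illustrate the ignored length parameter, a minimal sketch (the table name `varchar_demo` is hypothetical):

``` sql
-- The length in VARCHAR(255) is accepted for compatibility and ignored:
-- the column is stored as a plain String.
CREATE TABLE varchar_demo (s VARCHAR(255)) ENGINE = Memory;
SHOW CREATE TABLE varchar_demo; -- reports the column simply as `s` String
```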

## Encodings {#encodings}

ClickHouse does not have the concept of encodings. Strings can contain an arbitrary set of bytes, which are stored and output as-is.

@ -66,7 +66,6 @@ Types of sources (`source_type`):

- DBMS
    - [ODBC](#dicts-external_dicts_dict_sources-odbc)
    - [MySQL](#dicts-external_dicts_dict_sources-mysql)
    - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
    - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
    - [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
    - [Redis](#dicts-external_dicts_dict_sources-redis)
@ -210,45 +209,6 @@ Setting fields:

When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`), remote hosts for HTTP dictionaries are checked against the contents of the `remote_url_allow_hosts` section of the config, to prevent database users from accessing arbitrary HTTP servers.
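
For instance, a DDL-created dictionary with an HTTP source is subject to this check. A minimal sketch (the dictionary name and URL are placeholders; `example.com` would have to be listed in `remote_url_allow_hosts`):

``` sql
CREATE DICTIONARY http_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(HTTP(url 'https://example.com/data.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
LIFETIME(300);
```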

## ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Example of settings:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>ShemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
))
```

Setting fields:

- `db` – Name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` – Name of the table and schema if exists.
- `connection_string` – Connection string.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database.

If you have a problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item.

### Known Vulnerability of the ODBC Dictionary Functionality {#known-vulnerability-of-the-odbc-dictionary-functionality}

!!! attention "Attention"

@ -464,6 +424,51 @@ LIFETIME(MIN 300 MAX 360)

## DBMS {#dbms}

### ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Example of settings:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>SchemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <query>SELECT id, value_1, value_2 FROM SchemaName.TableName</query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

Setting fields:

- `db` – Name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` – Name of the table and schema if exists.
- `connection_string` – Connection string.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` and `query` fields cannot be used together, and one of them must be declared.

ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so it's necessary to set the table name according to the table name case in the database.

If you have problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item.

### MySQL {#dicts-external_dicts_dict_sources-mysql}

Example of settings:

@ -487,6 +492,7 @@ Example of settings:

        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```

@ -505,6 +511,7 @@ SOURCE(MYSQL(

    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

@ -531,6 +538,11 @@ Setting fields:

- `fail_on_connection_loss` – The configuration parameter that controls the behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between the client and the server was lost. If `false`, the ClickHouse server retries executing the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`.

- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field, and one of the `table` or `query` fields must be declared.

MySQL can be connected to on a local host via sockets. To do this, set `host` and `socket`.

Example of settings:

@ -547,6 +559,7 @@ Example of settings:

        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```

@ -564,6 +577,7 @@ SOURCE(MYSQL(

    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

@ -582,6 +596,7 @@ Example of settings:

    <table>ids</table>
    <where>id=10</where>
    <secure>1</secure>
    <query>SELECT id, value_1, value_2 FROM default.ids</query>
</clickhouse>
</source>
```

@ -598,6 +613,7 @@ SOURCE(CLICKHOUSE(

    table 'ids'
    where 'id=10'
    secure 1
    query 'SELECT id, value_1, value_2 FROM default.ids'
));
```

@ -612,6 +628,10 @@ Setting fields:

- `where` – The selection criteria. May be omitted.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `secure` – Use SSL for the connection.
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field, and one of the `table` or `query` fields must be declared.
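
Whichever source is configured, a dictionary created from it is consumed the same way. A minimal usage sketch (the dictionary name `my_dict` and the attribute `value_1` are placeholders):

``` sql
SELECT dictGet('my_dict', 'value_1', toUInt64(1)) AS value;
```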

### Mongodb {#dicts-external_dicts_dict_sources-mongodb}

@ -703,27 +723,30 @@ Example of settings:

    <consistency>One</consistency>
    <where>"SomeColumn" = 42</where>
    <max_threads>8</max_threads>
    <query>SELECT id, value_1, value_2 FROM database_name.table_name</query>
</cassandra>
</source>
```

Setting fields:
- `host` – The Cassandra host or comma-separated list of hosts.
- `port` – The port on the Cassandra servers. If not specified, default port 9042 is used.
- `user` – Name of the Cassandra user.
- `password` – Password of the Cassandra user.
- `keyspace` – Name of the keyspace (database).
- `column_family` – Name of the column family (table).
- `allow_filering` – Flag to allow or not potentially expensive conditions on clustering key columns. Default value is 1.
- `partition_key_prefix` – Number of partition key columns in primary key of the Cassandra table.
  Required for compose key dictionaries. Order of key columns in the dictionary definition must be the same as in Cassandra.
  Default value is 1 (the first key column is a partition key and other key columns are clustering key).
- `consistency` – Consistency level. Possible values: `One`, `Two`, `Three`,
  `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default is `One`.
- `where` – Optional selection criteria.
- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in compose key dictionaries.

### PosgreSQL {#dicts-external_dicts_dict_sources-postgresql}
- `host` – The Cassandra host or comma-separated list of hosts.
- `port` – The port on the Cassandra servers. If not specified, the default port 9042 is used.
- `user` – Name of the Cassandra user.
- `password` – Password of the Cassandra user.
- `keyspace` – Name of the keyspace (database).
- `column_family` – Name of the column family (table).
- `allow_filering` – Flag to allow or disallow potentially expensive conditions on clustering key columns. Default value is 1.
- `partition_key_prefix` – Number of partition key columns in the primary key of the Cassandra table. Required for composite key dictionaries. The order of key columns in the dictionary definition must be the same as in Cassandra. Default value is 1 (the first key column is a partition key and the other key columns are clustering keys).
- `consistency` – Consistency level. Possible values: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default value is `One`.
- `where` – Optional selection criteria.
- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in composite key dictionaries.
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `column_family` or `where` fields cannot be used together with the `query` field, and one of the `column_family` or `query` fields must be declared.

### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}

Example of settings:

@ -737,6 +760,7 @@ Example of settings:

    <table>table_name</table>
    <where>id=10</where>
    <invalidate_query>SQL_QUERY</invalidate_query>
    <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
</postgresql>
</source>
```

@ -755,6 +779,7 @@ SOURCE(POSTGRESQL(

    replica(host 'example01-2' port 5432 priority 2)
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

@ -764,11 +789,15 @@ Setting fields:

- `port` – The port on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `user` – Name of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `password` – Password of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `replica` – Section of replica configurations. There can be multiple sections.
- `replica/host` – The PostgreSQL host.
- `replica/port` – The PostgreSQL port.
- `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority.
- `replica` – Section of replica configurations. There can be multiple sections:
    - `replica/host` – The PostgreSQL host.
    - `replica/port` – The PostgreSQL port.
    - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority.
- `db` – Name of the database.
- `table` – Name of the table.
- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional parameter.
- `where` – The selection criteria. The syntax for conditions is the same as for the `WHERE` clause in PostgreSQL. For example, `id > 10 AND id < 20`. Optional parameter.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field, and one of the `table` or `query` fields must be declared.

@ -5,7 +5,7 @@ toc_title: Bit

# Bit Functions {#bit-functions}

Bit functions work for any pair of types from UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, or Float64.
Bit functions work for any pair of types from `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, or `Float64`. Some functions support `String` and `FixedString` types.

The result type is an integer with bits equal to the maximum bits of its arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64.
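
The type promotion described above can be checked with `toTypeName`; a small sketch (the expected results follow from the rule above, not from a quoted reference):

``` sql
SELECT toTypeName(bitAnd(toUInt8(1), toUInt16(256))); -- UInt16: widened to the larger argument type
SELECT bitAnd(1.5, 1);                                -- the Float64 argument is cast to Int64 first
```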

@ -19,8 +19,100 @@ The result type is an integer with bits equal to the maximum bits of its argumen

## bitShiftLeft(a, b) {#bitshiftlefta-b}

Shifts the binary representation of a value to the left by a specified number of bit positions.

A `FixedString` or a `String` is treated as a single multibyte value.

Bits of a `FixedString` value are lost as they are shifted out. On the contrary, a `String` value is extended with additional bytes, so no bits are lost.

**Syntax**

``` sql
bitShiftLeft(a, b)
```

**Arguments**

- `a` — A value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — The number of shift positions. [Unsigned integer types](../../sql-reference/data-types/int-uint.md), 64-bit types or less are allowed.

**Returned value**

- Shifted value.

The type of the returned value is the same as the type of the input value.

**Example**

In the following queries the [bin](encoding-functions.md#bin) and [hex](encoding-functions.md#hex) functions are used to show the bits of shifted values.

``` sql
SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐
│ 99 │ 01100011 │       140 │ 10001100                 │
└────┴──────────┴───────────┴──────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐
│ abc │ 616263     │ &0        │ 06162630                    │
└─────┴────────────┴───────────┴─────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐
│ abc │ 616263                       │ &0        │ 162630                                        │
└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘
```

## bitShiftRight(a, b) {#bitshiftrighta-b}

Shifts the binary representation of a value to the right by a specified number of bit positions.

A `FixedString` or a `String` is treated as a single multibyte value. Note that the length of a `String` value is reduced as bits are shifted out.

**Syntax**

``` sql
bitShiftRight(a, b)
```

**Arguments**

- `a` — A value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — The number of shift positions. [Unsigned integer types](../../sql-reference/data-types/int-uint.md), 64-bit types or less are allowed.

**Returned value**

- Shifted value.

The type of the returned value is the same as the type of the input value.

**Example**

Query:

``` sql
SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐
│ 101 │ 01100101 │        25 │ 00011001                   │
└─────┴──────────┴───────────┴────────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐
│ abc │ 616263     │           │ 0616                          │
└─────┴────────────┴───────────┴───────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐
│ abc │ 616263                       │           │ 000616                                          │
└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘
```

## bitRotateLeft(a, b) {#bitrotatelefta-b}

## bitRotateRight(a, b) {#bitrotaterighta-b}

@ -216,6 +216,44 @@ Example:

SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)];
```

## JSONExtractKeys {#jsonextractkeysjson-indices-or-keys}

Parses a JSON string and extracts the keys.

**Syntax**

``` sql
JSONExtractKeys(json[, a, b, c...])
```

**Arguments**

- `json` — [String](../../sql-reference/data-types/string.md) with valid JSON.
- `a, b, c...` — Comma-separated indices or keys that specify the path to the inner field in a nested JSON object. Each argument can be either a [String](../../sql-reference/data-types/string.md) to get the field by the key or an [Integer](../../sql-reference/data-types/int-uint.md) to get the N-th field (indexed from 1, negative integers count from the end). If not set, the whole JSON is parsed as the top-level object. Optional parameter.

**Returned value**

Array with the keys of the JSON.

Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

**Example**

Query:

```sql
SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}');
```

Result:

```text
┌─JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}')─┐
│ ['a','b']                                                  │
└────────────────────────────────────────────────────────────┘
```

## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys}

Returns a part of JSON as an unparsed string.

@ -2463,3 +2463,39 @@ Result:

│ Linux 4.15.0-55-generic │
└─────────────────────────┘
```

## zookeeperSessionUptime {#zookeepersessionuptime}

Returns the uptime of the current ZooKeeper session in seconds.

**Syntax**

``` sql
zookeeperSessionUptime()
```

**Arguments**

- None.

**Returned value**

- Uptime of the current ZooKeeper session in seconds.

Type: [UInt32](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT zookeeperSessionUptime();
```

Result:

``` text
┌─zookeeperSessionUptime()─┐
│                      286 │
└──────────────────────────┘
```

@ -320,7 +320,7 @@ Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operat

**Syntax**

```sql
mapKeys(map)
mapValues(map)
```

**Parameters**

44
docs/en/sql-reference/operators/exists.md
Normal file
@ -0,0 +1,44 @@

# EXISTS {#exists-operator}

The `EXISTS` operator checks how many records are in the result of a subquery. If it is empty, then the operator returns `0`. Otherwise, it returns `1`.

`EXISTS` can be used in a [WHERE](../../sql-reference/statements/select/where.md) clause.

!!! warning "Warning"
    References to main query tables and columns are not supported in a subquery.

**Syntax**

```sql
WHERE EXISTS(subquery)
```

**Example**

Query with a subquery returning several rows:

``` sql
SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 8);
```

Result:

``` text
┌─count()─┐
│      10 │
└─────────┘
```

Query with a subquery that returns an empty result:

``` sql
SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 11);
```

Result:

``` text
┌─count()─┐
│       0 │
└─────────┘
```

@ -71,7 +71,7 @@ For tuple subtraction: [tupleMinus](../../sql-reference/functions/tuple-function

## Operators for Working with Data Sets {#operators-for-working-with-data-sets}

*See [IN operators](../../sql-reference/operators/in.md).*
See [IN operators](../../sql-reference/operators/in.md) and the [EXISTS](../../sql-reference/operators/exists.md) operator.

`a IN ...` – The `in(a, b)` function.

@ -212,6 +212,9 @@ SET mutations_sync = 2;

CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 10;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);

ALTER TABLE tmp MATERIALIZE COLUMN s;

SELECT groupArray(x), groupArray(s) FROM tmp;
```

58
docs/en/sql-reference/statements/alter/comment.md
Normal file
@ -0,0 +1,58 @@

---
toc_priority: 51
toc_title: COMMENT
---

# ALTER TABLE … MODIFY COMMENT {#alter-modify-comment}

Adds, modifies, or removes a comment on the table, regardless of whether it was set before. The comment change is reflected in both [system.tables](../../../operations/system-tables/tables.md) and the `SHOW CREATE TABLE` query.

**Syntax**

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment'
```

**Examples**

Creating a table with a comment (for more information, see the [COMMENT](../../../sql-reference/statements/create/table.md#comment-table) clause):

``` sql
CREATE TABLE table_with_comment
(
    `k` UInt64,
    `s` String
)
ENGINE = Memory()
COMMENT 'The temporary table';
```

Modifying the table comment:

``` sql
ALTER TABLE table_with_comment MODIFY COMMENT 'new comment on a table';
SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment';
```

Output of the new comment:

```text
┌─comment────────────────┐
│ new comment on a table │
└────────────────────────┘
```

Removing the table comment:

``` sql
ALTER TABLE table_with_comment MODIFY COMMENT '';
SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment';
```

Output of the removed comment:

```text
┌─comment─┐
│         │
└─────────┘
```

@ -32,6 +32,8 @@ These `ALTER` statements modify entities related to role-based access control:

- [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md)
- [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md)

The [ALTER TABLE ... MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) statement adds, modifies, or removes a comment on the table, regardless of whether it was set before.

## Mutations {#mutations}

`ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes, similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, that produce new “mutated” versions of parts.

@ -7,13 +7,13 @@ toc_title: DICTIONARY

Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

Syntax:
**Syntax**

``` sql
CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
CREATE DICTIONARY [OR REPLACE][IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
(
    key1 type1 [DEFAULT|EXPRESSION expr1] [IS_OBJECT_ID],
    key2 type2 [DEFAULT|EXPRESSION expr2] ,
    key2 type2 [DEFAULT|EXPRESSION expr2],
    attr1 type2 [DEFAULT|EXPRESSION expr3] [HIERARCHICAL|INJECTIVE],
    attr2 type2 [DEFAULT|EXPRESSION expr4] [HIERARCHICAL|INJECTIVE]
)
@ -21,6 +21,8 @@ PRIMARY KEY key1, key2

SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME({MIN min_val MAX max_val | max_val})
SETTINGS(setting_name = setting_value, setting_name = setting_value, ...)
COMMENT 'Comment'
```

External dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type; all other properties may have default values.
@ -30,3 +32,68 @@ External dictionary structure consists of attributes. Dictionary attributes are

Depending on the dictionary [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md), one or more attributes can be specified as dictionary keys.

For more information, see the [External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.

You can add a comment to the dictionary when creating it, using the `COMMENT` clause.

**Example**

Input table `source_table`:

``` text
┌─id─┬─value──┐
│  1 │ First  │
│  2 │ Second │
└────┴────────┘
```

Creating the dictionary:

``` sql
CREATE DICTIONARY dictionary_with_comment
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Output the dictionary:

``` sql
SHOW CREATE DICTIONARY dictionary_with_comment;
```

```text
┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE DICTIONARY default.dictionary_with_comment
(
    `id` UInt64,
    `value` String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LIFETIME(MIN 0 MAX 1000)
LAYOUT(FLAT())
COMMENT 'The temporary dictionary' │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

Output the comment of the dictionary:

``` sql
SELECT comment FROM system.dictionaries WHERE name == 'dictionary_with_comment' AND database == currentDatabase();
```

```text
┌─comment──────────────────┐
│ The temporary dictionary │
└──────────────────────────┘
```

**See Also**

- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

@ -252,21 +252,47 @@ CREATE TABLE codec_example

ENGINE = MergeTree()
```

<!--

### Encryption Codecs {#create-query-encryption-codecs}

These codecs don't actually compress data, but instead encrypt data on disk. They are only available when an encryption key is specified by [encryption](../../../operations/server-configuration-parameters/settings.md#server-settings-encryption) settings. Note that encryption only makes sense at the end of codec pipelines, because encrypted data usually can't be compressed in any meaningful way.

Encryption codecs:

- `Encrypted('AES-128-GCM-SIV')` — Encrypts data with AES-128 in [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV mode. This codec uses a fixed nonce and encryption is therefore deterministic. This makes it compatible with deduplicating engines such as [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) but has a weakness: when the same data block is encrypted twice, the resulting ciphertext will be exactly the same so an adversary who can read the disk can see this equivalence (although only the equivalence).
- `CODEC('AES-128-GCM-SIV')` — Encrypts data with AES-128 in [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV mode.
- `CODEC('AES-256-GCM-SIV')` — Encrypts data with AES-256 in GCM-SIV mode.

These codecs use a fixed nonce, and encryption is therefore deterministic. This makes them compatible with deduplicating engines such as [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) but has a weakness: when the same data block is encrypted twice, the resulting ciphertext will be exactly the same, so an adversary who can read the disk can see this equivalence (although only the equivalence, without getting its content).

!!! attention "Attention"
    Most engines including the "*MergeTree" family create index files on disk without applying codecs. This means plaintext will appear on disk if an encrypted column is indexed.

!!! attention "Attention"
    If you perform a SELECT query mentioning a specific value in an encrypted column (such as in its WHERE clause), the value may appear in [system.query_log](../../../operations/system-tables/query_log.md). You may want to disable the logging.
-->

**Example**

```sql
CREATE TABLE mytable
(
    x String Codec(AES_128_GCM_SIV)
)
ENGINE = MergeTree ORDER BY x;
```

!!! note "Note"
    If compression needs to be applied, it must be explicitly specified. Otherwise, only encryption will be applied to the data.

**Example**

```sql
CREATE TABLE mytable
(
    x String Codec(Delta, LZ4, AES_128_GCM_SIV)
)
ENGINE = MergeTree ORDER BY x;
```

## Temporary Tables {#temporary-tables}

ClickHouse supports temporary tables which have the following characteristics:

@ -6,9 +6,51 @@ toc_title: WHERE

The `WHERE` clause allows filtering the data that is coming from the [FROM](../../../sql-reference/statements/select/from.md) clause of `SELECT`.

If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. Rows where this expression evaluates to 0 are excluded from further transformations or result.
If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. Rows where this expression evaluates to `0` are excluded from further transformations or the result.

The `WHERE` expression is evaluated for the ability to use indexes and partition pruning, if the underlying table engine supports that.

!!! note "Note"
    There’s a filtering optimization called [prewhere](../../../sql-reference/statements/select/prewhere.md).
    There is a filtering optimization called [PREWHERE](../../../sql-reference/statements/select/prewhere.md).

If you need to test a value for [NULL](../../../sql-reference/syntax.md#null-literal), use the [IS NULL](../../operators/index.md#operator-is-null) and [IS NOT NULL](../../operators/index.md#is-not-null) operators or the [isNull](../../../sql-reference/functions/functions-for-nulls.md#isnull) and [isNotNull](../../../sql-reference/functions/functions-for-nulls.md#isnotnull) functions.
Otherwise, an expression with `NULL` never passes.

**Example**

To find numbers that are multiples of 3 and are greater than 10, execute the following query on the [numbers table](../../../sql-reference/table-functions/numbers.md):

``` sql
SELECT number FROM numbers(20) WHERE (number > 10) AND (number % 3 == 0);
```

Result:

``` text
┌─number─┐
│     12 │
│     15 │
│     18 │
└────────┘
```

Queries with `NULL` values:

``` sql
CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE=MergeTree() ORDER BY x;
INSERT INTO t_null VALUES (1, NULL), (2, 3);

SELECT * FROM t_null WHERE y IS NULL;
SELECT * FROM t_null WHERE y != 0;
```

Result:

``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
└───┴──────┘
┌─x─┬─y─┐
│ 2 │ 3 │
└───┴───┘
```

@ -13,6 +13,8 @@ SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY|VIEW] [db.]table|view [INTO OUTFILE fi

Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object.

Note that if you use this statement to get the `CREATE` query of system tables, you will get a *fake* query which only declares the table structure but cannot be used to create a table.
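
A quick illustration on a system table (the exact statement text varies between server versions):

``` sql
SHOW CREATE TABLE system.numbers;
-- returns a fake statement along the lines of:
-- CREATE TABLE system.numbers (`number` UInt64) ENGINE = SystemNumbers
```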

## SHOW DATABASES {#show-databases}

Prints a list of all databases.

@ -1,5 +1,5 @@
---
toc_priority: 71
toc_priority: 72
toc_title: "Navigating the ClickHouse codebase"
---

1
docs/ru/development/build-cross-riscv.md
Symbolic link
@ -0,0 +1 @@
../../en/development/build-cross-riscv.md

@ -1,5 +1,5 @@
---
toc_priority: 70
toc_priority: 71
toc_title: "Third-party libraries used"
---

@ -1,5 +1,5 @@
---
toc_priority: 68
toc_priority: 69
toc_title: "How to write C++ code"
---

@ -34,7 +34,9 @@ DDL queries with a `Replicated` database work in a similar way

The [system.clusters](../../operations/system-tables/clusters.md) system table contains a cluster named like the replicated database, consisting of all replicas of the database. This cluster is updated automatically when replicas are created or deleted, and it can be used for [Distributed](../../engines/table-engines/special/distributed.md#distributed) tables.

When a new replica of the database is created, this replica creates the tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log, it checks its local metadata against the current metadata in ZooKeeper, moves extra tables with data into a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, and updates table names if they have been renamed. Data is replicated at the `ReplicatedMergeTree` level, i.e. if a table is not replicated, its data will not be replicated either (the database is responsible only for metadata).

[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) queries are allowed but not replicated. The database engine can only add/fetch/remove a partition or part for the current replica. However, if the table itself uses a replicated table engine, the data will be replicated after `ATTACH` is applied.

## Usage Examples {#usage-example}

@ -11,7 +11,8 @@ toc_title: S3

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
[SETTINGS ...]
```

**Engine parameters**

@ -24,9 +25,12 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi

**Example**

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip')
SETTINGS input_format_with_names_use_header = 0;

INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);

SELECT * FROM s3_engine_table LIMIT 2;
```

@ -54,7 +58,7 @@ SELECT * FROM s3_engine_table LIMIT 2;

## Wildcards in the Path {#wildcards-in-path}

The `path` argument can point to multiple files using wildcards. To be processed, a file must exist and match the whole path pattern. The list of files is determined at `SELECT` time (not at `CREATE` time).

- `*` — Substitutes any number of any characters except `/`, including the empty string.
- `?` — Substitutes any single character.
@ -63,6 +67,52 @@ SELECT * FROM s3_engine_table LIMIT 2;

Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.

!!! warning "Note"
    If the list of files contains number ranges with leading zeros, use the brace construction for each digit separately, or use `?`.

**Wildcard example 1**

The table contains data from files named `file-000.csv`, `file-001.csv`, …, `file-999.csv`:

``` sql
CREATE TABLE big_table (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV');
```

**Wildcard example 2**

Suppose there are several CSV files with the following URLs in S3:

- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv'
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv'

There are several ways to create a table that includes all six files:

1. Specify a range of file name suffixes:

``` sql
CREATE TABLE table_with_range (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV');
```

2. The table contains all files with the `some_file_` prefix (there must be no other files with this prefix in the folders):

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV');
```

3. The table contains all files in both folders (the folders must not contain other files matching the format and schema described in the query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV');
```

## S3 Engine Settings {#s3-settings}

The following settings can be set before running a query or in the configuration file:

@ -108,47 +158,6 @@ SELECT * FROM s3_engine_table LIMIT 2;

</s3>
```

## Usage Examples {#usage-examples}

Suppose we have several CSV files with the following URLs in S3:

- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'

1. There are several ways to create a table that includes all six files:

``` sql
CREATE TABLE table_with_range (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
```

2. Another way:

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
```

3. The table contains all files in both folders (all files must match the format and schema described in the query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
```

If the list of files contains number ranges with leading zeros, use the brace construction for each digit separately, or use `?`.

4. Creating a table from files named `file-000.csv`, `file-001.csv`, …, `file-999.csv`:

``` sql
CREATE TABLE big_table (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
```

**See Also**

- [s3 table function](../../../sql-reference/table-functions/s3.md)

@ -9,7 +9,7 @@ toc_title: AggregatingMergeTree

Tables of the `AggregatingMergeTree` type can be used for incremental data aggregation, including for aggregating materialized views.

The engine processes all columns of the [AggregateFunction](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) type.
The engine processes all columns of the [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md) type.

Using `AggregatingMergeTree` is justified only when it reduces the number of rows by orders of magnitude.
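
A minimal sketch of a table this engine is meant for (all names are illustrative):

``` sql
CREATE TABLE agg_visits
(
    day Date,
    visits AggregateFunction(sum, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY day;
```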
@ -64,9 +64,9 @@ WHERE table = 'visits'
|
||||
└───────────┴────────────────┴────────┘
|
||||
```
|
||||
|
||||
Столбец `partition` содержит имена всех партиций таблицы. Таблица `visits` из нашего примера содержит две партиции: `201901` и `201902`. Используйте значения из этого столбца в запросах [ALTER … PARTITION](#alter_manipulations-with-partitions).
|
||||
Столбец `partition` содержит имена всех партиций таблицы. Таблица `visits` из нашего примера содержит две партиции: `201901` и `201902`. Используйте значения из этого столбца в запросах [ALTER … PARTITION](../../../sql-reference/statements/alter/partition.md).
|
||||
|
||||
Столбец `name` содержит названия кусков партиций. Значения из этого столбца можно использовать в запросах [ALTER ATTACH PART](#alter_attach-partition).
|
||||
Столбец `name` содержит названия кусков партиций. Значения из этого столбца можно использовать в запросах [ALTER ATTACH PART](../../../sql-reference/statements/alter/partition.md#alter_attach-partition).
|
||||
|
||||
Столбец `active` отображает состояние куска. `1` означает, что кусок активен; `0` – неактивен. К неактивным можно отнести куски, оставшиеся после слияния данных. Поврежденные куски также отображаются как неактивные. Неактивные куски удаляются приблизительно через 10 минут после того, как было выполнено слияние.
|
||||
|
||||
@ -82,7 +82,7 @@ WHERE table = 'visits'
|
||||
|
||||
Как видно из примера выше, таблица содержит несколько отдельных кусков для одной и той же партиции (например, куски `201901_1_3_1` и `201901_1_9_2` принадлежат партиции `201901`). Это означает, что эти куски еще не были объединены – в файловой системе они хранятся отдельно. После того как будет выполнено автоматическое слияние данных (выполняется примерно спустя 10 минут после вставки данных), исходные куски будут объединены в один более крупный кусок и помечены как неактивные.
|
||||
|
||||
Вы можете запустить внеочередное слияние данных с помощью запроса [OPTIMIZE](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#misc_operations-optimize). Пример:
|
||||
Вы можете запустить внеочередное слияние данных с помощью запроса [OPTIMIZE](../../../sql-reference/statements/optimize.md). Пример:
|
||||
|
||||
``` sql
|
||||
OPTIMIZE TABLE visits PARTITION 201902;

@ -123,11 +123,11 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached

The `detached` directory contains parts that were detached from the table with the [DETACH](../../../sql-reference/statements/alter/partition.md#alter_detach-partition) query. Corrupted parts are also moved to this directory instead of being deleted.

The server does not use the parts from the `detached` directory. You can add, delete, or modify the data in the `detached` directory at any time; the server will not know about it until you run the [ATTACH](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_attach-partition) query.
The server does not use the parts from the `detached` directory. You can add, delete, or modify the data in the `detached` directory at any time; the server will not know about it until you run the [ATTACH](../../../sql-reference/statements/alter/partition.md#alter_attach-partition) query.

Keep in mind that on a running server you cannot manually change the set of parts in the file system, because the server will not know about it.
For non-replicated tables you can do this while the server is stopped, although it is not recommended.
For replicated tables, the set of parts must not be changed in any case.

ClickHouse lets you perform various operations with parts: delete them, copy them from one table to another, or create backups of them. See the details in the section [Manipulations with partitions and parts](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_manipulations-with-partitions).
ClickHouse lets you perform various operations with parts: delete them, copy them from one table to another, or create backups of them. See the details in the section [Manipulations with partitions and parts](../../../sql-reference/statements/alter/partition.md).

@ -132,7 +132,7 @@ ClickHouse can merge data parts in such a way
[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
```

When requesting data, use the [sumMap(key, value)](../../../engines/table-engines/mergetree-family/summingmergetree.md) function for aggregating the `Map`.
When requesting data, use the [sumMap(key, value)](../../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) function for aggregating the `Map`.

For a nested data structure, you do not need to specify its columns in the tuple of columns for summation.
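
A minimal sketch of such a query, assuming a hypothetical `SummingMergeTree` table `some_table` with an `id` column and a nested column declared as `Map Nested(key UInt32, value UInt32)`:

``` sql
-- sumMap sums the values per key across the rows of each group
SELECT id, sumMap(Map.key, Map.value)
FROM some_table
GROUP BY id;
```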

@ -62,7 +62,7 @@ WHERE name = 'products'
└──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘
```

In this form, the dictionary data can be obtained with the [dictGet\*](../../../engines/table-engines/special/dictionary.md#ext_dict_functions) functions.
In this form, the dictionary data can be obtained with the [dictGet\*](../../../sql-reference/functions/ext-dict-functions.md#dictget) functions.

This view is inconvenient when you need to get the data in its raw form or to perform a `JOIN`. For those cases you can use the `Dictionary` engine, which exposes the dictionary data as a table.
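
A minimal sketch of both access paths; the `title` attribute and the `product_id` key used here are assumptions for illustration:

``` sql
-- point lookup through a dictionary function
SELECT dictGet('products', 'title', toUInt64(1));

-- the same dictionary exposed as a regular table
CREATE TABLE products_table (product_id UInt64, title String)
ENGINE = Dictionary(products);

SELECT * FROM products_table WHERE product_id = 1;
```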

@ -21,11 +21,11 @@ File(Format)

The `Format` must be one that ClickHouse can use both in `INSERT` and in `SELECT` queries. For the full list of supported formats, see [Formats](../../../interfaces/formats.md#formats).

The ClickHouse server does not allow specifying the path to the file that `File` will work with. It uses the storage path defined by the [path](../../../operations/server-configuration-parameters/settings.md) parameter in the server configuration.
The ClickHouse server does not allow specifying the path to the file that `File` will work with. It uses the storage path defined by the [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) parameter in the server configuration.

When you create a table with `File(Format)`, the ClickHouse server creates a directory with the table's name in the storage and, after data is inserted into the table, puts a `data.Format` file there.
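
A minimal sketch of this flow (the table name is hypothetical):

``` sql
CREATE TABLE file_engine_table (name String, value UInt32) ENGINE = File(TabSeparated);
-- the server creates the table directory and, after the insert, a data.TabSeparated file in it
INSERT INTO file_engine_table VALUES ('one', 1), ('two', 2);
SELECT * FROM file_engine_table;
```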

You can manually create the table directory in the storage, put a file there, and then, on the ClickHouse server, add ([ATTACH](../../../engines/table-engines/special/file.md)) the table information matching the directory name and read data from the file.
You can manually create the table directory in the storage, put a file there, and then, on the ClickHouse server, add ([ATTACH](../../../sql-reference/statements/attach.md#attach)) the table information matching the directory name and read data from the file.

!!! warning "Warning"
    Be careful with this functionality, because the ClickHouse server does not track external changes to the data. If the file is written to by the ClickHouse server and from outside at the same time, the result is unpredictable.

@ -5,7 +5,7 @@ toc_title: Join

# Join {#join}

Prepared data structure for use in [JOIN](../../../engines/table-engines/special/join.md#select-join) operations.
Prepared data structure for use in [JOIN](../../../sql-reference/statements/select/join.md#select-join) operations.

## Creating a Table {#creating-a-table}

@ -21,8 +21,8 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../../engines/table-engines/special/join.md#select-join-types).
- `join_type` – [JOIN type](../../../engines/table-engines/special/join.md#select-join-types).
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – key columns of the `USING` clause that the `JOIN` operation is performed with.

Enter the `join_strictness` and `join_type` parameters without quotes, for example, `Join(ANY, LEFT, col1)`. They must match the `JOIN` operation that the table will be used for. If the parameters do not match, ClickHouse does not throw an exception and may return incorrect data.

@ -42,7 +42,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
Main use cases for `Join` tables:

- Use on the right side of a `JOIN` clause.
- Extract data from the table in the same way as from a dictionary, using the [joinGet](../../../engines/table-engines/special/join.md#joinget) function.
- Extract data from the table in the same way as from a dictionary, using the [joinGet](../../../sql-reference/functions/other-functions.md#joinget) function.
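
A minimal sketch of both use cases (the names are hypothetical):

``` sql
CREATE TABLE id_val_join (`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id);
INSERT INTO id_val_join VALUES (1, 11), (2, 12);

-- dictionary-style lookup by key via joinGet
SELECT joinGet('id_val_join', 'val', toUInt32(1));
```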

### Deleting Data {#deleting-data}

@ -163,8 +163,8 @@ SELECT * FROM nestedt FORMAT TSV
## TabSeparatedWithNames {#tabseparatedwithnames}

Differs from the `TabSeparated` format in that the column names are written in the first row.
During parsing, the first row is completely ignored. You cannot use column names to determine their position or to check their correctness.
(Support for parsing the header row may be added in the future.)

During parsing, the first row must contain the column names. You can use column names to determine their position and to check their correctness.

This format is also available under the name `TSVWithNames`.
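
For example, the header row can be requested for the `nestedt` table used in the context above:

``` sql
SELECT * FROM nestedt FORMAT TSVWithNames;
```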

@ -430,7 +430,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
<http_handlers>
    <rule>
        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
        <method>GET</method>
        <methods>GET</methods>
        <headers>
            <XXX>TEST_HEADER_VALUE</XXX>
            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>

@ -31,6 +31,8 @@ toc_title: "Client Libraries from Third-party Developers"
- NodeJs
    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
    - [node-clickhouse](https://github.com/apla/node-clickhouse)
    - [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
    - [clickhouse-client](https://github.com/depyronick/clickhouse-client)
- Perl
    - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
    - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)

38
docs/ru/interfaces/third-party/gui.md
vendored
38
docs/ru/interfaces/third-party/gui.md
vendored
@ -69,6 +69,14 @@ toc_title: "Visual Interfaces from Third-party Developers"
- Database explorer.
- Visualization tools that can represent data in various forms.

### Grafana {#grafana}

[Grafana](https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource) is a platform for monitoring and visualization.

"With Grafana you can query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. Trusted and loved by the community" — grafana.com.

The ClickHouse data source plugin supports ClickHouse as a backend database.

### DBeaver {#dbeaver}

[DBeaver](https://dbeaver.io/) is a universal desktop database client with ClickHouse support.

@ -109,6 +117,36 @@ toc_title: "Visual Interfaces from Third-party Developers"

[MindsDB](https://mindsdb.com/) is an open-source product that implements an artificial intelligence (AI) layer for various DBMSs, including ClickHouse. MindsDB simplifies the creation, training, and deployment of modern machine-learning models. The MindsDB Studio graphical user interface lets you train new models on data in the database, interpret the predictions made by the models, detect potential biases in the data, and visualize and evaluate model reliability with the Explainable AI function, so you can adapt and tune your machine-learning models faster.

### DBM {#dbm}

[DBM](https://dbm.incubator.edurt.io/) is a visual management tool for ClickHouse!

Key features:

- Query history support (pagination, clearing, etc.)
- Support for selected query fragments
- Support for terminating queries
- Table management support (metadata, deletion, preview)
- Database management support (deletion, creation)
- Custom query support
- Support for managing multiple data sources (connection checks, monitoring)
- Monitoring support (CPU, connections, queries)
- Data migration support

### Bytebase {#bytebase}

[Bytebase](https://bytebase.com) is a web-based, open-source schema change and version control tool for teams. It supports various databases, including ClickHouse.

Key features:

- Schema review for developers and DBAs.
- Database-as-Code: store database schemas in a version control system such as GitLab, and trigger deployments on commit.
- Streamlined, environment-aware deployments.
- Complete migration history.
- Schema drift detection.
- Backup and restore.
- Role-based access control.

## Commercial {#commercial}

### DataGrip {#datagrip}

@ -1432,3 +1432,54 @@ ClickHouse uses ZooKeeper to store metadata
    </roles>
</ldap>
```

## total_memory_profiler_step {#total-memory-profiler-step}

Sets the memory size (in bytes) of the step at which a stack trace is collected as peak memory allocation grows. The data is stored in the [system.trace_log](../../operations/system-tables/trace_log.md) system table with `query_id` equal to an empty string.

Possible values:

- A positive integer.

Default value: `4194304`.

## total_memory_tracker_sample_probability {#total-memory-tracker-sample-probability}

Allows collecting random allocations and deallocations and writing them to the [system.trace_log](../../operations/system-tables/trace_log.md) system table with `trace_type` equal to `MemorySample`, with the specified probability. The probability applies to every allocation or deallocation, regardless of the allocation size. Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit (default value: `4` MiB); that limit can be lowered by lowering the [total_memory_profiler_step](#total-memory-profiler-step) setting. You can set `total_memory_profiler_step` to `1` for extra-fine-grained sampling.

Possible values:

- A positive integer.
- 0 — writing random allocations and deallocations to the `system.trace_log` system table is disabled.

Default value: `0`.
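
A hedged sketch of inspecting the collected samples; the filter value follows from the description above, while the aggregation itself is illustrative:

``` sql
SELECT count()
FROM system.trace_log
WHERE trace_type = 'MemorySample';
```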

## mmap_cache_size {#mmap-cache-size}

Sets the cache size (in bytes) for mapped files. This setting avoids frequent open/[mmap/munmap](https://en.wikipedia.org/wiki/Mmap)/close calls (which are very expensive due to the resulting page faults) and lets mappings be reused by several threads and queries. The setting value is the number of mapped regions (usually equal to the number of mapped files). The amount of data in mapped files can be monitored in the [system.metrics](../../operations/system-tables/metrics.md) and [system.metric_log](../../operations/system-tables/metric_log.md) system tables via the `MMappedFiles` and `MMappedFileBytes` metrics, in [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md) and [system.asynchronous_metrics_log](../../operations/system-tables/asynchronous_metric_log.md) via the `MMapCacheCells` metric, and in [system.events](../../operations/system-tables/events.md), [system.processes](../../operations/system-tables/processes.md), [system.query_log](../../operations/system-tables/query_log.md), [system.query_thread_log](../../operations/system-tables/query_thread_log.md) and [system.query_views_log](../../operations/system-tables/query_views_log.md) via the `CreatedReadBufferMMap`, `CreatedReadBufferMMapFailed`, `MMappedFileCacheHits` and `MMappedFileCacheMisses` events. Note that the amount of data in mapped files does not consume memory directly and is not counted toward query or server memory usage, because this memory can be discarded similarly to the OS page cache. The cache is dropped (the files are closed) automatically when old parts are removed in tables of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family; it can also be dropped manually with the `SYSTEM DROP MMAP CACHE` query.

Possible values:

- A positive integer.

Default value: `1000`.
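
For example, to drop the cache manually, as described above:

``` sql
SYSTEM DROP MMAP CACHE;
```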

## compiled_expression_cache_size {#compiled-expression-cache-size}

Sets the cache size (in bytes) for [compiled expressions](../../operations/caches.md).

Possible values:

- A positive integer.

Default value: `134217728`.

## compiled_expression_cache_elements_size {#compiled_expression_cache_elements_size}

Sets the cache size (in elements) for [compiled expressions](../../operations/caches.md).

Possible values:

- A positive integer.

Default value: `10000`.

@ -342,3 +342,16 @@ If the total number of active parts in all
**Usage**

The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.

## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}

Enables or disables detaching a data part on a replica after a merge or a mutation if its content is not byte-identical to the data parts on other replicas. If disabled, the data part is deleted. Enable this setting if you want to analyze such parts later.

This setting applies to `MergeTree` tables with enabled [data replication](../../engines/table-engines/mergetree-family/replication.md).

Possible values:

- 0 — parts are deleted.
- 1 — parts are detached.

Default value: `0`.
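
A hedged sketch of reviewing such parts later; querying `system.detached_parts` for the detach reason is an assumption here, not part of the setting description above:

``` sql
SELECT database, table, name, reason
FROM system.detached_parts;
```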

@ -1146,6 +1146,9 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'

Can be used to throttle the network bandwidth of data replication when adding or replacing nodes.

!!! note "Note"
    60000000 bytes/s is approximately 457 Mbit/s (60000000 / 1024 / 1024 * 8).

## max_replicated_sends_network_bandwidth_for_server {#max_replicated_sends_network_bandwidth_for_server}

Limits the maximum speed of network data exchange (in bytes per second) for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends. Applied only at server startup. The speed can also be limited for a particular table with the [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth) setting.

@ -1163,6 +1166,9 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'

Can be used to throttle the network bandwidth of data replication when adding or replacing nodes.

!!! note "Note"
    60000000 bytes/s is approximately 457 Mbit/s (60000000 / 1024 / 1024 * 8).

## connect_timeout_with_failover_ms {#connect-timeout-with-failover-ms}

The timeout in milliseconds for connecting to a remote server for the Distributed table engine, if the cluster definition uses the shard and replica sections.

@ -3802,3 +3808,13 @@ SELECT * FROM positional_arguments ORDER BY 2,3;

Default value: `0`.

## min_bytes_to_use_mmap_io {#min-bytes-to-use-mmap-io}

This is an experimental setting. It sets the minimum amount of memory for reading large files without copying data from the kernel to user space. The recommended threshold is about 64 MB, because [mmap/munmap](https://en.wikipedia.org/wiki/Mmap) is slow. It makes sense only for large files, and it helps only if the data resides in the OS page cache.

Possible values:

- A positive integer.
- 0 — large files are read only with copying data from the kernel to user space.

Default value: `0`.
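
A minimal sketch of enabling it for the current session at the ~64 MB threshold recommended above:

``` sql
SET min_bytes_to_use_mmap_io = 67108864; -- 64 MiB
```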

@ -4,58 +4,84 @@

Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by a DDL query. Empty string for other dictionaries.
- `name` ([String](../../sql-reference/data-types/string.md)) — [Dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — Dictionary status. Possible values:
    - `NOT_LOADED` — The dictionary was not loaded because it was not used.
    - `LOADED` — The dictionary loaded successfully.
    - `FAILED` — The dictionary could not be loaded as a result of an error.
    - `LOADING` — The dictionary is loading now.
    - `LOADED_AND_RELOADING` — The dictionary loaded successfully and is currently being reloaded (frequent reasons: a [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, a timeout, a change of the dictionary settings).
    - `FAILED_AND_RELOADING` — The dictionary could not be loaded as a result of an error and is currently being reloaded.
- `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
- `key` — [Key type](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key): numeric key ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) or composite key ([String](../../sql-reference/data-types/string.md)) — a string of the form “(type 1, type 2, …, type n)”.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM used by the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since its last successful reload.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of cached values.
- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — Percentage of dictionary lookups in which the value was found.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). In seconds.
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). In seconds.
- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of the dictionary loading.
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Time spent loading the dictionary.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary, if the dictionary could not be created.
- `database` ([String](../../sql-reference/data-types/string.md)) — name of the database containing the dictionary created by a DDL query. Empty string for other dictionaries.
- `name` ([String](../../sql-reference/data-types/string.md)) — [dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — unique UUID of the dictionary.
- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — dictionary status. Possible values:
    - `NOT_LOADED` — the dictionary was not loaded because it was not used.
    - `LOADED` — the dictionary loaded successfully.
    - `FAILED` — the dictionary could not be loaded as a result of an error.
    - `LOADING` — the dictionary is loading now.
    - `LOADED_AND_RELOADING` — the dictionary loaded successfully and is currently being reloaded (frequent reasons: a [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, a timeout, a change of the dictionary settings).
    - `FAILED_AND_RELOADING` — the dictionary could not be loaded as a result of an error and is currently being reloaded.
- `origin` ([String](../../sql-reference/data-types/string.md)) — path to the configuration file that describes the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — type of dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — array of [key names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — corresponding array of [key types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — amount of RAM used by the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — number of queries since the dictionary was loaded or since its last successful reload.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — for cache dictionaries, the percentage of cached values.
- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — percentage of dictionary lookups in which the value was found.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — number of items stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). In seconds.
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). In seconds.
- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — start time of the dictionary loading.
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — time spent loading the dictionary.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — text of the error that occurs when creating or reloading the dictionary, if the dictionary could not be created.
- `comment` ([String](../../sql-reference/data-types/string.md)) — text of the comment attached to the dictionary.

**Example**

Configure the dictionary.
Configure the dictionary:

``` sql
CREATE DICTIONARY dictdb.dict
CREATE DICTIONARY dictionary_with_comment
(
    `key` Int64 DEFAULT -1,
    `value_default` String DEFAULT 'world',
    `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)'
    id UInt64,
    value String
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb'))
LIFETIME(MIN 0 MAX 1)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Make sure the dictionary is loaded.

``` sql
SELECT * FROM system.dictionaries
SELECT * FROM system.dictionaries LIMIT 1 FORMAT Vertical;
```

``` text
┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┬──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐
│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┴───────────────────────┴────────────────┘
Row 1:
──────
database:                    default
name:                        dictionary_with_comment
uuid:                        4654d460-0d03-433a-8654-d4600d03d33a
status:                      NOT_LOADED
origin:                      4654d460-0d03-433a-8654-d4600d03d33a
type:
key.names:                   ['id']
key.types:                   ['UInt64']
attribute.names:             ['value']
attribute.types:             ['String']
bytes_allocated:             0
query_count:                 0
hit_rate:                    0
found_rate:                  0
element_count:               0
load_factor:                 0
source:
lifetime_min:                0
lifetime_max:                0
loading_start_time:          1970-01-01 00:00:00
last_successful_update_time: 1970-01-01 00:00:00
loading_duration:            0
last_exception:
comment:                     The temporary dictionary
```

@ -9,7 +9,7 @@

The data flush interval for the table is set in the `flush_interval_milliseconds` parameter of the [query_thread_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing the logs from the memory buffer into the table, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
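
For example, to flush the buffer on demand:

``` sql
SYSTEM FLUSH LOGS;
```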

ClickHouse does not delete data from the table automatically. See [Introduction](#system-tables-introduction) for details.
ClickHouse does not delete data from the table automatically. See [Introduction](../../operations/system-tables/index.md#system-tables-introduction) for details.

To reduce the number of queries registered in the `query_thread_log` table, you can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting.

77
docs/ru/operations/system-tables/session_log.md
Normal file
77
docs/ru/operations/system-tables/session_log.md
Normal file
@ -0,0 +1,77 @@
# system.session_log {#system_tables-session_log}

Contains information about all successful and failed login and logout events.

Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — login/logout result. Possible values:
    - `LoginFailure` — login error.
    - `LoginSuccess` — successful login.
    - `Logout` — logout from the system.
- `auth_id` ([UUID](../../sql-reference/data-types/uuid.md)) — authentication ID, a UUID that is automatically generated each time a user logs in.
- `session_id` ([String](../../sql-reference/data-types/string.md)) — session ID that is passed by the client via the [HTTP](../../interfaces/http.md) interface.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — login/logout date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — login/logout time.
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — login/logout start time with microsecond precision.
- `user` ([String](../../sql-reference/data-types/string.md)) — user name.
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)) — authentication type. Possible values:
    - `NO_PASSWORD`
    - `PLAINTEXT_PASSWORD`
    - `SHA256_PASSWORD`
    - `DOUBLE_SHA1_PASSWORD`
    - `LDAP`
    - `KERBEROS`
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — list of profiles set for all roles and/or users.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — list of roles to which the profile is applied.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — settings that were changed when the client logged in or out.
- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to log in or out.
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — client port that was used to log in or out.
- `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — interface from which the login was initiated. Possible values:
    - `TCP`
    - `HTTP`
    - `gRPC`
    - `MySQL`
    - `PostgreSQL`
- `client_hostname` ([String](../../sql-reference/data-types/string.md)) — hostname of the client machine from which [clickhouse-client](../../interfaces/cli.md) or another TCP client connected.
- `client_name` ([String](../../sql-reference/data-types/string.md)) — `clickhouse-client` or another TCP client.
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — revision of `clickhouse-client` or another TCP client.
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — major version of `clickhouse-client` or another TCP client.
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — minor version of `clickhouse-client` or another TCP client.
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — patch component of the `clickhouse-client` or another TCP client version.
- `failure_reason` ([String](../../sql-reference/data-types/string.md)) — exception message containing the reason for the login/logout failure.

**Example**

Query:

``` sql
SELECT * FROM system.session_log LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
type:                    LoginSuccess
auth_id:                 45e6bd83-b4aa-4a23-85e6-bd83b4aa1a23
session_id:
event_date:              2021-10-14
event_time:              2021-10-14 20:33:52
event_time_microseconds: 2021-10-14 20:33:52.104247
user:                    default
auth_type:               PLAINTEXT_PASSWORD
profiles:                ['default']
roles:                   []
settings:                [('load_balancing','random'),('max_memory_usage','10000000000')]
client_address:          ::ffff:127.0.0.1
client_port:             38490
interface:               TCP
client_hostname:
client_name:             ClickHouse client
client_revision:         54449
client_version_major:    21
client_version_minor:    10
client_version_patch:    0
failure_reason:
```

@ -6,7 +6,7 @@

- `policy_name` ([String](../../sql-reference/data-types/string.md)) — storage policy name.
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — name of a volume defined in the storage policy.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — volume order number in the configuration.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — volume order number in the configuration; the priority according to which data fills the volumes, i.e. during inserts and merges data is written to the volumes with lower priority (taking into account other rules: TTL, `max_data_part_size`, `move_factor`).
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — names of the disks defined in the storage policy.
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — maximum size of a data part that can be stored on the volume's disks (0 — no limit).
- `move_factor` — share of available free space on the volume; when the free space drops below this value, the data starts to be moved to the next volume, if there is one (the default is 0.1).
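
An illustrative query over the columns described above:

``` sql
SELECT policy_name, volume_name, volume_priority, disks
FROM system.storage_policies;
```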

@ -66,7 +66,6 @@ SETTINGS(format_csv_allow_single_quotes = 0)
- DBMS:
    - [ODBC](#dicts-external_dicts_dict_sources-odbc)
    - [MySQL](#dicts-external_dicts_dict_sources-mysql)
    - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
    - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
    - [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
    - [Redis](#dicts-external_dicts_dict_sources-redis)

@ -210,45 +209,6 @@ SOURCE(HTTP(

When a dictionary is created with a DDL command (`CREATE DICTIONARY ...`), remote hosts for HTTP dictionaries are checked against the `remote_url_allow_hosts` section of the server configuration. Otherwise, database users would be able to access an arbitrary HTTP server.

## ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Example of settings:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>SchemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
))
```

Setting fields:

- `db` — database name. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` — table name, with the schema name if there is one.
- `connection_string` — connection string.
- `invalidate_query` — query for checking the dictionary status. Optional. Read more in the section [Dictionary Updates](external-dicts-dict-lifetime.md).

ClickHouse receives quoting information from the ODBC driver and quotes the settings in queries to the driver, so the table name must be specified with the same letter case as the table name in the database.

If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) item.

### Known Vulnerability of the ODBC Dictionary Functionality {#vyiavlennaia-uiazvimost-v-funktsionirovanii-odbc-slovarei}

!!! attention "Attention"

@ -464,6 +424,51 @@ LIFETIME(MIN 300 MAX 360)

## DBMS {#subd}

### ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Example of settings:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>SchemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <query>SELECT id, value_1, value_2 FROM SchemaName.TableName</query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

Setting fields:

- `db` — database name. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` — table name, with the schema name if there is one.
- `connection_string` — connection string.
- `invalidate_query` — query for checking the dictionary status. Optional. Read more in the section [Dictionary Updates](external-dicts-dict-lifetime.md).
- `query` – custom query. Optional.

!!! info "Note"
    The `table` and `query` fields cannot be used together. One of the data sources must be specified: either `table` or `query`.

ClickHouse receives quoting information from the ODBC driver and quotes the settings in queries to the driver, so the table name must be specified with the same letter case as the table name in the database.

If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) item.

### MySQL {#dicts-external_dicts_dict_sources-mysql}

Example of settings:

@ -487,6 +492,7 @@ LIFETIME(MIN 300 MAX 360)
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```

@ -505,6 +511,7 @@ SOURCE(MYSQL(
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

@ -531,6 +538,11 @@ SOURCE(MYSQL(

- `fail_on_connection_loss` – a configuration parameter that controls the behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between the client and the server was lost. If `false`, the server retries the query three times before throwing an exception. Note that retrying leads to longer query execution times. Default value: `false`.

- `query` – custom query. Optional.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field. One of the data sources must be specified: either `table` or `query`.

MySQL can be connected to on a local host via sockets; to do this, set `host` and `socket`.

Example of settings:

@ -547,6 +559,7 @@ MySQL can be connected to on a local host via
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```

@ -564,6 +577,7 @@ SOURCE(MYSQL(
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

@ -582,6 +596,7 @@ SOURCE(MYSQL(
        <table>ids</table>
        <where>id=10</where>
        <secure>1</secure>
        <query>SELECT id, value_1, value_2 FROM default.ids</query>
    </clickhouse>
</source>
```

@ -598,6 +613,7 @@ SOURCE(CLICKHOUSE(
    table 'ids'
    where 'id=10'
    secure 1
    query 'SELECT id, value_1, value_2 FROM default.ids'
));
```

@ -612,6 +628,10 @@ SOURCE(CLICKHOUSE(
- `where` — selection condition. May be omitted.
- `invalidate_query` — query for checking the dictionary status. Optional. Read more in the section [Dictionary Updates](external-dicts-dict-lifetime.md).
- `secure` — flag that enables or disables a secure SSL connection.
- `query` – custom query. Optional.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field. One of the data sources must be specified: either `table` or `query`.

### MongoDB {#dicts-external_dicts_dict_sources-mongodb}

@ -703,25 +723,30 @@ SOURCE(REDIS(
        <consistency>One</consistency>
        <where>"SomeColumn" = 42</where>
        <max_threads>8</max_threads>
        <query>SELECT id, value_1, value_2 FROM database_name.table_name</query>
    </cassandra>
</source>
```

Setting fields:
- `host` – The host name of a Cassandra server or a comma-separated list of hosts.
- `port` – The port on the Cassandra servers. If not specified, the default port 9042 is used.
- `user` – The user name for connecting to Cassandra.
- `password` – The password for connecting to Cassandra.
- `keyspace` – The name of the keyspace (database).
- `column_family` – The name of the column family (table).
- `allow_filtering` – Flag that allows or disallows potentially expensive conditions on clustering key columns. The default value is 1.
- `partition_key_prefix` – The number of partition key columns in the primary key of the Cassandra table.
    Required for composing dictionary keys. The order of the key columns in the dictionary definition must be the same as in Cassandra.
    The default value is 1 (the first key column is the partition key, the remaining key columns are clustering keys).
- `consistency` – The consistency level. Possible values: `One`, `Two`, `Three`,
    `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. The default value is `One`.
- `where` – Optional selection criterion.
- `max_threads` – The maximum number of threads for loading data from multiple partitions into the dictionary.

- `host` – the host name of a Cassandra server or a comma-separated list of hosts.
- `port` – the port on the Cassandra servers. If not specified, the default value is used: 9042.
- `user` – the user name for connecting to Cassandra.
- `password` – the password for connecting to Cassandra.
- `keyspace` – the name of the keyspace (database).
- `column_family` – the name of the column family (table).
- `allow_filtering` – flag that allows or disallows potentially expensive conditions on clustering key columns. Default value: 1.
- `partition_key_prefix` – the number of partition key columns in the primary key of the Cassandra table.
    Required for composing dictionary keys. The order of the key columns in the dictionary definition must be the same as in Cassandra.
    Default value: 1 (the first key column is the partition key, the remaining key columns are clustering keys).
- `consistency` – the consistency level. Possible values: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default value: `One`.
- `where` – optional selection criterion.
- `max_threads` – the maximum number of threads for loading data from multiple partitions into the dictionary.
- `query` – custom query. Optional.

!!! info "Note"
    The `column_family` or `where` fields cannot be used together with the `query` field. One of the data sources must be specified: either `column_family` or `query`.

### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}

@ -737,6 +762,7 @@ SOURCE(REDIS(
        <table>table_name</table>
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </postgresql>
</source>
```

@ -755,20 +781,25 @@ SOURCE(POSTGRESQL(
    replica(host 'example01-2' port 5432 priority 2)
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

Setting fields:

- `host` – The host to connect to PostgreSQL. You can specify it for all replicas or individually for each of them (inside `<replica>`).
- `port` – The port to connect to PostgreSQL. You can specify it for all replicas or individually for each of them (inside `<replica>`).
- `user` – The user name for connecting to PostgreSQL. You can specify it for all replicas or individually for each of them (inside `<replica>`).
- `password` – The password for the PostgreSQL user.
- `replica` – The replica configuration section. There can be several.
    - `replica/host` – the PostgreSQL host.
    - `replica/port` – the PostgreSQL port.
    - `replica/priority` – The replica priority. During a connection attempt, ClickHouse traverses the replicas in priority order. The lower the number, the higher the priority.
- `db` – The database name.
- `table` – The table name.
- `where` – The selection condition. The syntax for conditions is the same as for the `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional.
- `invalidate_query` – Query for checking the dictionary load condition. Optional. Read more in the section [Dictionary Updates](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `host` – the host to connect to PostgreSQL. You can specify it for all replicas or individually for each of them (inside `<replica>`).
- `port` – the port to connect to PostgreSQL. You can specify it for all replicas or individually for each of them (inside `<replica>`).
- `user` – the user name for connecting to PostgreSQL. You can specify it for all replicas or individually for each of them (inside `<replica>`).
- `password` – the password for the PostgreSQL user.
- `replica` – the replica configuration section. There can be several.
    - `replica/host` – the PostgreSQL host.
    - `replica/port` – the PostgreSQL port.
    - `replica/priority` – the replica priority. During a connection attempt, ClickHouse traverses the replicas in priority order. The lower the number, the higher the priority.
- `db` – the database name.
- `table` – the table name.
- `where` – the selection condition. The syntax for conditions is the same as for the `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional.
- `invalidate_query` – query for checking the dictionary load condition. Optional. For more information, see the section [Dictionary Updates](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `query` – custom query. Optional.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field. One of the data sources must be specified: either `table` or `query`.

@ -5,7 +5,7 @@ toc_title: "Bit functions"

# Bit Functions {#bitovye-funktsii}

Bit functions work for any pair of types from UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.
Bit functions work for any pair of types from `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, `Float64`.

The result type is an integer whose bit width equals the maximum bit width of the arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64.
|
||||
|
||||
@ -19,8 +19,100 @@ toc_title: "Битовые функции"
|
||||
|
||||
## bitShiftLeft(a, b) {#bitshiftlefta-b}
|
||||
|
||||
Сдвигает влево на заданное количество битов бинарное представление значения.
|
||||
|
||||
Если передан аргумент типа `FixedString` или `String`, то он рассматривается, как одно многобайтовое значение.
|
||||
|
||||
Биты `FixedString` теряются по мере того, как выдвигаются за пределы строки. Значение `String` дополняется байтами, поэтому его биты не теряются.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
bitShiftLeft(a, b)
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `a` — сдвигаемое значение. [Целое число](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
- `b` — величина сдвига. [Беззнаковое целое число](../../sql-reference/data-types/int-uint.md), допустимы типы с разрядностью не более 64 битов.
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Сдвинутое значение.
|
||||
|
||||
Тип совпадает с типом сдвигаемого значения.
|
||||
|
||||
**Пример**
|
||||
|
||||
В запросах используются функции [bin](encoding-functions.md#bin) и [hex](encoding-functions.md#hex), чтобы наглядно показать биты после сдвига.

``` sql
SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐
│ 99 │ 01100011 │       140 │ 10001100                 │
└────┴──────────┴───────────┴──────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐
│ abc │ 616263     │ &0        │ 06162630                    │
└─────┴────────────┴───────────┴─────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐
│ abc │ 616263                       │ &0        │ 162630                                        │
└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘
```

## bitShiftRight(a, b) {#bitshiftrighta-b}

Shifts the binary representation of a value to the right by the specified number of bits.

If an argument of type `FixedString` or `String` is passed, it is treated as a single multi-byte value. Note that the length of a `String` value is reduced as bits are shifted out.

**Syntax**

``` sql
bitShiftRight(a, b)
```

**Arguments**

- `a` — the value to shift. [Integer](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — the number of bits to shift by. [Unsigned integer](../../sql-reference/data-types/int-uint.md); types of up to 64 bits are allowed.

**Returned value**

- The shifted value.

The type is the same as the type of the value being shifted.

**Example**

Query:

``` sql
SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐
│ 101 │ 01100101 │        25 │ 00011001                   │
└─────┴──────────┴───────────┴────────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐
│ abc │ 616263     │           │ 0616                          │
└─────┴────────────┴───────────┴───────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐
│ abc │ 616263                       │           │ 000616                                          │
└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘
```

## bitTest {#bittest}

Takes any integer and converts it into [binary form](https://en.wikipedia.org/wiki/Binary_number), then returns the value of the bit at the specified position. Positions are counted from right to left, starting at 0.
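
For example, `43` is `101011` in binary, so the bit at position 1 (the second from the right) is set (a small sketch added for illustration):

``` sql
SELECT bitTest(43, 1);
-- Returns 1: bit 1 of 0b101011 is set
```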

@ -216,6 +216,44 @@ SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \
SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)];
```

## JSONExtractKeys {#jsonextractkeysjson-indices-or-keys}

Parses a JSON string and extracts the keys.

**Syntax**

``` sql
JSONExtractKeys(json[, a, b, c...])
```

**Arguments**

- `json` — a [string](../data-types/string.md) containing valid JSON.
- `a, b, c...` — comma-separated indices or keys that specify the path to an inner field in a nested JSON object. Each argument can be either a [string](../data-types/string.md) to get a field by its key or an [integer](../data-types/int-uint.md) to get the N-th field (indexing starts at 1; negative integers count from the end). If not set, the whole JSON is parsed as the top-level object. Optional.

**Returned values**

An array with the keys of the JSON.

Type: [Array](../data-types/array.md)([String](../data-types/string.md)).

**Example**

Query:

```sql
SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}');
```

Result:

``` text
┌─JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}')─┐
│ ['a','b']                                                  │
└────────────────────────────────────────────────────────────┘
```

## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys}

Returns a part of the JSON as a string containing an unparsed substring.
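
For example (a small sketch added for illustration, reusing the JSON string from the example above):

``` sql
SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b');
-- Returns the unparsed substring: [-100, 200.0, 300]
```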

@ -2411,3 +2411,39 @@ SELECT getOSKernelVersion();
│ Linux 4.15.0-55-generic │
└─────────────────────────┘
```

## zookeeperSessionUptime {#zookeepersessionuptime}

Returns the uptime of the current ZooKeeper session in seconds.

**Syntax**

``` sql
zookeeperSessionUptime()
```

**Arguments**

- None.

**Returned value**

- Uptime of the current ZooKeeper session in seconds.

Type: [UInt32](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT zookeeperSessionUptime();
```

Result:

``` text
┌─zookeeperSessionUptime()─┐
│                      286 │
└──────────────────────────┘
```

44
docs/ru/sql-reference/operators/exists.md
Normal file
@ -0,0 +1,44 @@
# EXISTS {#exists-operator}

The `EXISTS` operator checks how many rows the result of a subquery contains. If the result is empty, the operator returns `0`. Otherwise, it returns `1`.

`EXISTS` can be used in the [WHERE](../../sql-reference/statements/select/where.md) clause.

!!! warning "Warning"
    References to tables or columns of the main query are not supported in the subquery.

**Syntax**

```sql
WHERE EXISTS(subquery)
```

**Example**

Query with a subquery returning several rows:

``` sql
SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 8);
```

Result:

``` text
┌─count()─┐
│      10 │
└─────────┘
```

Query with a subquery that returns an empty result:

``` sql
SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 11);
```

Result:

``` text
┌─count()─┐
│       0 │
└─────────┘
```