Merge branch 'master' into karnevil-DOCSUP-18631
Commit 9a60033fef
.github/workflows/backport.yml (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
name: CherryPick
concurrency:
  group: cherry-pick
on: # yamllint disable-line rule:truthy
  schedule:
    - cron: '0 */3 * * *'
jobs:
  CherryPick:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Cherry pick
        env:
          TEMP_PATH: ${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          REPO_OWNER: "ClickHouse"
          REPO_NAME: "ClickHouse"
          REPO_TEAM: "core"
        run: |
          sudo pip install GitPython
          cd $GITHUB_WORKSPACE/tests/ci
          python3 cherry_pick.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
.github/workflows/cancel.yml (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
name: Cancel
on: # yamllint disable-line rule:truthy
  workflow_run:
    workflows: ["CIGithubActions", "ReleaseCI"]
    types:
      - requested
jobs:
  cancel:
    runs-on: [self-hosted, style-checker]
    steps:
      - uses: styfle/cancel-workflow-action@0.9.1
        with:
          workflow_id: ${{ github.event.workflow.id }}
.github/workflows/main.yml (vendored, new file, 1491 lines): file diff suppressed because it is too large.
.github/workflows/master.yml (vendored, new file, 1384 lines): file diff suppressed because it is too large.
.github/workflows/release.yml (vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
name: DocsReleaseChecks
concurrency:
  group: master-release
  cancel-in-progress: true
on: # yamllint disable-line rule:truthy
  push:
    branches:
      - master
    paths:
      - 'docs/**'
      - 'website/**'
      - 'benchmark/**'
      - 'docker/**'
jobs:
  DockerHubPush:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docker_images_check/changed_images.json
  DocsRelease:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{runner.temp}}/docs_release
      - name: Docs Release
        env:
          TEMP_PATH: ${{runner.temp}}/docs_release
          REPO_COPY: ${{runner.temp}}/docs_release/ClickHouse
          CLOUDFLARE_TOKEN: ${{secrets.CLOUDFLARE}}
          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
        run: |
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 docs_release.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
.github/workflows/release_branches.yml (vendored, new file, 933 lines)
@@ -0,0 +1,933 @@
name: ReleaseCI
on: # yamllint disable-line rule:truthy
  push:
    branches:
      - '21.**'
      - '22.**'
      - '23.**'
      - '24.**'
      - 'backport/**'
jobs:
  DockerHubPush:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docker_images_check/changed_images.json
  CompatibilityCheck:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: CompatibilityCheck
        env:
          TEMP_PATH: ${{runner.temp}}/compatibility_check
          REPO_COPY: ${{runner.temp}}/compatibility_check/ClickHouse
          REPORTS_PATH: ${{runner.temp}}/reports_dir
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 compatibility_check.py 0
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
  BuilderDebRelease:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 0
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebAsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 3
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebUBsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 4
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebTsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 5
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebMsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 6
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebDebug:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 7
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
  BuilderReport:
    needs:
      - BuilderDebRelease
      - BuilderDebAsan
      - BuilderDebTsan
      - BuilderDebUBsan
      - BuilderDebMsan
      - BuilderDebDebug
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Report Builder
        env:
          TEMP_PATH: ${{runner.temp}}/report_check
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'ClickHouse build check (actions)'
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cd $GITHUB_WORKSPACE/tests/ci
          python3 build_report_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
  FunctionalStatelessTestRelease:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (release, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestTsan:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_tsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (thread, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestUBsan:
    needs: [BuilderDebUBsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_ubsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (ubsan, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_ubsan/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestMsan:
    needs: [BuilderDebMsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_memory
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (memory, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
  FunctionalStatefulTestRelease:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (release, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestTsan:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_tsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (thread, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_tsan/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestMsan:
    needs: [BuilderDebMsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_msan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (memory, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_msan/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestUBsan:
    needs: [BuilderDebUBsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_ubsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (ubsan, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_ubsan/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
  StressTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_thread
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (address, actions)'
          REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestTsan:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_thread
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (thread, actions)'
          REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestMsan:
    needs: [BuilderDebMsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_memory
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (memory, actions)'
          REPO_COPY: ${{runner.temp}}/stress_memory/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestUBsan:
    needs: [BuilderDebUBsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_undefined
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (undefined, actions)'
          REPO_COPY: ${{runner.temp}}/stress_undefined/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress test (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
  IntegrationTestsAsan:
    needs: [BuilderDebAsan, FunctionalStatelessTestAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Integration test
        env:
          TEMP_PATH: ${{runner.temp}}/integration_tests_asan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Integration tests (asan, actions)'
          REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  IntegrationTestsTsan:
    needs: [BuilderDebTsan, FunctionalStatelessTestTsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Integration test
        env:
          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Integration tests (thread, actions)'
          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  IntegrationTestsRelease:
    needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Integration test
        env:
          TEMP_PATH: ${{runner.temp}}/integration_tests_release
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Integration tests (release, actions)'
          REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FinishCheck:
    needs:
      - DockerHubPush
      - BuilderReport
      - FunctionalStatelessTestDebug
      - FunctionalStatelessTestRelease
      - FunctionalStatelessTestAsan
      - FunctionalStatelessTestTsan
      - FunctionalStatelessTestMsan
      - FunctionalStatelessTestUBsan
      - FunctionalStatefulTestDebug
      - FunctionalStatefulTestRelease
      - FunctionalStatefulTestAsan
      - FunctionalStatefulTestTsan
      - FunctionalStatefulTestMsan
      - FunctionalStatefulTestUBsan
      - StressTestDebug
      - StressTestAsan
      - StressTestTsan
      - StressTestMsan
      - StressTestUBsan
      - IntegrationTestsAsan
      - IntegrationTestsRelease
      - IntegrationTestsTsan
      - CompatibilityCheck
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Finish label
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 finish_check.py
.gitmodules (vendored, 8 lines changed)
@@ -76,7 +76,7 @@
 	url = https://github.com/ClickHouse-Extras/libcxxabi.git
 [submodule "contrib/snappy"]
 	path = contrib/snappy
-	url = https://github.com/google/snappy
+	url = https://github.com/ClickHouse-Extras/snappy.git
 [submodule "contrib/cppkafka"]
 	path = contrib/cppkafka
 	url = https://github.com/mfontanini/cppkafka.git
@@ -140,7 +140,7 @@
 	url = https://github.com/ClickHouse-Extras/libc-headers.git
 [submodule "contrib/replxx"]
 	path = contrib/replxx
-	url = https://github.com/ClickHouse-Extras/replxx.git
+	url = https://github.com/AmokHuginnsson/replxx.git
 [submodule "contrib/avro"]
 	path = contrib/avro
 	url = https://github.com/ClickHouse-Extras/avro.git
@@ -213,6 +213,7 @@
 [submodule "contrib/boringssl"]
 	path = contrib/boringssl
 	url = https://github.com/ClickHouse-Extras/boringssl.git
+	branch = MergeWithUpstream
 [submodule "contrib/NuRaft"]
 	path = contrib/NuRaft
 	url = https://github.com/ClickHouse-Extras/NuRaft.git
@@ -249,6 +250,9 @@
 [submodule "contrib/magic_enum"]
 	path = contrib/magic_enum
 	url = https://github.com/Neargye/magic_enum
 [submodule "contrib/libprotobuf-mutator"]
 	path = contrib/libprotobuf-mutator
 	url = https://github.com/google/libprotobuf-mutator
+[submodule "contrib/sysroot"]
+	path = contrib/sysroot
+	url = https://github.com/ClickHouse-Extras/sysroot.git
CHANGELOG.md (268 lines changed)
@@ -1,4 +1,270 @@
-### ClickHouse release v21.10, 2021-10-14
+### ClickHouse release v21.11, 2021-11-09

#### Backward Incompatible Change

* Change the order of the `json_path` and `json` arguments in SQL/JSON functions (to be consistent with the standard; a sketch of the new order follows after this list). Closes [#30449](https://github.com/ClickHouse/ClickHouse/issues/30449). [#30474](https://github.com/ClickHouse/ClickHouse/pull/30474) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove the `MergeTree` table setting `write_final_mark`. It will always be `true`. [#30455](https://github.com/ClickHouse/ClickHouse/pull/30455) ([Kseniia Sumarokova](https://github.com/kssenii)). No actions required; all tables are compatible with the new version.
* Function `bayesAB` is removed. Please help to bring this function back, refreshed. This closes [#26233](https://github.com/ClickHouse/ClickHouse/issues/26233). [#29934](https://github.com/ClickHouse/ClickHouse/pull/29934) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Relevant only if you have already started using the experimental `clickhouse-keeper` support: ClickHouse Keeper snapshots are now compressed with the `ZSTD` codec by default instead of custom ClickHouse LZ4 block compression. This behavior can be turned off with the `compress_snapshots_with_zstd_format` coordination setting (it must be equal on all quorum replicas). The backward incompatibility is quite rare and may happen only when a new node sends a snapshot (which happens during recovery) to an old node that is unable to read snapshots in ZSTD format. [#29417](https://github.com/ClickHouse/ClickHouse/pull/29417) ([alesapin](https://github.com/alesapin)).
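
As a minimal sketch of the new argument order described above (per the SQL standard, the JSON document comes first and the path second; the document literal below is just an illustration):

```sql
-- v21.11 argument order: JSON_EXISTS(json, path)
SELECT JSON_EXISTS('{"hello": 1}', '$.hello');
SELECT JSON_VALUE('{"hello": 1}', '$.hello');
```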

#### New Feature

* The new asynchronous INSERT mode allows accumulating inserted data and storing it in a single batch in the background. On the client side it can be enabled with the setting `async_insert` for `INSERT` queries with data inlined in the query or in a separate buffer (e.g. for `INSERT` queries via the HTTP protocol). If `wait_for_async_insert` is true (the default), the client will wait until the data is flushed to the table. On the server side it is controlled by the settings `async_insert_threads`, `async_insert_max_data_size` and `async_insert_busy_timeout_ms`. Implements [#18282](https://github.com/ClickHouse/ClickHouse/issues/18282). [#27537](https://github.com/ClickHouse/ClickHouse/pull/27537) ([Anton Popov](https://github.com/CurtizJ)). [#20557](https://github.com/ClickHouse/ClickHouse/pull/20557) ([Ivan](https://github.com/abyss7)). Notes on performance: with asynchronous inserts you can do up to around 10,000 individual INSERT queries per second, so it is still recommended to insert in batches if you want to achieve performance of up to millions of inserted rows per second. (See the sketch at the end of this section.)
* Add an interactive mode for `clickhouse-local`: you can now just run `clickhouse-local` to get a command-line ClickHouse interface without connecting to a server, and process data from files and external data sources. The code of `clickhouse-client` and `clickhouse-local` is also merged together. Closes [#7203](https://github.com/ClickHouse/ClickHouse/issues/7203). Closes [#25516](https://github.com/ClickHouse/ClickHouse/issues/25516). Closes [#22401](https://github.com/ClickHouse/ClickHouse/issues/22401). [#26231](https://github.com/ClickHouse/ClickHouse/pull/26231) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added support for executable (scriptable) user-defined functions. These are UDFs that can be written in any programming language. [#28803](https://github.com/ClickHouse/ClickHouse/pull/28803) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow predefined connections to external data sources. This avoids specifying credentials or addresses when using external data sources; they can be referenced by name instead. Closes [#28367](https://github.com/ClickHouse/ClickHouse/issues/28367). [#28577](https://github.com/ClickHouse/ClickHouse/pull/28577) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added an `INFORMATION_SCHEMA` database with `SCHEMATA`, `TABLES`, `VIEWS` and `COLUMNS` views over the corresponding tables in the `system` database. Closes [#9770](https://github.com/ClickHouse/ClickHouse/issues/9770). [#28691](https://github.com/ClickHouse/ClickHouse/pull/28691) ([tavplubix](https://github.com/tavplubix)).
* Support `EXISTS (subquery)`. Closes [#6852](https://github.com/ClickHouse/ClickHouse/issues/6852). [#29731](https://github.com/ClickHouse/ClickHouse/pull/29731) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Session logging for audit: log all successful and failed login and logout events to a new `system.session_log` table. [#22415](https://github.com/ClickHouse/ClickHouse/pull/22415) ([Vasily Nemkov](https://github.com/Enmk)) ([Vitaly Baranov](https://github.com/vitlibar)).
* Support multidimensional cosine distance and Euclidean distance functions; L1, L2, Lp, Linf distances and norms; scalar product on tuples; and various arithmetic operators on tuples. This fully closes [#4509](https://github.com/ClickHouse/ClickHouse/issues/4509) and even more. [#27933](https://github.com/ClickHouse/ClickHouse/pull/27933) ([Alexey Boykov](https://github.com/mathalex)).
* Add support for compression and decompression for `INTO OUTFILE` and `FROM INFILE` (with autodetection or with an additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Add CORS (Cross-Origin Resource Sharing) support with the HTTP `OPTIONS` request. It means that Grafana will now work with serverless requests without kludges. Closes [#18693](https://github.com/ClickHouse/ClickHouse/issues/18693). [#29155](https://github.com/ClickHouse/ClickHouse/pull/29155) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Queries with JOIN ON now support disjunctions (OR). [#21320](https://github.com/ClickHouse/ClickHouse/pull/21320) ([Ilya Golshtein](https://github.com/ilejn)).
* Added function `tokens`, which splits a string into tokens using non-alphanumeric ASCII characters as separators. [#29981](https://github.com/ClickHouse/ClickHouse/pull/29981) ([Maksim Kita](https://github.com/kitaisreal)). Added function `ngrams` to extract ngrams from text. Closes [#29699](https://github.com/ClickHouse/ClickHouse/issues/29699). [#29738](https://github.com/ClickHouse/ClickHouse/pull/29738) ([Maksim Kita](https://github.com/kitaisreal)).
* Add functions for Unicode normalization: `normalizeUTF8NFC`, `normalizeUTF8NFD`, `normalizeUTF8NFKC` and `normalizeUTF8NFKD`. [#28633](https://github.com/ClickHouse/ClickHouse/pull/28633) ([darkkeks](https://github.com/darkkeks)).
* Streaming consumption of application log files in ClickHouse with the `FileLog` table engine. It is like the `Kafka` or `RabbitMQ` engine, but for append-only and rotated logs in the local filesystem. Closes [#6953](https://github.com/ClickHouse/ClickHouse/issues/6953). [#25969](https://github.com/ClickHouse/ClickHouse/pull/25969) ([flynn](https://github.com/ucasfl)) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a `CapnProto` output format; refactor the `CapnProto` input format. [#29291](https://github.com/ClickHouse/ClickHouse/pull/29291) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow writing numbers in queries as binary literals. Example: `SELECT 0b001;`. [#29304](https://github.com/ClickHouse/ClickHouse/pull/29304) ([Maksim Kita](https://github.com/kitaisreal)).
* Added the `hashed_array` dictionary type. It saves memory when using dictionaries with multiple attributes. Closes [#30236](https://github.com/ClickHouse/ClickHouse/issues/30236). [#30242](https://github.com/ClickHouse/ClickHouse/pull/30242) ([Maksim Kita](https://github.com/kitaisreal)).
* Added the `JSONExtractKeys` function. [#30056](https://github.com/ClickHouse/ClickHouse/pull/30056) ([Vitaly](https://github.com/orloffv)).
* Add a function `getOSKernelVersion` that returns a string with the OS kernel version. [#29755](https://github.com/ClickHouse/ClickHouse/pull/29755) ([Memo](https://github.com/Joeywzr)).
* Added `MD4` and `SHA384` functions. MD4 is an obsolete and insecure hash function; it can be used only in rare cases when MD4 is already being used in some legacy system and you need to get exactly the same result. [#29602](https://github.com/ClickHouse/ClickHouse/pull/29602) ([Nikita Tikhomirov](https://github.com/NSTikhomirov)).
* HSTS can be enabled for the ClickHouse HTTP server by setting `hsts_max_age` in the configuration file to a positive number. [#29516](https://github.com/ClickHouse/ClickHouse/pull/29516) ([凌涛](https://github.com/lingtaolf)).
* Huawei OBS storage support. Closes [#24294](https://github.com/ClickHouse/ClickHouse/issues/24294). [#29511](https://github.com/ClickHouse/ClickHouse/pull/29511) ([kevin wan](https://github.com/MaxWk)).
* New function `mapContainsKeyLike` to check whether a map key matches a simple regular expression. [#29471](https://github.com/ClickHouse/ClickHouse/pull/29471) ([凌涛](https://github.com/lingtaolf)). New function `mapExtractKeyLike` to get a map containing only the elements whose key matches the specified pattern. [#30793](https://github.com/ClickHouse/ClickHouse/pull/30793) ([凌涛](https://github.com/lingtaolf)).
* Implemented `ALTER TABLE x MODIFY COMMENT`. [#29264](https://github.com/ClickHouse/ClickHouse/pull/29264) ([Vasily Nemkov](https://github.com/Enmk)).
* Adds the H3 inspection functions that were missing from ClickHouse but are available via the H3 API: https://h3geo.org/docs/api/inspection. [#29209](https://github.com/ClickHouse/ClickHouse/pull/29209) ([Bharat Nallan](https://github.com/bharatnc)).
* Allow non-replicated `ALTER TABLE FETCH` and `ATTACH` in `Replicated` databases. [#29202](https://github.com/ClickHouse/ClickHouse/pull/29202) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Added a setting `output_format_csv_null_representation`: the same as `output_format_tsv_null_representation`, but for CSV output. [#29123](https://github.com/ClickHouse/ClickHouse/pull/29123) ([PHO](https://github.com/depressed-pho)).
* Added function `zookeeperSessionUptime()`, which returns the uptime of the current ZooKeeper session in seconds. [#28983](https://github.com/ClickHouse/ClickHouse/pull/28983) ([tavplubix](https://github.com/tavplubix)).
* Implements the `h3ToGeoBoundary` function. [#28952](https://github.com/ClickHouse/ClickHouse/pull/28952) ([Ivan Veselov](https://github.com/fuzzERot)).
* Add aggregate function `exponentialMovingAverage` that can be used as a window function. This closes [#27511](https://github.com/ClickHouse/ClickHouse/issues/27511). [#28914](https://github.com/ClickHouse/ClickHouse/pull/28914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow including subcolumns of table columns in `DESCRIBE` query results (can be enabled by the setting `describe_include_subcolumns`). [#28905](https://github.com/ClickHouse/ClickHouse/pull/28905) ([Anton Popov](https://github.com/CurtizJ)).
* `Executable` and `ExecutablePool` gained the option `send_chunk_header`. If this option is true, a chunk row count with a line break is sent to the client before each chunk. [#28833](https://github.com/ClickHouse/ClickHouse/pull/28833) ([Maksim Kita](https://github.com/kitaisreal)).
* `tokenbf_v1` and `ngram` indexes support `Map` with keys of `String` or `FixedString` type. This enhances data skipping in queries with a map-key filter. ```sql CREATE TABLE map_tokenbf (row_id UInt32, map Map(String, String), INDEX map_tokenbf map TYPE ngrambf_v1(4, 256, 2, 0) GRANULARITY 1) ENGINE = MergeTree() ORDER BY row_id ``` With the table above, the query `SELECT * FROM map_tokenbf WHERE map['K'] = 'V'` will skip granules that do not contain the key `'K'`. How many rows are skipped depends on the `granularity` and `index_granularity` you set. [#28511](https://github.com/ClickHouse/ClickHouse/pull/28511) ([凌涛](https://github.com/lingtaolf)).
* Send profile events from the server to the client. A new packet type `ProfileEvents` was introduced. Closes [#26177](https://github.com/ClickHouse/ClickHouse/issues/26177). [#28364](https://github.com/ClickHouse/ClickHouse/pull/28364) ([Dmitry Novik](https://github.com/novikd)).
* Bit shift operations for `FixedString` and `String` data types. This closes [#27763](https://github.com/ClickHouse/ClickHouse/issues/27763). [#28325](https://github.com/ClickHouse/ClickHouse/pull/28325) ([小路](https://github.com/nicelulu)).
* Support dynamically adding/deleting tables to/from replication from PostgreSQL in the database engine `MaterializedPostgreSQL`. Support `ALTER` for database settings. Closes [#27573](https://github.com/ClickHouse/ClickHouse/issues/27573). [#28301](https://github.com/ClickHouse/ClickHouse/pull/28301) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added function `accurateCastOrDefault(x, T)`. Closes [#21330](https://github.com/ClickHouse/ClickHouse/issues/21330). Author: @taiyang-li. [#23028](https://github.com/ClickHouse/ClickHouse/pull/23028) ([Maksim Kita](https://github.com/kitaisreal)).
* Add functions `toUUIDOrDefault`, `toUInt8/16/32/64/256OrDefault`, `toInt8/16/32/64/128/256OrDefault`, which let the user define a default value (not NULL) to return when string parsing fails. [#21330](https://github.com/ClickHouse/ClickHouse/pull/21330) ([taiyang-li](https://github.com/taiyang-li)).
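
A minimal sketch of the asynchronous insert mode referenced above (the table `t_async` is hypothetical; the settings are the ones named in the entry):

```sql
-- Accumulate small inserts server-side and flush them as one batch;
-- with wait_for_async_insert = 1 the client blocks until the flush.
SET async_insert = 1, wait_for_async_insert = 1;
INSERT INTO t_async VALUES (1, 'a'), (2, 'b');
```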
#### Performance Improvement
|
||||
|
||||
* Background merges can be preempted by each other and they are scheduled with appropriate priorities. Now long running merges won't prevent short merges to proceed. This is needed for a better scheduling and controlling of merges execution. It reduces the chances to get "too many parts" error. [#22381](https://github.com/ClickHouse/ClickHouse/issues/22381). [#25165](https://github.com/ClickHouse/ClickHouse/pull/25165) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). Added an ability to execute more merges and mutations than the number of threads in background pool. Merges and mutations will be executed step by step according to their sizes (lower is more prioritized). The ratio of the number of tasks to threads to execute is controlled by a setting `background_merges_mutations_concurrency_ratio`, 2 by default. [#29140](https://github.com/ClickHouse/ClickHouse/pull/29140) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Allow to use asynchronous reads for remote filesystems. Lower the number of seeks while reading from remote filesystems. It improves performance tremendously and makes the experimental `web` and `s3` disks work faster than EBS under certain conditions. [#29205](https://github.com/ClickHouse/ClickHouse/pull/29205) ([Kseniia Sumarokova](https://github.com/kssenii)). In the meantime, the `web` disk type (static dataset hosted on a web server) has graduated from experimental to production ready.
* Queries with `INTO OUTFILE` in `clickhouse-client` will use multiple threads. Fix the issue with a flickering progress bar when using `INTO OUTFILE`. This closes [#30873](https://github.com/ClickHouse/ClickHouse/issues/30873). This closes [#30872](https://github.com/ClickHouse/ClickHouse/issues/30872). [#30886](https://github.com/ClickHouse/ClickHouse/pull/30886) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Reduce the amount of redundant compressed data read from disk for some types of `SELECT` queries (only for the `MergeTree` engine family). [#30111](https://github.com/ClickHouse/ClickHouse/pull/30111) ([alesapin](https://github.com/alesapin)).
* Remove some redundant `seek` calls while reading compressed blocks in the `MergeTree` table engine family. [#29766](https://github.com/ClickHouse/ClickHouse/pull/29766) ([alesapin](https://github.com/alesapin)).
* Make the `url` table function process multiple URLs in parallel. This closes [#29670](https://github.com/ClickHouse/ClickHouse/issues/29670) and closes [#29671](https://github.com/ClickHouse/ClickHouse/issues/29671). [#29673](https://github.com/ClickHouse/ClickHouse/pull/29673) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of aggregation in order of primary key (with enabled setting `optimize_aggregation_in_order`). [#30266](https://github.com/ClickHouse/ClickHouse/pull/30266) ([Anton Popov](https://github.com/CurtizJ)).
* Now ClickHouse uses a DNS cache while communicating with external S3. [#29999](https://github.com/ClickHouse/ClickHouse/pull/29999) ([alesapin](https://github.com/alesapin)).
* Add support for pushdown of `IS NULL`/`IS NOT NULL` to external databases (e.g. MySQL). [#29463](https://github.com/ClickHouse/ClickHouse/pull/29463) ([Azat Khuzhin](https://github.com/azat)). Transform `isNull`/`isNotNull` to `IS NULL`/`IS NOT NULL` (for external databases, e.g. MySQL). [#29446](https://github.com/ClickHouse/ClickHouse/pull/29446) ([Azat Khuzhin](https://github.com/azat)).
* SELECT queries from `Dictionary` tables will use multiple threads. [#30500](https://github.com/ClickHouse/ClickHouse/pull/30500) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance of filtering (the WHERE operation) on `Decimal` columns. [#30431](https://github.com/ClickHouse/ClickHouse/pull/30431) ([Jun Jin](https://github.com/vesslanjin)).
* Replace branchy code in the filter operation with a better implementation based on `popcnt`/`ctz`, which have better performance. [#29881](https://github.com/ClickHouse/ClickHouse/pull/29881) ([Jun Jin](https://github.com/vesslanjin)).
* Improve the filter bytemask generator (used for the WHERE operator) with an all-in-one implementation using SSE/AVX2/AVX512 instructions. Note that by default ClickHouse only uses SSE, so it's only relevant for custom builds. [#30014](https://github.com/ClickHouse/ClickHouse/pull/30014) ([jasperzhu](https://github.com/jinjunzh)). [#30670](https://github.com/ClickHouse/ClickHouse/pull/30670) ([jasperzhu](https://github.com/jinjunzh)).
* Improve the performance of the SUM aggregate function for Nullable floating point numbers. [#28906](https://github.com/ClickHouse/ClickHouse/pull/28906) ([Raúl Marín](https://github.com/Algunenano)).
* Speed up the part loading process when multiple disks are in use. The idea is similar to https://github.com/ClickHouse/ClickHouse/pull/16423. A production environment shows the improvement: 24 min -> 16 min. [#28363](https://github.com/ClickHouse/ClickHouse/pull/28363) ([Amos Bird](https://github.com/amosbird)).
* Reduce the default settings for S3 multipart upload part size to lower memory usage. [#28679](https://github.com/ClickHouse/ClickHouse/pull/28679) ([ianton-ru](https://github.com/ianton-ru)).
* Speed up the `bitmapAnd` function. [#28332](https://github.com/ClickHouse/ClickHouse/pull/28332) ([dddounaiking](https://github.com/OodounaikingoO)).
* Removed sub-optimal mutation notifications in `StorageMergeTree` when merges are still going. [#27552](https://github.com/ClickHouse/ClickHouse/pull/27552) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Attempt to improve performance of string comparison. [#28767](https://github.com/ClickHouse/ClickHouse/pull/28767) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* The primary key index and partition filter can now work with tuples. [#29281](https://github.com/ClickHouse/ClickHouse/pull/29281) ([凌涛](https://github.com/lingtaolf)).
* If a query has multiple quantile aggregate functions with the same arguments but different level parameters, they will be fused together and executed in one pass if the setting `optimize_syntax_fuse_functions` is enabled. [#26657](https://github.com/ClickHouse/ClickHouse/pull/26657) ([hexiaoting](https://github.com/hexiaoting)).
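For illustration, a minimal sketch of the fusion (the table and column names are hypothetical, not from the PR):

```sql
SET optimize_syntax_fuse_functions = 1;

-- Both aggregates share the same argument, so they are fused and the data
-- is scanned once, as if quantiles(0.5, 0.9)(response_time) had been written.
SELECT
    quantile(0.5)(response_time),
    quantile(0.9)(response_time)
FROM requests;
```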
* Now min-max aggregation over the first expression of primary key is optimized by projection. This is for [#329](https://github.com/ClickHouse/ClickHouse/issues/329). [#29918](https://github.com/ClickHouse/ClickHouse/pull/29918) ([Amos Bird](https://github.com/amosbird)).

#### Experimental Feature

* Add ability to change nodes configuration (in `.xml` file) for ClickHouse Keeper. [#30372](https://github.com/ClickHouse/ClickHouse/pull/30372) ([alesapin](https://github.com/alesapin)).
* Add `sparkbar` aggregate function. This closes [#26175](https://github.com/ClickHouse/ClickHouse/issues/26175). [#27481](https://github.com/ClickHouse/ClickHouse/pull/27481) ([小路](https://github.com/nicelulu)). Note: there is one flaw in this function; the behaviour will be changed in future releases.
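A hedged usage sketch (the table and column names are hypothetical, and the exact output may change given the flaw noted above):

```sql
-- Draw a 9-character sparkline of hits per day; the first argument is the
-- width in characters, then (x, y) pairs are bucketed along x.
SELECT sparkbar(9)(event_date, hits) FROM daily_hits;
```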

#### Improvement

* Allow users to change log levels without a restart. [#29586](https://github.com/ClickHouse/ClickHouse/pull/29586) ([Nikolay Degterinsky](https://github.com/evillique)).
* Multiple improvements for SQL UDF. Queries for manipulation of SQL User Defined Functions now support the ON CLUSTER clause. Example `CREATE FUNCTION test_function ON CLUSTER 'cluster' AS x -> x + 1;`. Closes [#30666](https://github.com/ClickHouse/ClickHouse/issues/30666). [#30734](https://github.com/ClickHouse/ClickHouse/pull/30734) ([Maksim Kita](https://github.com/kitaisreal)). Support `CREATE OR REPLACE` and `CREATE IF NOT EXISTS` syntaxes. [#30454](https://github.com/ClickHouse/ClickHouse/pull/30454) ([Maksim Kita](https://github.com/kitaisreal)). Added DROP IF EXISTS support. Example `DROP FUNCTION IF EXISTS test_function`. [#30437](https://github.com/ClickHouse/ClickHouse/pull/30437) ([Maksim Kita](https://github.com/kitaisreal)). Support lambdas. Example `CREATE FUNCTION lambda_function AS x -> arrayMap(element -> element * 2, x);`. [#30435](https://github.com/ClickHouse/ClickHouse/pull/30435) ([Maksim Kita](https://github.com/kitaisreal)). Support SQL user defined functions for `clickhouse-local`. [#30179](https://github.com/ClickHouse/ClickHouse/pull/30179) ([Maksim Kita](https://github.com/kitaisreal)).
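Taken together, the new DDL looks roughly like this (function names are illustrative; the cluster name is a placeholder):

```sql
CREATE FUNCTION IF NOT EXISTS plus_one AS x -> x + 1;
CREATE OR REPLACE FUNCTION plus_one AS x -> x + 1;

-- Lambdas are supported inside function bodies:
CREATE FUNCTION lambda_function AS x -> arrayMap(element -> element * 2, x);

-- The DDL can be propagated to all nodes of a cluster:
CREATE FUNCTION test_function ON CLUSTER 'cluster' AS x -> x + 1;

DROP FUNCTION IF EXISTS plus_one;
```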
* Enable the per-query memory profiler (with `memory_profiler_step` = 4 MiB) globally. [#29455](https://github.com/ClickHouse/ClickHouse/pull/29455) ([Azat Khuzhin](https://github.com/azat)).
* Added columns `data_compressed_bytes`, `data_uncompressed_bytes`, `marks_bytes` into `system.data_skipping_indices`. Added columns `secondary_indices_compressed_bytes`, `secondary_indices_uncompressed_bytes`, `secondary_indices_marks_bytes` into `system.parts`. Closes [#29697](https://github.com/ClickHouse/ClickHouse/issues/29697). [#29896](https://github.com/ClickHouse/ClickHouse/pull/29896) ([Maksim Kita](https://github.com/kitaisreal)).
* Add a `table` alias to `system.tables` and a `database` alias to `system.databases` [#29677](https://github.com/ClickHouse/ClickHouse/issues/29677). [#29882](https://github.com/ClickHouse/ClickHouse/pull/29882) ([kevin wan](https://github.com/MaxWk)).
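A small illustration of the new aliases:

```sql
-- `table` is an alias for `name` in system.tables,
-- and `database` is an alias for `name` in system.databases:
SELECT database, table FROM system.tables LIMIT 5;
SELECT database FROM system.databases;
```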
* Correctly resolve interdependencies between tables on server startup. Closes [#8004](https://github.com/ClickHouse/ClickHouse/issues/8004), closes [#15170](https://github.com/ClickHouse/ClickHouse/issues/15170). [#28373](https://github.com/ClickHouse/ClickHouse/pull/28373) ([tavplubix](https://github.com/tavplubix)).
* Avoid the error "Division by zero" when the denominator is Nullable in functions `divide`, `intDiv` and `modulo`. Closes [#22621](https://github.com/ClickHouse/ClickHouse/issues/22621). [#28352](https://github.com/ClickHouse/ClickHouse/pull/28352) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow to parse values of the `Date` data type in text formats as `YYYYMMDD` in addition to `YYYY-MM-DD`. This closes [#30870](https://github.com/ClickHouse/ClickHouse/issues/30870). [#30871](https://github.com/ClickHouse/ClickHouse/pull/30871) ([alexey-milovidov](https://github.com/alexey-milovidov)).
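A quick check of the new behaviour (assuming the same parser is used for string-to-`Date` conversion):

```sql
-- Both literals should now parse to the same Date value:
SELECT toDate('20211101') = toDate('2021-11-01');
```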
* Web UI: render bars in table cells. [#29792](https://github.com/ClickHouse/ClickHouse/pull/29792) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Users can now create dictionaries with comments: `CREATE DICTIONARY ... COMMENT 'value'` ... [#29899](https://github.com/ClickHouse/ClickHouse/pull/29899) ([Vasily Nemkov](https://github.com/Enmk)). Users can now also set comments on a database in the `CREATE DATABASE` statement ... [#29429](https://github.com/ClickHouse/ClickHouse/pull/29429) ([Vasily Nemkov](https://github.com/Enmk)).
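For example (the database name is illustrative; the `comment` column in `system.databases` is assumed to expose the value):

```sql
CREATE DATABASE staging COMMENT 'temporary analytics sandbox';

SELECT name, comment FROM system.databases WHERE name = 'staging';
```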
* Introduce the `compiled_expression_cache_elements_size` setting. If you will ever want to use this setting, you will already know what it does. [#30667](https://github.com/ClickHouse/ClickHouse/pull/30667) ([Maksim Kita](https://github.com/kitaisreal)).
* `clickhouse-format` now supports the option `--query`. In previous versions you had to pass the query via stdin. [#29325](https://github.com/ClickHouse/ClickHouse/pull/29325) ([凌涛](https://github.com/lingtaolf)).
* Support `ALTER TABLE` for tables in `Memory` databases. Memory databases are used in `clickhouse-local`. [#30866](https://github.com/ClickHouse/ClickHouse/pull/30866) ([tavplubix](https://github.com/tavplubix)).
* Arrays of all serializable types are now supported by `arrayStringConcat`. [#30840](https://github.com/ClickHouse/ClickHouse/pull/30840) ([Nickita Taranov](https://github.com/nickitat)).
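A small illustration; previously only string arrays worked:

```sql
-- Non-string elements are serialized to their text form before joining:
SELECT arrayStringConcat([1, 2, 3], '-');  -- '1-2-3'
```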
* ClickHouse will now take Docker/cgroups limitations into account to get the system memory amount. See [#25662](https://github.com/ClickHouse/ClickHouse/issues/25662). [#30574](https://github.com/ClickHouse/ClickHouse/pull/30574) ([Pavel Medvedev](https://github.com/pmed)).
* Fetching the table structure for a PostgreSQL database is more reliable now. [#30477](https://github.com/ClickHouse/ClickHouse/pull/30477) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Full support of positional arguments in GROUP BY and ORDER BY. [#30433](https://github.com/ClickHouse/ClickHouse/pull/30433) ([Kseniia Sumarokova](https://github.com/kssenii)).
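A sketch of the feature (the table name is hypothetical; the `enable_positional_arguments` setting that guards it is assumed here):

```sql
SET enable_positional_arguments = 1;

-- 1 and 2 refer to the first and second items of the SELECT list:
SELECT event_date, count() AS c
FROM hits
GROUP BY 1
ORDER BY 2 DESC;
```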
* Allow extracting a non-string element as a string using `JSONExtractString`. This is for [pull/25452#issuecomment-927123287](https://github.com/ClickHouse/ClickHouse/pull/25452#issuecomment-927123287). [#30426](https://github.com/ClickHouse/ClickHouse/pull/30426) ([Amos Bird](https://github.com/amosbird)).
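An illustration of the relaxed behaviour (previously a non-string element yielded the empty default):

```sql
-- The element under 'a' is a number, but it can now be extracted as a string:
SELECT JSONExtractString('{"a": 123}', 'a');  -- '123'
```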
* Added the ability to use the FINAL clause in SELECT queries from `GraphiteMergeTree`. [#30360](https://github.com/ClickHouse/ClickHouse/pull/30360) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Minor improvements in replica cloning and enqueuing fetches for broken parts, which should avoid extremely rare hanging of `GET_PART` entries in the replication queue. [#30346](https://github.com/ClickHouse/ClickHouse/pull/30346) ([tavplubix](https://github.com/tavplubix)).
* Allow symlinks to files in the `user_files` directory for the `file` table function. [#30309](https://github.com/ClickHouse/ClickHouse/pull/30309) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed comparison of `Date32` with `Date`, `DateTime`, `DateTime64` and `String`. [#30219](https://github.com/ClickHouse/ClickHouse/pull/30219) ([liang.huang](https://github.com/lhuang09287750)).
* Allow to remove `SAMPLE BY` expression from `MergeTree` tables (`ALTER TABLE <table> REMOVE SAMPLE BY`). [#30180](https://github.com/ClickHouse/ClickHouse/pull/30180) ([Anton Popov](https://github.com/CurtizJ)).
* Now `Keeper` (as part of `clickhouse-server`) will start asynchronously if it can connect to some other node. [#30170](https://github.com/ClickHouse/ClickHouse/pull/30170) ([alesapin](https://github.com/alesapin)).
* Now `clickhouse-client` supports native multi-line editing. [#30143](https://github.com/ClickHouse/ClickHouse/pull/30143) ([Amos Bird](https://github.com/amosbird)).
* `polygon` dictionaries (reverse geocoding): added support for reading the dictionary content with the SELECT query method if the setting `store_polygon_key_column` = true. Closes [#30090](https://github.com/ClickHouse/ClickHouse/issues/30090). [#30142](https://github.com/ClickHouse/ClickHouse/pull/30142) ([Maksim Kita](https://github.com/kitaisreal)).
* Add ClickHouse logo to Play UI. [#29674](https://github.com/ClickHouse/ClickHouse/pull/29674) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Better exception message while reading a column from Arrow-supported formats like `Arrow`, `ArrowStream`, `Parquet` and `ORC`. This closes [#29926](https://github.com/ClickHouse/ClickHouse/issues/29926). [#29927](https://github.com/ClickHouse/ClickHouse/pull/29927) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix a data race between flush and startup in `Buffer` tables. This can appear in tests. [#29930](https://github.com/ClickHouse/ClickHouse/pull/29930) ([Azat Khuzhin](https://github.com/azat)).
* Fix `lock-order-inversion` between `DROP TABLE` for `DatabaseMemory` and `LiveView`. Live View is an experimental feature. The Memory database is used in clickhouse-local. [#29929](https://github.com/ClickHouse/ClickHouse/pull/29929) ([Azat Khuzhin](https://github.com/azat)).
* Fix a lock-order-inversion between periodic dictionary reload and config reload. [#29928](https://github.com/ClickHouse/ClickHouse/pull/29928) ([Azat Khuzhin](https://github.com/azat)).
* Update zoneinfo files to 2021c. [#29925](https://github.com/ClickHouse/ClickHouse/pull/29925) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add ability to configure retries and delays between them for `clickhouse-copier`. [#29921](https://github.com/ClickHouse/ClickHouse/pull/29921) ([Azat Khuzhin](https://github.com/azat)).
* Add a `shutdown_wait_unfinished_queries` server setting to allow waiting for running queries up to `shutdown_wait_unfinished` time. This is for [#24451](https://github.com/ClickHouse/ClickHouse/issues/24451). [#29914](https://github.com/ClickHouse/ClickHouse/pull/29914) ([Amos Bird](https://github.com/amosbird)).
* Add ability to trace peak memory usage (with a new trace type in `system.trace_log` - `MemoryPeak`). [#29858](https://github.com/ClickHouse/ClickHouse/pull/29858) ([Azat Khuzhin](https://github.com/azat)).
* PostgreSQL foreign tables: added the partitioned table prefix 'p' to the query for fetching the replica identity index. [#29828](https://github.com/ClickHouse/ClickHouse/pull/29828) ([Shoh Jahon](https://github.com/Shohjahon)).
* Apply `max_untracked_memory`/`memory_profiler_step`/`memory_profiler_sample_probability` during mutate/merge to profile memory usage during merges. [#29681](https://github.com/ClickHouse/ClickHouse/pull/29681) ([Azat Khuzhin](https://github.com/azat)).
* Query obfuscator: `clickhouse-format --obfuscate` now works with more types of queries. [#29672](https://github.com/ClickHouse/ClickHouse/pull/29672) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the issue that `clickhouse-format --obfuscate` could not process queries with embedded dictionaries (functions `regionTo...`). [#29667](https://github.com/ClickHouse/ClickHouse/pull/29667) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect Nullable processing of JSON functions. This fixes [#29615](https://github.com/ClickHouse/ClickHouse/issues/29615). Marked as an improvement because https://github.com/ClickHouse/ClickHouse/pull/28012 is not released. [#29659](https://github.com/ClickHouse/ClickHouse/pull/29659) ([Amos Bird](https://github.com/amosbird)).
* Increase `listen_backlog` by default (to match the default in newer Linux kernels). [#29643](https://github.com/ClickHouse/ClickHouse/pull/29643) ([Azat Khuzhin](https://github.com/azat)).
* Reload dictionaries, models and user defined executable functions if the server config `dictionaries_config`, `models_config`, `user_defined_executable_functions_config` changes. Closes [#28142](https://github.com/ClickHouse/ClickHouse/issues/28142). [#29529](https://github.com/ClickHouse/ClickHouse/pull/29529) ([Maksim Kita](https://github.com/kitaisreal)).
* Get rid of the pointless restriction on projection names. Now projection names can start with `tmp_`. [#29520](https://github.com/ClickHouse/ClickHouse/pull/29520) ([Amos Bird](https://github.com/amosbird)).
* Fixed the `There is no query or query context has expired` error in mutations with nested subqueries. Do not allow subqueries in mutations if the table is replicated and the `allow_nondeterministic_mutations` setting is disabled. [#29495](https://github.com/ClickHouse/ClickHouse/pull/29495) ([tavplubix](https://github.com/tavplubix)).
* Apply config changes to `max_concurrent_queries` during runtime (no need to restart). [#29414](https://github.com/ClickHouse/ClickHouse/pull/29414) ([Raúl Marín](https://github.com/Algunenano)).
* Added setting `use_skip_indexes`. [#29405](https://github.com/ClickHouse/ClickHouse/pull/29405) ([Maksim Kita](https://github.com/kitaisreal)).
* Add support for `FREEZE`ing in-memory parts (for backups). [#29376](https://github.com/ClickHouse/ClickHouse/pull/29376) ([Mo Xuan](https://github.com/mo-avatar)).
* Pass through the initial query_id for `clickhouse-benchmark` (previously, if you ran a remote query via `clickhouse-benchmark`, queries on shards were not linked to the initial query via `initial_query_id`). [#29364](https://github.com/ClickHouse/ClickHouse/pull/29364) ([Azat Khuzhin](https://github.com/azat)).
* Skip indexes `tokenbf_v1` and `ngrambf_v1`: added support for the `Array` data type with keys of `String` or `FixedString` type. [#29280](https://github.com/ClickHouse/ClickHouse/pull/29280) ([Maksim Kita](https://github.com/kitaisreal)). Skip indexes `tokenbf_v1` and `ngrambf_v1` added support for the `Map` data type with keys of `String` or `FixedString` type. Author @lingtaolf. [#29220](https://github.com/ClickHouse/ClickHouse/pull/29220) ([Maksim Kita](https://github.com/kitaisreal)).
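A sketch of a table definition that becomes possible (all names are illustrative; the `tokenbf_v1` parameters are the usual bloom filter size in bytes, number of hash functions and seed):

```sql
CREATE TABLE events
(
    id UInt64,
    tags Array(String),
    INDEX tags_idx tags TYPE tokenbf_v1(512, 3, 0) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY id;

-- The skip index can now help prune granules for queries like:
SELECT count() FROM events WHERE has(tags, 'error');
```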
* Function `has`: added support for the `Map` data type. [#29267](https://github.com/ClickHouse/ClickHouse/pull/29267) ([Maksim Kita](https://github.com/kitaisreal)).
* Add a `compress_logs` setting for clickhouse-keeper which allows compressing clickhouse-keeper logs (for the replicated state machine) with `ZSTD`. Implements: [#26977](https://github.com/ClickHouse/ClickHouse/issues/26977). [#29223](https://github.com/ClickHouse/ClickHouse/pull/29223) ([alesapin](https://github.com/alesapin)).
* Add a setting `external_table_strict_query` - it will force passing the whole WHERE expression in queries to foreign databases even if it is incompatible. [#29206](https://github.com/ClickHouse/ClickHouse/pull/29206) ([Azat Khuzhin](https://github.com/azat)).
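A hedged sketch of the setting in use (the MySQL connection parameters are placeholders):

```sql
SET external_table_strict_query = 1;

-- With the setting enabled, the whole WHERE expression must be passed to the
-- external database; the query fails instead of silently filtering on the
-- ClickHouse side when the expression cannot be converted.
SELECT *
FROM mysql('host:3306', 'db', 'table', 'user', 'password')
WHERE id > 100 AND name LIKE 'a%';
```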
* Disable projections when `ARRAY JOIN` is used. In previous versions projection analysis might break aliases in ARRAY JOIN. [#29139](https://github.com/ClickHouse/ClickHouse/pull/29139) ([Amos Bird](https://github.com/amosbird)).
* Support more types in the `MsgPack` input/output format. [#29077](https://github.com/ClickHouse/ClickHouse/pull/29077) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow to input and output `LowCardinality` columns in the `ORC` input/output format. [#29062](https://github.com/ClickHouse/ClickHouse/pull/29062) ([Kruglov Pavel](https://github.com/Avogar)).
* Select from `system.distributed_ddl_queue` might show incorrect values; it's fixed. [#29061](https://github.com/ClickHouse/ClickHouse/pull/29061) ([tavplubix](https://github.com/tavplubix)).
* Correct behaviour with unknown methods for HTTP connections. Solves [#29050](https://github.com/ClickHouse/ClickHouse/issues/29050). [#29057](https://github.com/ClickHouse/ClickHouse/pull/29057) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* `clickhouse-keeper`: Fix a bug in `clickhouse-keeper-converter` which can lead to some data loss while restoring from ZooKeeper logs (not snapshot). [#29030](https://github.com/ClickHouse/ClickHouse/pull/29030) ([小路](https://github.com/nicelulu)). Fix a bug in `clickhouse-keeper-converter` which can lead to incorrect ZooKeeper log deserialization. [#29071](https://github.com/ClickHouse/ClickHouse/pull/29071) ([小路](https://github.com/nicelulu)).
* Apply settings from `CREATE ... AS SELECT` queries (fixes: [#28810](https://github.com/ClickHouse/ClickHouse/issues/28810)). [#28962](https://github.com/ClickHouse/ClickHouse/pull/28962) ([Azat Khuzhin](https://github.com/azat)).
* Respect the default database setting for `ALTER TABLE ... ON CLUSTER ... REPLACE/MOVE PARTITION FROM/TO ...`. [#28955](https://github.com/ClickHouse/ClickHouse/pull/28955) ([anneji-dev](https://github.com/anneji-dev)).
* gRPC protocol: Allow changing server-side compression from the client. [#28953](https://github.com/ClickHouse/ClickHouse/pull/28953) ([Vitaly Baranov](https://github.com/vitlibar)).
* Skip the "no data" exception when reading thermal sensors for asynchronous metrics. This closes [#28852](https://github.com/ClickHouse/ClickHouse/issues/28852). [#28882](https://github.com/ClickHouse/ClickHouse/pull/28882) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a logical race condition that might cause a `Dictionary not found` error for an existing dictionary in rare cases. [#28853](https://github.com/ClickHouse/ClickHouse/pull/28853) ([tavplubix](https://github.com/tavplubix)).
* Relax the nested function check for the If-combinator (but forbid nested identical combinators). [#28828](https://github.com/ClickHouse/ClickHouse/pull/28828) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible uncaught exception during server termination. [#28761](https://github.com/ClickHouse/ClickHouse/pull/28761) ([Azat Khuzhin](https://github.com/azat)).
* Forbid cleaning of tmp directories that can be used by an active mutation/merge if the mutation/merge is extraordinarily long. [#28760](https://github.com/ClickHouse/ClickHouse/pull/28760) ([Azat Khuzhin](https://github.com/azat)).
* Allow the optimization `optimize_arithmetic_operations_in_aggregate_functions = 1` when an alias is used. [#28746](https://github.com/ClickHouse/ClickHouse/pull/28746) ([Amos Bird](https://github.com/amosbird)).
* Implement the `detach_not_byte_identical_parts` setting for `ReplicatedMergeTree`, which will detach instead of removing non-byte-identical parts (after merge/mutate). [#28708](https://github.com/ClickHouse/ClickHouse/pull/28708) ([Azat Khuzhin](https://github.com/azat)).
* Implement the `max_suspicious_broken_parts_bytes` setting for `MergeTree` (to limit the total size of all broken parts; the default is `1GiB`). [#28707](https://github.com/ClickHouse/ClickHouse/pull/28707) ([Azat Khuzhin](https://github.com/azat)).
* Enable expanding macros in `RabbitMQ` table settings. [#28683](https://github.com/ClickHouse/ClickHouse/pull/28683) ([Vitaly Baranov](https://github.com/vitlibar)).
* Restore the possibility to read data of a table using the `Log` engine in multiple threads. [#28125](https://github.com/ClickHouse/ClickHouse/pull/28125) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix misbehavior of NULL column handling in JSON functions. This fixes [#27930](https://github.com/ClickHouse/ClickHouse/issues/27930). [#28012](https://github.com/ClickHouse/ClickHouse/pull/28012) ([Amos Bird](https://github.com/amosbird)).
* Allow setting the size of the Mark/Uncompressed cache for skip indices separately from columns. [#27961](https://github.com/ClickHouse/ClickHouse/pull/27961) ([Amos Bird](https://github.com/amosbird)).
* Allow mixing JOIN with `USING` with other JOIN types. [#23881](https://github.com/ClickHouse/ClickHouse/pull/23881) ([darkkeks](https://github.com/darkkeks)).
* Update the aws-sdk submodule for throttling in Yandex Cloud S3. [#30646](https://github.com/ClickHouse/ClickHouse/pull/30646) ([ianton-ru](https://github.com/ianton-ru)).
* Fix releasing the query ID and session ID at the end of query processing while handling a gRPC call. [#29954](https://github.com/ClickHouse/ClickHouse/pull/29954) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix shutdown of `AccessControlManager` to fix a flaky test. [#29951](https://github.com/ClickHouse/ClickHouse/pull/29951) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a failed assertion in reading from `HDFS`. Update the libhdfs3 library to be able to run it in tests in debug mode. Closes [#29251](https://github.com/ClickHouse/ClickHouse/issues/29251). Closes [#27814](https://github.com/ClickHouse/ClickHouse/issues/27814). [#29276](https://github.com/ClickHouse/ClickHouse/pull/29276) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Build/Testing/Packaging Improvement

* Add support for FreeBSD builds for AArch64 machines. [#29952](https://github.com/ClickHouse/ClickHouse/pull/29952) ([MikaelUrankar](https://github.com/MikaelUrankar)).
* Recursive submodules are no longer needed for ClickHouse. [#30315](https://github.com/ClickHouse/ClickHouse/pull/30315) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* ClickHouse can be statically built with Musl. This is added as an experiment; it does not support building `odbc-bridge`, `library-bridge`, integration with CatBoost and some libraries. [#30248](https://github.com/ClickHouse/ClickHouse/pull/30248) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Enable `Protobuf`, `Arrow`, `ORC`, `Parquet` for `AArch64` and `Darwin` (macOS) builds. This closes [#29248](https://github.com/ClickHouse/ClickHouse/issues/29248). This closes [#28018](https://github.com/ClickHouse/ClickHouse/issues/28018). [#30015](https://github.com/ClickHouse/ClickHouse/pull/30015) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add cross-build for PowerPC (powerpc64le). This closes [#9589](https://github.com/ClickHouse/ClickHouse/issues/9589). Enable support for interaction with MySQL for AArch64 and PowerPC. This closes [#26301](https://github.com/ClickHouse/ClickHouse/issues/26301). [#30010](https://github.com/ClickHouse/ClickHouse/pull/30010) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Leave only required files in cross-compile toolchains. Include them as submodules (earlier they were downloaded as tarballs). [#29974](https://github.com/ClickHouse/ClickHouse/pull/29974) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Implemented a structure-aware fuzzing approach in ClickHouse for the SELECT statement parser. [#30012](https://github.com/ClickHouse/ClickHouse/pull/30012) ([Paul](https://github.com/PaulCher)).
* Turn on the experimental constexpr expression evaluator for clang to speed up template code compilation. [#29668](https://github.com/ClickHouse/ClickHouse/pull/29668) ([myrrc](https://github.com/myrrc)).
* Add ability to compile using a newer version of glibc without using new symbols. [#29594](https://github.com/ClickHouse/ClickHouse/pull/29594) ([Azat Khuzhin](https://github.com/azat)).
* Reduce the Debug build binary size via a clang optimization option. [#28736](https://github.com/ClickHouse/ClickHouse/pull/28736) ([flynn](https://github.com/ucasfl)).
* Now all images for CI will be placed in a separate DockerHub repo. [#28656](https://github.com/ClickHouse/ClickHouse/pull/28656) ([alesapin](https://github.com/alesapin)).
* Improve support for build with clang-13. [#28046](https://github.com/ClickHouse/ClickHouse/pull/28046) ([Sergei Semin](https://github.com/syominsergey)).
* Add ability to print raw profile events in `clickhouse-client` (this can be useful for debugging and for testing). [#30064](https://github.com/ClickHouse/ClickHouse/pull/30064) ([Azat Khuzhin](https://github.com/azat)).
* Add a time dependency for the clickhouse-server unit (systemd and sysvinit init). [#28891](https://github.com/ClickHouse/ClickHouse/pull/28891) ([Azat Khuzhin](https://github.com/azat)).
* Reload the stacktrace cache when a symbol is reloaded. [#28137](https://github.com/ClickHouse/ClickHouse/pull/28137) ([Amos Bird](https://github.com/amosbird)).

#### Bug Fix

* Functions for case-insensitive search in UTF-8 strings like `positionCaseInsensitiveUTF8` and `countSubstringsCaseInsensitiveUTF8` might find substrings that actually do not match in very rare cases; it's fixed. [#30663](https://github.com/ClickHouse/ClickHouse/pull/30663) ([tavplubix](https://github.com/tavplubix)).
* Fix reading from an empty file on an encrypted disk. [#30494](https://github.com/ClickHouse/ClickHouse/pull/30494) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix transformation of disjunction chains to `IN` (controlled by the setting `optimize_min_equality_disjunction_chain_length`) in distributed queries with the setting `legacy_column_name_of_tuple_literal = 0`. [#28658](https://github.com/ClickHouse/ClickHouse/pull/28658) ([Anton Popov](https://github.com/CurtizJ)).
* Allow using a materialized column as the sharding key in a distributed table even if `insert_allow_materialized_columns=0`. [#28637](https://github.com/ClickHouse/ClickHouse/pull/28637) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix `ORDER BY ... WITH FILL` with set `TO` and `FROM` and no rows in the result set. [#30888](https://github.com/ClickHouse/ClickHouse/pull/30888) ([Anton Popov](https://github.com/CurtizJ)).
* Fix the set index not being used in AND/OR expressions when there are more than two operands. This fixes [#30416](https://github.com/ClickHouse/ClickHouse/issues/30416). [#30887](https://github.com/ClickHouse/ClickHouse/pull/30887) ([Amos Bird](https://github.com/amosbird)).
* Fix crash when a projection with a hashing function is materialized. This fixes [#30861](https://github.com/ClickHouse/ClickHouse/issues/30861). The issue is similar to https://github.com/ClickHouse/ClickHouse/pull/28560, which is a lack of proper understanding of the invariant of the header's emptiness. [#30877](https://github.com/ClickHouse/ClickHouse/pull/30877) ([Amos Bird](https://github.com/amosbird)).
* Fixed ambiguity when extracting an auxiliary ZooKeeper name from a ZooKeeper path in `ReplicatedMergeTree`. Previously the server might fail to start with `Unknown auxiliary ZooKeeper name` if the ZooKeeper path contained a colon. Fixes [#29052](https://github.com/ClickHouse/ClickHouse/issues/29052). Previously it was also allowed to specify a ZooKeeper path that does not start with a slash, but now this is deprecated and creation of new tables with such paths is not allowed. Slashes and colons in auxiliary ZooKeeper names are not allowed either. [#30822](https://github.com/ClickHouse/ClickHouse/pull/30822) ([tavplubix](https://github.com/tavplubix)).
* Clean the temporary directory when localBackup failed for some reason. [#30797](https://github.com/ClickHouse/ClickHouse/pull/30797) ([ianton-ru](https://github.com/ianton-ru)).
* Fixed a race condition between `REPLACE/MOVE PARTITION` and background merge in non-replicated `MergeTree` that might cause a part of moved/replaced data to remain in the partition. Fixes [#29327](https://github.com/ClickHouse/ClickHouse/issues/29327). [#30717](https://github.com/ClickHouse/ClickHouse/pull/30717) ([tavplubix](https://github.com/tavplubix)).
* Fix PREWHERE with WHERE in case of an always-true PREWHERE. [#30668](https://github.com/ClickHouse/ClickHouse/pull/30668) ([Azat Khuzhin](https://github.com/azat)).
* The limit push down optimization could cause an error `Cannot find column`. Fixes [#30438](https://github.com/ClickHouse/ClickHouse/issues/30438). [#30562](https://github.com/ClickHouse/ClickHouse/pull/30562) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add missing parentheses for `isNotNull`/`isNull` rewrites to `IS [NOT] NULL` (fixes queries that have something like `isNotNull(1)+isNotNull(2)`). [#30520](https://github.com/ClickHouse/ClickHouse/pull/30520) ([Azat Khuzhin](https://github.com/azat)).
* Fix deadlock on ALTER with a scalar subquery to the same table, close [#30461](https://github.com/ClickHouse/ClickHouse/issues/30461). [#30492](https://github.com/ClickHouse/ClickHouse/pull/30492) ([Vladimir C](https://github.com/vdimir)).
* Fixed a segfault which might happen if the session expired during execution of REPLACE PARTITION. [#30432](https://github.com/ClickHouse/ClickHouse/pull/30432) ([tavplubix](https://github.com/tavplubix)).
* Queries with a condition like `IN (subquery)` could return an incorrect result in case an aggregate projection was applied. Fixed creation of sets for projections. [#30310](https://github.com/ClickHouse/ClickHouse/pull/30310) ([Amos Bird](https://github.com/amosbird)).
* Fix column alias resolution of JOIN queries when projection is enabled. This fixes [#30146](https://github.com/ClickHouse/ClickHouse/issues/30146). [#30293](https://github.com/ClickHouse/ClickHouse/pull/30293) ([Amos Bird](https://github.com/amosbird)).
* Fix a deficiency in the `replaceRegexpAll` function. [#30292](https://github.com/ClickHouse/ClickHouse/pull/30292) ([Memo](https://github.com/Joeywzr)).
* Fix `ComplexKeyHashedDictionary` and `ComplexKeySparseHashedDictionary` parsing the `preallocate` option from layout config. [#30246](https://github.com/ClickHouse/ClickHouse/pull/30246) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix the `[I]LIKE` function. Closes [#28661](https://github.com/ClickHouse/ClickHouse/issues/28661). [#30244](https://github.com/ClickHouse/ClickHouse/pull/30244) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix crash with short-circuit evaluation and `LowCardinality` in `multiIf`. [#30243](https://github.com/ClickHouse/ClickHouse/pull/30243) ([Raúl Marín](https://github.com/Algunenano)).
* `FlatDictionary`, `HashedDictionary`: fix the `bytes_allocated` calculation for nullable attributes. [#30238](https://github.com/ClickHouse/ClickHouse/pull/30238) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow identifiers starting with numbers in multiple joins. [#30230](https://github.com/ClickHouse/ClickHouse/pull/30230) ([Vladimir C](https://github.com/vdimir)).
* Fix reading from `MergeTree` with `max_read_buffer_size = 0` (when the user wants to shoot himself in the foot); this could lead to exceptions `Can't adjust last granule`, `LOGICAL_ERROR`, or even data loss. [#30192](https://github.com/ClickHouse/ClickHouse/pull/30192) ([Azat Khuzhin](https://github.com/azat)).
* Fix `pread_fake_async`/`pread_threadpool` with `min_bytes_to_use_direct_io`. [#30191](https://github.com/ClickHouse/ClickHouse/pull/30191) ([Azat Khuzhin](https://github.com/azat)).
* Fix INSERT SELECT incorrectly filling a MATERIALIZED column based on a Nullable column. [#30189](https://github.com/ClickHouse/ClickHouse/pull/30189) ([Azat Khuzhin](https://github.com/azat)).
* Support nullable arguments in the function `initializeAggregation`. [#30177](https://github.com/ClickHouse/ClickHouse/pull/30177) ([Anton Popov](https://github.com/CurtizJ)).
* Fix the error `Port is already connected` for queries with `GLOBAL IN` and `WITH TOTALS`. Only for 21.9 and 21.10. [#30086](https://github.com/ClickHouse/ClickHouse/pull/30086) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a race between MOVE PARTITION and merges/mutations for MergeTree. [#30074](https://github.com/ClickHouse/ClickHouse/pull/30074) ([Azat Khuzhin](https://github.com/azat)).
* A dropped `Memory` database might reappear after server restart; it's fixed ([#29795](https://github.com/ClickHouse/ClickHouse/issues/29795)). Also added the `force_remove_data_recursively_on_drop` setting as a workaround for the `Directory not empty` error when dropping an `Ordinary` database (because it's not possible to remove data leftovers manually in a cloud environment). [#30054](https://github.com/ClickHouse/ClickHouse/pull/30054) ([tavplubix](https://github.com/tavplubix)).
* Fix crash of `SAMPLE BY tuple()`, closes [#30004](https://github.com/ClickHouse/ClickHouse/issues/30004). [#30016](https://github.com/ClickHouse/ClickHouse/pull/30016) ([flynn](https://github.com/ucasfl)).
* Try to close issue [#29965](https://github.com/ClickHouse/ClickHouse/issues/29965). [#29976](https://github.com/ClickHouse/ClickHouse/pull/29976) ([hexiaoting](https://github.com/hexiaoting)).
* Fix a possible data race between `FileChecker` and `StorageLog`/`StorageStripeLog`. [#29959](https://github.com/ClickHouse/ClickHouse/pull/29959) ([Azat Khuzhin](https://github.com/azat)).
* Fix a data race between `LogSink::writeMarks()` and `LogSource` in `StorageLog`. [#29946](https://github.com/ClickHouse/ClickHouse/pull/29946) ([Azat Khuzhin](https://github.com/azat)).
* Fix a potential resource leak in the concurrent query limit of merge tree tables introduced in https://github.com/ClickHouse/ClickHouse/pull/19544. [#29879](https://github.com/ClickHouse/ClickHouse/pull/29879) ([Amos Bird](https://github.com/amosbird)).
* Fix the system tables recreation check (it failed to detect changes in enum values). [#29857](https://github.com/ClickHouse/ClickHouse/pull/29857) ([Azat Khuzhin](https://github.com/azat)).
* MaterializedMySQL: Fix an issue where, if the connection to MySQL was lost, only parts of a transaction could be processed. [#29837](https://github.com/ClickHouse/ClickHouse/pull/29837) ([Håvard Kvålen](https://github.com/havardk)).
* Avoid the `Timeout exceeded: elapsed 18446744073.709553 seconds` error that might happen in extremely rare cases, presumably due to some bug in the kernel. Fixes [#29154](https://github.com/ClickHouse/ClickHouse/issues/29154). [#29811](https://github.com/ClickHouse/ClickHouse/pull/29811) ([tavplubix](https://github.com/tavplubix)).
* Fix bad cast in the `ATTACH TABLE ... FROM 'path'` query when a non-string literal is used instead of a path. It may lead to reading of uninitialized memory. [#29790](https://github.com/ClickHouse/ClickHouse/pull/29790) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix concurrent access to `LowCardinality` during `GROUP BY` (in combination with `Buffer` tables it may lead to troubles). [#29782](https://github.com/ClickHouse/ClickHouse/pull/29782) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect `GROUP BY` (multiple rows with the same keys in the result) in case of a distributed query when shards had mixed versions `<= 21.3` and `>= 21.4`, the `GROUP BY` key had several columns all with fixed size, and two-level aggregation was activated (see `group_by_two_level_threshold` and `group_by_two_level_threshold_bytes`). Fixes [#29580](https://github.com/ClickHouse/ClickHouse/issues/29580). [#29735](https://github.com/ClickHouse/ClickHouse/pull/29735) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed incorrect behaviour of the setting `materialized_postgresql_tables_list` at server restart. Found in [#28529](https://github.com/ClickHouse/ClickHouse/issues/28529). [#29686](https://github.com/ClickHouse/ClickHouse/pull/29686) ([Kseniia Sumarokova](https://github.com/kssenii)).
* A condition in a filter predicate could be lost after the push-down optimisation. [#29625](https://github.com/ClickHouse/ClickHouse/pull/29625) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix JIT expression compilation with aliases and short-circuit expression evaluation. Closes [#29403](https://github.com/ClickHouse/ClickHouse/issues/29403). [#29574](https://github.com/ClickHouse/ClickHouse/pull/29574) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix a rare segfault in the `ALTER MODIFY` query when using an incorrect table identifier in a `DEFAULT` expression like `x.y.z...`. Fixes [#29184](https://github.com/ClickHouse/ClickHouse/issues/29184). [#29573](https://github.com/ClickHouse/ClickHouse/pull/29573) ([alesapin](https://github.com/alesapin)).
* Fix nullptr dereference for `GROUP BY WITH TOTALS HAVING` (when the column from `HAVING` wasn't selected). [#29553](https://github.com/ClickHouse/ClickHouse/pull/29553) ([Azat Khuzhin](https://github.com/azat)).
* Avoid deadlocks when reading and writing Join table engine tables at the same time. [#29544](https://github.com/ClickHouse/ClickHouse/pull/29544) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a bug in the `pathStartsWith` check caused by incorrect usage of `std::mismatch`: `The behavior is undefined if the second range is shorter than the first range.` [#29531](https://github.com/ClickHouse/ClickHouse/pull/29531) ([Kseniia Sumarokova](https://github.com/kssenii)).
* In the ODBC bridge, add retries for the error `Invalid cursor state`. It is a retriable error. Closes [#29473](https://github.com/ClickHouse/ClickHouse/issues/29473). [#29518](https://github.com/ClickHouse/ClickHouse/pull/29518) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed incorrect table name parsing on loading of a `Lazy` database. Fixes [#29456](https://github.com/ClickHouse/ClickHouse/issues/29456). [#29476](https://github.com/ClickHouse/ClickHouse/pull/29476) ([tavplubix](https://github.com/tavplubix)).
* Fix possible `Block structure mismatch` for subqueries with a pushed-down `HAVING` predicate. Fixes [#29010](https://github.com/ClickHouse/ClickHouse/issues/29010). [#29475](https://github.com/ClickHouse/ClickHouse/pull/29475) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix the logical error `Cannot capture columns` in the functions `greatest`/`least`. Closes [#29334](https://github.com/ClickHouse/ClickHouse/issues/29334). [#29454](https://github.com/ClickHouse/ClickHouse/pull/29454) ([Kruglov Pavel](https://github.com/Avogar)).
* RocksDB table engine: fix a race condition during multiple DB opening (and bring back some tests that trigger the problem on CI). [#29393](https://github.com/ClickHouse/ClickHouse/pull/29393) ([Azat Khuzhin](https://github.com/azat)).
* Fix replicated access storage not shutting down cleanly when misconfigured. [#29388](https://github.com/ClickHouse/ClickHouse/pull/29388) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Remove the window function `nth_value` as it is not memory-safe. This closes [#29347](https://github.com/ClickHouse/ClickHouse/issues/29347). [#29348](https://github.com/ClickHouse/ClickHouse/pull/29348) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix vertical merges of projection parts. This fixes [#29253](https://github.com/ClickHouse/ClickHouse/issues/29253). This PR also fixes several projection merge/mutation issues introduced in https://github.com/ClickHouse/ClickHouse/pull/25165. [#29337](https://github.com/ClickHouse/ClickHouse/pull/29337) ([Amos Bird](https://github.com/amosbird)).
* Fix hanging DDL queries on a Replicated database while adding a new replica. [#29328](https://github.com/ClickHouse/ClickHouse/pull/29328) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Fix connection timeouts (`send_timeout`/`receive_timeout`). [#29282](https://github.com/ClickHouse/ClickHouse/pull/29282) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible `Table columns structure in ZooKeeper is different from local table structure` exception while recreating or creating new replicas of `ReplicatedMergeTree`, when one of the table columns has default expressions with case-insensitive functions. [#29266](https://github.com/ClickHouse/ClickHouse/pull/29266) ([Anton Popov](https://github.com/CurtizJ)).
* Send a normal `Database doesn't exist` error (`UNKNOWN_DATABASE`) to the client (via TCP) instead of `Attempt to read after eof` (`ATTEMPT_TO_READ_AFTER_EOF`). [#29229](https://github.com/ClickHouse/ClickHouse/pull/29229) ([Azat Khuzhin](https://github.com/azat)).
* Fix a segfault while inserting into a column of type LowCardinality(Nullable) in the Avro input format. [#29132](https://github.com/ClickHouse/ClickHouse/pull/29132) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not allow reusing previous credentials in case of an inter-server secret (previously, an INSERT via Buffer/Kafka to a Distributed table with an inter-server secret configured for that cluster might re-use a previously set user for that connection). [#29060](https://github.com/ClickHouse/ClickHouse/pull/29060) ([Azat Khuzhin](https://github.com/azat)).
* Handle `any_join_distinct_right_table_keys` when joining with a dictionary; close [#29007](https://github.com/ClickHouse/ClickHouse/issues/29007). [#29014](https://github.com/ClickHouse/ClickHouse/pull/29014) ([Vladimir C](https://github.com/vdimir)).
* Fix the "Not found column ... in block" error when joining on an alias column; close [#26980](https://github.com/ClickHouse/ClickHouse/issues/26980). [#29008](https://github.com/ClickHouse/ClickHouse/pull/29008) ([Vladimir C](https://github.com/vdimir)).
* Fix the number of threads used in a `GLOBAL IN` subquery (it was executed in a single thread since the [#19414](https://github.com/ClickHouse/ClickHouse/issues/19414) bugfix). [#28997](https://github.com/ClickHouse/ClickHouse/pull/28997) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix bad optimizations of ORDER BY if it contains WITH FILL. This closes [#28908](https://github.com/ClickHouse/ClickHouse/issues/28908). This closes [#26049](https://github.com/ClickHouse/ClickHouse/issues/26049). [#28910](https://github.com/ClickHouse/ClickHouse/pull/28910) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix higher-order array functions (`SIGSEGV` for `arrayCompact`/`ILLEGAL_COLUMN` for `arrayDifference`/`arrayCumSumNonNegative`) with consts. [#28904](https://github.com/ClickHouse/ClickHouse/pull/28904) ([Azat Khuzhin](https://github.com/azat)).
* Fix waiting for mutation with `mutations_sync=2`. [#28889](https://github.com/ClickHouse/ClickHouse/pull/28889) ([Azat Khuzhin](https://github.com/azat)).
* Fix queries to external databases (i.e. MySQL) with multiple columns in IN (i.e. `(k,v) IN ((1, 2))`). [#28888](https://github.com/ClickHouse/ClickHouse/pull/28888) ([Azat Khuzhin](https://github.com/azat)).
* Fix a bug with `LowCardinality` in short-circuit function evaluation. Closes [#28884](https://github.com/ClickHouse/ClickHouse/issues/28884). [#28887](https://github.com/ClickHouse/ClickHouse/pull/28887) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix reading of subcolumns from compact parts. [#28873](https://github.com/ClickHouse/ClickHouse/pull/28873) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a race condition between `DROP PART` and `REPLACE/MOVE PARTITION` that might cause replicas to diverge in rare cases. [#28864](https://github.com/ClickHouse/ClickHouse/pull/28864) ([tavplubix](https://github.com/tavplubix)).
* Fix expression compilation with short-circuit evaluation. [#28821](https://github.com/ClickHouse/ClickHouse/pull/28821) ([Azat Khuzhin](https://github.com/azat)).
* Fix an extremely rare case when ReplicatedMergeTree replicas could diverge after a hard reboot of all replicas. The error looks like `Part ... intersects (previous|next) part ...`. [#28817](https://github.com/ClickHouse/ClickHouse/pull/28817) ([alesapin](https://github.com/alesapin)).
* Better check for connection usability; also catch any exception in `RabbitMQ` shutdown just in case. [#28797](https://github.com/ClickHouse/ClickHouse/pull/28797) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a benign race condition in ReplicatedMergeTreeQueue. It shouldn't be visible to users, but can lead to subtle bugs. [#28734](https://github.com/ClickHouse/ClickHouse/pull/28734) ([alesapin](https://github.com/alesapin)).
* Fix a possible crash for `SELECT` with a partially created aggregate projection in case of exception. [#28700](https://github.com/ClickHouse/ClickHouse/pull/28700) ([Amos Bird](https://github.com/amosbird)).
* Fix the coredump in the creation of distributed tables when the parameters passed in are wrong. [#28686](https://github.com/ClickHouse/ClickHouse/pull/28686) ([Zhiyong Wang](https://github.com/ljcui)).
* Add `Settings.Names` and `Settings.Values` aliases for the `system.processes` table. [#28685](https://github.com/ClickHouse/ClickHouse/pull/28685) ([Vitaly](https://github.com/orloffv)).
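For example:

```sql
-- The aliases expose setting names and values for currently running queries:
SELECT query_id, Settings.Names, Settings.Values
FROM system.processes;
```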
* Support for the S2 Geometry library: fix the number of arguments required by the `s2RectAdd` and `s2RectContains` functions. [#28663](https://github.com/ClickHouse/ClickHouse/pull/28663) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix invalid constant type conversion when a Nullable or LowCardinality primary key is used. [#28636](https://github.com/ClickHouse/ClickHouse/pull/28636) ([Amos Bird](https://github.com/amosbird)).
* Fix "Column is not under aggregate function and not in GROUP BY" with PREWHERE (fixes: [#28461](https://github.com/ClickHouse/ClickHouse/issues/28461)). [#28502](https://github.com/ClickHouse/ClickHouse/pull/28502) ([Azat Khuzhin](https://github.com/azat)).

### ClickHouse release v21.10, 2021-10-16

#### Backward Incompatible Change
@@ -136,6 +136,19 @@ if (ENABLE_FUZZING)
message (STATUS "Fuzzing instrumentation enabled")
set (FUZZER "libfuzzer")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdlib++")
set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF)
set (ENABLE_LIBRARIES 0)
set (ENABLE_SSL 1)
set (USE_INTERNAL_SSL_LIBRARY 1)
set (USE_UNWIND ON)
set (ENABLE_EMBEDDED_COMPILER 0)
set (ENABLE_EXAMPLES 0)
set (ENABLE_UTILS 0)
set (ENABLE_THINLTO 0)
set (ENABLE_TCMALLOC 0)
set (ENABLE_JEMALLOC 0)
set (ENABLE_CHECK_HEAVY_BUILDS 1)
set (GLIBC_COMPATIBILITY OFF)
endif()

# Global libraries
@@ -188,7 +201,7 @@ endif ()
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)

if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND NOT USE_MUSL)
# Only for Linux, x86_64 or aarch64.
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)
@@ -203,10 +216,6 @@ if (GLIBC_COMPATIBILITY)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -include ${CMAKE_CURRENT_SOURCE_DIR}/base/glibc-compatibility/glibc-compat-2.32.h")
endif()

if (NOT CMAKE_VERSION VERSION_GREATER "3.9.0")
message (WARNING "CMake version must be greater than 3.9.0 for production builds.")
endif ()

# Make sure the final executable has symbols exported
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
@@ -570,6 +579,7 @@ include (cmake/find/yaml-cpp.cmake)
include (cmake/find/s2geometry.cmake)
include (cmake/find/nlp.cmake)
include (cmake/find/bzip2.cmake)
include (cmake/find/filelog.cmake)

if(NOT USE_INTERNAL_PARQUET_LIBRARY)
set (ENABLE_ORC OFF CACHE INTERNAL "")
@@ -582,6 +592,7 @@ include (cmake/find/cassandra.cmake)
include (cmake/find/sentry.cmake)
include (cmake/find/stats.cmake)
include (cmake/find/datasketches.cmake)
include (cmake/find/libprotobuf-mutator.cmake)

set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "")
find_contrib_lib(cityhash)
@@ -613,7 +624,7 @@ macro (add_executable target)
# invoke built-in add_executable
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on than provide memcpy symbol explicitly to neutrialize thinlto's libcall generation.
if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
if (ARCH_AMD64 AND GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
else ()
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
@@ -1,14 +1,14 @@
[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/ClickHouse/raw/master/website/images/logo-400x240.png)](https://clickhouse.com)

ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real time.
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.

## Useful Links

* [Official website](https://clickhouse.com/) has quick high-level overview of ClickHouse on main page.
* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster.
* [Official website](https://clickhouse.com/) has a quick high-level overview of ClickHouse on the main page.
* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
* [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser](https://clickhouse.com/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Contacts](https://clickhouse.com/company/#contact) can help to get your questions answered if there are any.
@@ -177,8 +177,6 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
}


#if !defined(ARCADIA_BUILD) /// Arcadia's variant of CCTZ already has the same implementation.

/// Prefer to load timezones from blobs linked to the binary.
/// The blobs are provided by "tzdata" library.
/// This allows to avoid dependency on system tzdata.
@@ -234,5 +232,3 @@ namespace cctz_extension

ZoneInfoSourceFactory zone_info_source_factory = custom_factory;
}

#endif
@@ -5,6 +5,10 @@

#include <string.h>
#include <unistd.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/types.h>


#ifdef OS_LINUX
/// We can detect if code is linked with one or another readline variants or open the library dynamically.
@@ -99,7 +103,6 @@ String LineReader::readLine(const String & first_prompt, const String & second_p
continue;
}

#if !defined(ARCADIA_BUILD) /// C++20
const char * has_extender = nullptr;
for (const auto * extender : extenders)
{
@@ -129,7 +132,6 @@ String LineReader::readLine(const String & first_prompt, const String & second_p
if (input.empty())
continue;
}
#endif

line += (line.empty() ? "" : "\n") + input;
@@ -108,6 +108,11 @@ public:
LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }

time_t to_time_t(const DateLUTImpl & time_zone = DateLUT::instance()) const
{
return time_zone.makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second);
}

std::string toString() const
{
std::string s{"0000-00-00 00:00:00"};
@@ -25,6 +25,16 @@ void trim(String & s)
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
}

std::string getEditor()
{
const char * editor = std::getenv("EDITOR");

if (!editor || !*editor)
editor = "vim";

return editor;
}

/// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
/// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
/// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
@@ -123,6 +133,7 @@ ReplxxLineReader::ReplxxLineReader(
Patterns delimiters_,
replxx::Replxx::highlighter_callback_t highlighter_)
: LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)), highlighter(std::move(highlighter_))
, editor(getEditor())
{
using namespace std::placeholders;
using Replxx = replxx::Replxx;
@@ -177,6 +188,10 @@ ReplxxLineReader::ReplxxLineReader(
/// bind C-p/C-n to history-previous/history-next like readline.
rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });

/// bind C-j to ENTER action.
rx.bind_key(Replxx::KEY::control('J'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMMIT_LINE, code); });

/// By default COMPLETE_NEXT/COMPLETE_PREV was binded to C-p/C-n, re-bind
/// to M-P/M-N (that was used for HISTORY_COMMON_PREFIX_SEARCH before, but
/// it also binded to M-p/M-n).
@ -232,14 +247,13 @@ void ReplxxLineReader::addToHistory(const String & line)
        rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
}

int ReplxxLineReader::execute(const std::string & command)
/// See comments in ShellCommand::executeImpl()
/// (for the vfork via dlsym())
int ReplxxLineReader::executeEditor(const std::string & path)
{
    std::vector<char> argv0("sh", &("sh"[3]));
    std::vector<char> argv1("-c", &("-c"[3]));
    std::vector<char> argv2(command.data(), command.data() + command.size() + 1);

    const char * filename = "/bin/sh";
    char * const argv[] = {argv0.data(), argv1.data(), argv2.data(), nullptr};
    std::vector<char> argv0(editor.data(), editor.data() + editor.size() + 1);
    std::vector<char> argv1(path.data(), path.data() + path.size() + 1);
    char * const argv[] = {argv0.data(), argv1.data(), nullptr};

    static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
    if (!real_vfork)
@ -256,6 +270,7 @@ int ReplxxLineReader::execute(const std::string & command)
        return -1;
    }

    /// Child
    if (0 == pid)
    {
        sigset_t mask;
@ -263,16 +278,26 @@ int ReplxxLineReader::execute(const std::string & command)
        sigprocmask(0, nullptr, &mask);
        sigprocmask(SIG_UNBLOCK, &mask, nullptr);

        execv(filename, argv);
        execvp(editor.c_str(), argv);
        rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str());
        _exit(-1);
    }

    int status = 0;
    if (-1 == waitpid(pid, &status, 0))
    do
    {
        rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
        return -1;
    }
        int exited_pid = waitpid(pid, &status, 0);
        if (exited_pid == -1)
        {
            if (errno == EINTR)
                continue;

            rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
            return -1;
        }
        else
            break;
    } while (true);
    return status;
}

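The retry loop above exists because waitpid() can fail with EINTR when a signal arrives before the editor exits; the old code treated that as a hard error. As a self-contained illustration of the same pattern (a minimal sketch, not the ClickHouse source; the helper name is hypothetical):

```cpp
#include <sys/wait.h>
#include <cerrno>

/// Wait for `pid`, retrying when the call is interrupted by a signal
/// (EINTR) instead of reporting a spurious failure.
static int waitpidRetriable(pid_t pid, int & status)
{
    while (true)
    {
        pid_t exited_pid = waitpid(pid, &status, 0);
        if (exited_pid != -1)
            return 0;   /// child reaped; `status` is valid
        if (errno != EINTR)
            return -1;  /// real error; the caller can inspect errno
        /// EINTR: a signal arrived before the child exited; retry.
    }
}
```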
@ -286,10 +311,6 @@ void ReplxxLineReader::openEditor()
        return;
    }

    const char * editor = std::getenv("EDITOR");
    if (!editor || !*editor)
        editor = "vim";

    replxx::Replxx::State state(rx.get_state());

    size_t bytes_written = 0;
@ -312,7 +333,7 @@ void ReplxxLineReader::openEditor()
        return;
    }

    if (0 == execute(fmt::format("{} {}", editor, filename)))
    if (0 == executeEditor(filename))
    {
        try
        {

@ -22,7 +22,7 @@ public:
private:
    InputStatus readOneLine(const String & prompt) override;
    void addToHistory(const String & line) override;
    int execute(const std::string & command);
    int executeEditor(const std::string & path);
    void openEditor();

    replxx::Replxx rx;
@ -31,4 +31,6 @@ private:
    // used to call flock() to synchronize multiple clients using the same history file
    int history_file_fd = -1;
    bool bracketed_paste_enabled = false;

    std::string editor;
};

@ -28,8 +28,8 @@
#define NO_INLINE __attribute__((__noinline__))
#define MAY_ALIAS __attribute__((__may_alias__))

#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__)
#    error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress)"
#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__) && !(defined(__riscv) && (__riscv_xlen == 64))
#    error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress) and RISC-V 64 (experimental)"
#endif

/// Check for presence of address sanitizer
@ -83,10 +83,6 @@
#    define BOOST_USE_UCONTEXT 1
#endif

#if defined(ARCADIA_BUILD) && defined(BOOST_USE_UCONTEXT)
#    undef BOOST_USE_UCONTEXT
#endif

/// TODO: Strangely enough, there is no way to detect UB sanitizer.

/// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.

@ -1,4 +1,5 @@
#include <stdexcept>
#include <fstream>
#include <base/getMemoryAmount.h>
#include <base/getPageSize.h>

@ -23,7 +24,22 @@ uint64_t getMemoryAmountOrZero()
    if (page_size <= 0)
        return 0;

    return num_pages * page_size;
    uint64_t memory_amount = num_pages * page_size;

#if defined(OS_LINUX)
    // Try to look up the cgroup limit
    std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
    if (cgroup_limit.is_open())
    {
        uint64_t memory_limit = 0; // in case of read error
        cgroup_limit >> memory_limit;
        if (memory_limit > 0 && memory_limit < memory_amount)
            memory_amount = memory_limit;
    }
#endif

    return memory_amount;
}

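For context, the cap added above reads one integer from the cgroups v1 memory controller; if the process runs in a container whose limit is lower than physical RAM, that limit wins. A self-contained sketch of the same logic (assuming Linux with cgroups v1 mounted at the conventional path; cgroups v2 exposes the limit elsewhere, e.g. /sys/fs/cgroup/memory.max, which this hunk does not consult):

```cpp
#include <cstdint>
#include <fstream>
#include <unistd.h>

/// Physical RAM from sysconf, optionally capped by the cgroups v1
/// memory limit. Returns 0 if the amount cannot be determined.
uint64_t availableMemorySketch()
{
    long num_pages = sysconf(_SC_PHYS_PAGES);
    long page_size = sysconf(_SC_PAGESIZE);
    if (num_pages <= 0 || page_size <= 0)
        return 0;

    uint64_t memory_amount = static_cast<uint64_t>(num_pages) * static_cast<uint64_t>(page_size);

    std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
    uint64_t memory_limit = 0; /// stays 0 on read error
    if (cgroup_limit.is_open() && (cgroup_limit >> memory_limit) && memory_limit > 0 && memory_limit < memory_amount)
        memory_amount = memory_limit;

    return memory_amount;
}
```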
@ -49,12 +49,3 @@ namespace
#define LOG_WARNING(logger, ...) LOG_IMPL(logger, DB::LogsLevel::warning, Poco::Message::PRIO_WARNING, __VA_ARGS__)
#define LOG_ERROR(logger, ...) LOG_IMPL(logger, DB::LogsLevel::error, Poco::Message::PRIO_ERROR, __VA_ARGS__)
#define LOG_FATAL(logger, ...) LOG_IMPL(logger, DB::LogsLevel::error, Poco::Message::PRIO_FATAL, __VA_ARGS__)

/// Compatibility for external projects.
#if defined(ARCADIA_BUILD)
using Poco::Logger;
using Poco::Message;
using DB::LogsLevel;
using DB::CurrentThread;
#endif

@ -6,7 +6,7 @@

#include <base/defines.h>

#if defined(__linux__) && !defined(THREAD_SANITIZER)
#if defined(__linux__) && !defined(THREAD_SANITIZER) && !defined(USE_MUSL)
#define USE_PHDR_CACHE 1
#endif

@ -3,41 +3,24 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"

#if !defined(ARCADIA_BUILD)
#    include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#else
#    include <algorithm>
#endif
#include <miniselect/floyd_rivest_select.h>

template <class RandomIt>
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
{
#if !defined(ARCADIA_BUILD)
    ::miniselect::floyd_rivest_select(first, nth, last);
#else
    ::std::nth_element(first, nth, last);
#endif
}

template <class RandomIt>
void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
{
#if !defined(ARCADIA_BUILD)
    ::miniselect::floyd_rivest_partial_sort(first, middle, last);
#else
    ::std::partial_sort(first, middle, last);
#endif
}

template <class RandomIt, class Compare>
void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
{
#if !defined(ARCADIA_BUILD)
    ::miniselect::floyd_rivest_partial_sort(first, middle, last, compare);
#else
    ::std::partial_sort(first, middle, last, compare);
#endif
}

#pragma GCC diagnostic pop

}

@ -63,9 +63,10 @@
#include <Common/Elf.h>
#include <filesystem>

#if !defined(ARCADIA_BUILD)
#    include <Common/config_version.h>
#endif
#include <loggers/OwnFormattingChannel.h>
#include <loggers/OwnPatternFormatter.h>

#include <Common/config_version.h>

#if defined(OS_DARWIN)
#    pragma GCC diagnostic ignored "-Wunused-macros"
@ -677,6 +678,34 @@ void BaseDaemon::initialize(Application & self)
    if ((!log_path.empty() && is_daemon) || config().has("logger.stderr"))
    {
        std::string stderr_path = config().getString("logger.stderr", log_path + "/stderr.log");

        /// Check that stderr is writable before freopen(),
        /// since freopen() will make stderr invalid on error,
        /// and logging to stderr will be broken,
        /// so the following code (that is used in every program) will not write anything:
        ///
        /// int main(int argc, char ** argv)
        /// {
        ///     try
        ///     {
        ///         DB::SomeApp app;
        ///         return app.run(argc, argv);
        ///     }
        ///     catch (...)
        ///     {
        ///         std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
        ///         return 1;
        ///     }
        /// }
        if (access(stderr_path.c_str(), W_OK))
        {
            int fd;
            if ((fd = creat(stderr_path.c_str(), 0600)) == -1 && errno != EEXIST)
                throw Poco::OpenFileException("File " + stderr_path + " (logger.stderr) is not writable");
            if (fd != -1)
                ::close(fd);
        }

        if (!freopen(stderr_path.c_str(), "a+", stderr))
            throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);

@ -975,6 +1004,14 @@ void BaseDaemon::setupWatchdog()
        memcpy(argv0, new_process_name, std::min(strlen(new_process_name), original_process_name.size()));
    }

    /// If streaming compression of logs is used, write watchdog logs to cerr
    if (config().getRawString("logger.stream_compress", "false") == "true")
    {
        Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
        logger().setChannel(log);
    }

    logger().information(fmt::format("Will watch for the process with pid {}", pid));

    /// Forward signals to the child process.

@ -13,15 +13,14 @@
#include <Common/StackTrace.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
#include <Core/ServerUUID.h>
#include <Common/hex.h>

#if !defined(ARCADIA_BUILD)
#    include "Common/config_version.h"
#    include <Common/config.h>
#endif
#include "Common/config_version.h"
#include <Common/config.h>

#if USE_SENTRY

#    include <sentry.h> // Y_IGNORE
#    include <sentry.h>
#    include <stdio.h>
#    include <filesystem>

@ -64,41 +63,6 @@ void setExtras()
    sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(fs::space(server_data_path).free).c_str()));
}

void sentry_logger(sentry_level_e level, const char * message, va_list args, void *)
{
    auto * logger = &Poco::Logger::get("SentryWriter");
    size_t size = 1024;
    char buffer[size];
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#endif
    if (vsnprintf(buffer, size, message, args) >= 0)
    {
#ifdef __clang__
#pragma clang diagnostic pop
#endif
        switch (level)
        {
            case SENTRY_LEVEL_DEBUG:
                logger->debug(buffer);
                break;
            case SENTRY_LEVEL_INFO:
                logger->information(buffer);
                break;
            case SENTRY_LEVEL_WARNING:
                logger->warning(buffer);
                break;
            case SENTRY_LEVEL_ERROR:
                logger->error(buffer);
                break;
            case SENTRY_LEVEL_FATAL:
                logger->fatal(buffer);
                break;
        }
    }
}

}

@ -107,13 +71,13 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
    bool enabled = false;
    bool debug = config.getBool("send_crash_reports.debug", false);
    auto * logger = &Poco::Logger::get("SentryWriter");

    if (config.getBool("send_crash_reports.enabled", false))
    {
        if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
        {
            enabled = true;
        }
    }

    if (enabled)
    {
        server_data_path = config.getString("path", "");
@ -126,7 +90,6 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)

        sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown
        sentry_options_set_release(options, VERSION_STRING_SHORT);
        sentry_options_set_logger(options, &sentry_logger, nullptr);
        if (debug)
        {
            sentry_options_set_debug(options, 1);
@ -199,34 +162,34 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta
    if (stack_size > 0)
    {
        ssize_t offset = stack_trace.getOffset();
        char instruction_addr[100];

        char instruction_addr[19]
        {
            '0', 'x',
            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
            '\0'
        };

        StackTrace::Frames frames;
        StackTrace::symbolize(stack_trace.getFramePointers(), offset, stack_size, frames);

        for (ssize_t i = stack_size - 1; i >= offset; --i)
        {
            const StackTrace::Frame & current_frame = frames[i];
            sentry_value_t sentry_frame = sentry_value_new_object();
            UInt64 frame_ptr = reinterpret_cast<UInt64>(current_frame.virtual_addr);

            if (std::snprintf(instruction_addr, sizeof(instruction_addr), "0x%" PRIx64, frame_ptr) >= 0)
            {
                sentry_value_set_by_key(sentry_frame, "instruction_addr", sentry_value_new_string(instruction_addr));
            }
            writeHexUIntLowercase(frame_ptr, instruction_addr + 2);
            sentry_value_set_by_key(sentry_frame, "instruction_addr", sentry_value_new_string(instruction_addr));

            if (current_frame.symbol.has_value())
            {
                sentry_value_set_by_key(sentry_frame, "function", sentry_value_new_string(current_frame.symbol.value().c_str()));
            }

            if (current_frame.file.has_value())
            {
                sentry_value_set_by_key(sentry_frame, "filename", sentry_value_new_string(current_frame.file.value().c_str()));
            }

            if (current_frame.line.has_value())
            {
                sentry_value_set_by_key(sentry_frame, "lineno", sentry_value_new_int32(current_frame.line.value()));
            }

            sentry_value_append(sentry_frames, sentry_frame);
        }

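The replacement writes the frame pointer into a fixed 19-byte buffer ("0x", 16 lowercase hex digits, terminating NUL) that is pre-filled once, instead of calling snprintf for every frame. writeHexUIntLowercase comes from Common/hex.h (included in this diff); the sketch below is only an illustrative stand-in for what such a helper does, not the actual implementation:

```cpp
#include <cstdint>

/// Write `value` as exactly 16 lowercase hex digits into `out`.
/// `out` must have room for 16 characters; no terminator is written,
/// which is why the caller keeps the trailing '\0' in the buffer.
void writeHexUInt64LowercaseSketch(uint64_t value, char * out)
{
    static constexpr char digits[] = "0123456789abcdef";
    for (int i = 15; i >= 0; --i)
    {
        out[i] = digits[value & 0xF];
        value >>= 4;
    }
}
```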
@ -62,7 +62,13 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
    if (!log_path.empty())
    {
        createDirectory(log_path);
        std::cerr << "Logging " << log_level_string << " to " << log_path << std::endl;

        std::string ext;
        if (config.getRawString("logger.stream_compress", "false") == "true")
            ext = ".lz4";

        std::cerr << "Logging " << log_level_string << " to " << log_path << ext << std::endl;

        auto log_level = Poco::Logger::parseLevel(log_level_string);
        if (log_level > max_log_level)
        {
@ -75,6 +81,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
        log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
        log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
        log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
        log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false"));
        log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1"));
        log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
        log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
@ -84,7 +91,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log

        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, log_file);
        log->setLevel(log_level);
        split->addChannel(log);
        split->addChannel(log, "log");
    }

    const auto errorlog_path = config.getString("logger.errorlog", "");
@ -100,13 +107,18 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
            max_log_level = errorlog_level;
        }

        std::cerr << "Logging errors to " << errorlog_path << std::endl;
        std::string ext;
        if (config.getRawString("logger.stream_compress", "false") == "true")
            ext = ".lz4";

        std::cerr << "Logging errors to " << errorlog_path << ext << std::endl;

        error_log_file = new Poco::FileChannel;
        error_log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(errorlog_path));
        error_log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
        error_log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
        error_log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
        error_log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false"));
        error_log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1"));
        error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
        error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
@ -116,7 +128,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
        Poco::AutoPtr<DB::OwnFormattingChannel> errorlog = new DB::OwnFormattingChannel(pf, error_log_file);
        errorlog->setLevel(errorlog_level);
        errorlog->open();
        split->addChannel(errorlog);
        split->addChannel(errorlog, "errorlog");
    }

    if (config.getBool("logger.use_syslog", false))
@ -155,7 +167,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, syslog_channel);
        log->setLevel(syslog_level);

        split->addChannel(log);
        split->addChannel(log, "syslog");
    }

    bool should_log_to_console = isatty(STDIN_FILENO) || isatty(STDERR_FILENO);
@ -177,7 +189,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel);
        logger.warning("Logging " + console_log_level_string + " to console");
        log->setLevel(console_log_level);
        split->addChannel(log);
        split->addChannel(log, "console");
    }

    split->open();
@ -224,6 +236,89 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
    }
}

void Loggers::updateLevels(Poco::Util::AbstractConfiguration & config, Poco::Logger & logger)
{
    int max_log_level = 0;

    const auto log_level_string = config.getString("logger.level", "trace");
    int log_level = Poco::Logger::parseLevel(log_level_string);
    if (log_level > max_log_level)
        max_log_level = log_level;

    const auto log_path = config.getString("logger.log", "");
    if (!log_path.empty())
        split->setLevel("log", log_level);
    else
        split->setLevel("log", 0);

    // Set level for the console channel
    bool is_daemon = config.getBool("application.runAsDaemon", false);
    bool should_log_to_console = isatty(STDIN_FILENO) || isatty(STDERR_FILENO);
    if (config.getBool("logger.console", false)
        || (!config.hasProperty("logger.console") && !is_daemon && should_log_to_console))
        split->setLevel("console", log_level);
    else
        split->setLevel("console", 0);

    // Set level for the errorlog channel
    int errorlog_level = 0;
    const auto errorlog_path = config.getString("logger.errorlog", "");
    if (!errorlog_path.empty())
    {
        errorlog_level = Poco::Logger::parseLevel(config.getString("logger.errorlog_level", "notice"));
        if (errorlog_level > max_log_level)
            max_log_level = errorlog_level;
    }
    split->setLevel("errorlog", errorlog_level);

    // Set level for the syslog channel
    int syslog_level = 0;
    if (config.getBool("logger.use_syslog", false))
    {
        syslog_level = Poco::Logger::parseLevel(config.getString("logger.syslog_level", log_level_string));
        if (syslog_level > max_log_level)
            max_log_level = syslog_level;
    }
    split->setLevel("syslog", syslog_level);

    // Global logging level (it can be overridden for specific loggers).
    logger.setLevel(max_log_level);

    // Set the level for all already created loggers
    std::vector<std::string> names;

    logger.root().names(names);
    for (const auto & name : names)
        logger.root().get(name).setLevel(max_log_level);

    logger.root().setLevel(max_log_level);

    // Explicitly specified log levels for specific loggers.
    {
        Poco::Util::AbstractConfiguration::Keys loggers_level;
        config.keys("logger.levels", loggers_level);

        if (!loggers_level.empty())
        {
            for (const auto & key : loggers_level)
            {
                if (key == "logger" || key.starts_with("logger["))
                {
                    const std::string name(config.getString("logger.levels." + key + ".name"));
                    const std::string level(config.getString("logger.levels." + key + ".level"));
                    logger.root().get(name).setLevel(level);
                }
                else
                {
                    // Legacy syntax
                    const std::string level(config.getString("logger.levels." + key, "trace"));
                    logger.root().get(key).setLevel(level);
                }
            }
        }
    }
}

void Loggers::closeLogs(Poco::Logger & logger)
{
    if (log_file)

|
||||
public:
|
||||
void buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Logger & logger, const std::string & cmd_name = "");
|
||||
|
||||
void updateLevels(Poco::Util::AbstractConfiguration & config, Poco::Logger & logger);
|
||||
|
||||
/// Close log files. On next log write files will be reopened.
|
||||
void closeLogs(Poco::Logger & logger);
|
||||
|
||||
|
@ -1,4 +1,5 @@
#pragma once
#include <atomic>
#include <Poco/AutoPtr.h>
#include <Poco/Channel.h>
#include <Poco/FormattingChannel.h>
@ -14,7 +15,7 @@ class OwnFormattingChannel : public Poco::Channel, public ExtendedLogChannel
public:
    explicit OwnFormattingChannel(
        Poco::AutoPtr<OwnPatternFormatter> pFormatter_ = nullptr, Poco::AutoPtr<Poco::Channel> pChannel_ = nullptr)
        : pFormatter(std::move(pFormatter_)), pChannel(std::move(pChannel_))
        : pFormatter(std::move(pFormatter_)), pChannel(std::move(pChannel_)), priority(Poco::Message::PRIO_TRACE)
    {
    }

@ -45,7 +46,7 @@ public:
private:
    Poco::AutoPtr<OwnPatternFormatter> pFormatter;
    Poco::AutoPtr<Poco::Channel> pChannel;
    Poco::Message::Priority priority = Poco::Message::PRIO_TRACE;
    std::atomic<Poco::Message::Priority> priority;
};

}
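The switch to std::atomic matters because, with updateLevels(), one thread can call setLevel() while other threads are logging and reading the level; a plain enum read/write would be a data race. A minimal sketch of the access pattern (simplified, with int in place of Poco::Message::Priority; relaxed ordering is assumed to be sufficient for a log-level check):

```cpp
#include <atomic>

struct ChannelSketch
{
    /// 8 corresponds to the most verbose Poco level (PRIO_TRACE).
    std::atomic<int> priority{8};

    void setLevel(int level) { priority.store(level, std::memory_order_relaxed); }

    bool shouldLog(int msg_priority) const
    {
        /// Concurrent reads stay well-defined even while setLevel() runs.
        return msg_priority <= priority.load(std::memory_order_relaxed);
    }
};
```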
@ -1,4 +1,5 @@
#include "OwnSplitChannel.h"
#include "OwnFormattingChannel.h"

#include <iostream>
#include <Core/Block.h>
@ -75,7 +76,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
    ExtendedLogMessage msg_ext = ExtendedLogMessage::getFrom(msg);

    /// Log data to child channels
    for (auto & channel : channels)
    for (auto & [name, channel] : channels)
    {
        if (channel.second)
            channel.second->logExtended(msg_ext); // extended child
@ -100,7 +101,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
        columns[i++]->insert(msg.getSource());
        columns[i++]->insert(msg.getText());

        logs_queue->emplace(std::move(columns));
        [[maybe_unused]] bool push_result = logs_queue->emplace(std::move(columns));
    }

    /// Also log to the system.text_log table, if the message is not too noisy
@ -137,9 +138,9 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
}


void OwnSplitChannel::addChannel(Poco::AutoPtr<Poco::Channel> channel)
void OwnSplitChannel::addChannel(Poco::AutoPtr<Poco::Channel> channel, const std::string & name)
{
    channels.emplace_back(std::move(channel), dynamic_cast<ExtendedLogChannel *>(channel.get()));
    channels.emplace(name, ExtendedChannelPtrPair(std::move(channel), dynamic_cast<ExtendedLogChannel *>(channel.get())));
}

void OwnSplitChannel::addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
@ -149,4 +150,14 @@ void OwnSplitChannel::addTextLog(std::shared_ptr<DB::TextLog> log, int max_prior
    text_log_max_priority.store(max_priority, std::memory_order_relaxed);
}

void OwnSplitChannel::setLevel(const std::string & name, int level)
{
    auto it = channels.find(name);
    if (it != channels.end())
    {
        if (auto * channel = dynamic_cast<DB::OwnFormattingChannel *>(it->second.first.get()))
            channel->setLevel(level);
    }
}

}
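With channels keyed by name, each sink is registered once and can later be retuned without rebuilding the channel tree; setLevel() on an unknown name is a no-op. A simplified, hypothetical analog of the new container and lookup (standard-library types only, no Poco):

```cpp
#include <map>
#include <memory>
#include <string>
#include <utility>

struct SinkSketch
{
    int level = 8; /// most verbose
};

struct SplitSketch
{
    std::map<std::string, std::shared_ptr<SinkSketch>> channels;

    void addChannel(std::shared_ptr<SinkSketch> channel, const std::string & name)
    {
        channels.emplace(name, std::move(channel));
    }

    void setLevel(const std::string & name, int level)
    {
        auto it = channels.find(name);
        if (it != channels.end())
            it->second->level = level; /// unknown names are ignored, as in the diff
    }
};
```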
@ -18,10 +18,12 @@ public:
    /// Makes an extended message from msg and passes it to the client logs queue and child (if possible)
    void log(const Poco::Message & msg) override;
    /// Adds a child channel
    void addChannel(Poco::AutoPtr<Poco::Channel> channel);
    void addChannel(Poco::AutoPtr<Poco::Channel> channel, const std::string & name);

    void addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);

    void setLevel(const std::string & name, int level);

private:
    void logSplit(const Poco::Message & msg);
    void tryLogSplit(const Poco::Message & msg);
@ -29,7 +31,7 @@ private:
    using ChannelPtr = Poco::AutoPtr<Poco::Channel>;
    /// A handler and its pointer cast to the extended interface
    using ExtendedChannelPtrPair = std::pair<ChannelPtr, ExtendedLogChannel *>;
    std::vector<ExtendedChannelPtrPair> channels;
    std::map<std::string, ExtendedChannelPtrPair> channels;

    std::mutex text_log_mutex;

|
||||
#include <errmsg.h>
|
||||
#include <mysql.h>
|
||||
#else
|
||||
#include <mysql/errmsg.h> //Y_IGNORE
|
||||
#include <mysql/errmsg.h>
|
||||
#include <mysql/mysql.h>
|
||||
#endif
|
||||
|
||||
|
15906
benchmark/duckdb/log
Normal file
File diff suppressed because one or more lines are too long
43
benchmark/duckdb/queries.sql
Normal file
@ -0,0 +1,43 @@
SELECT count(*) FROM hits;
SELECT count(*) FROM hits WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits;
SELECT sum(UserID) FROM hits;
SELECT COUNT(DISTINCT UserID) FROM hits;
SELECT COUNT(DISTINCT SearchPhrase) FROM hits;
SELECT min(EventDate), max(EventDate) FROM hits;
SELECT AdvEngineID, count(*) FROM hits WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, count(*) FROM hits GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS m, SearchPhrase, count(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID FROM hits WHERE UserID = 12345678901234567890;
SELECT count(*) FROM hits WHERE URL::TEXT LIKE '%metrika%';
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits WHERE URL::TEXT LIKE '%metrika%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM hits WHERE Title::TEXT LIKE '%Яндекс%' AND URL::TEXT NOT LIKE '%.yandex.%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM hits WHERE URL::TEXT LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, avg(octet_length(URL)) AS l, count(*) AS c FROM hits WHERE octet_length(URL) > 0 GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT regexp_replace(Referer::TEXT, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(octet_length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits WHERE octet_length(Referer) > 0 GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits;
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(URL) > 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(Title) > 0 GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS "Minute", count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) ORDER BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime)));
762
benchmark/duckdb/usability.md
Normal file
File diff suppressed because one or more lines are too long
@ -13,6 +13,7 @@ TRIES=3

AMD64_BIN_URL="https://builds.clickhouse.com/master/amd64/clickhouse"
AARCH64_BIN_URL="https://builds.clickhouse.com/master/aarch64/clickhouse"
POWERPC64_BIN_URL="https://builds.clickhouse.com/master/ppc64le/clickhouse"

# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.

@ -38,6 +39,8 @@ if [[ ! -f clickhouse ]]; then
        $FASTER_DOWNLOAD "$AMD64_BIN_URL"
    elif [[ $CPU == aarch64 ]]; then
        $FASTER_DOWNLOAD "$AARCH64_BIN_URL"
    elif [[ $CPU == powerpc64le ]]; then
        $FASTER_DOWNLOAD "$POWERPC64_BIN_URL"
    else
        echo "Unsupported CPU type: $CPU"
        exit 1
@ -52,7 +55,7 @@ fi

if [[ ! -d data ]]; then
    if [[ ! -f $DATASET ]]; then
        $FASTER_DOWNLOAD "https://clickhouse-datasets.s3.yandex.net/hits/partitions/$DATASET"
        $FASTER_DOWNLOAD "https://datasets.clickhouse.com/hits/partitions/$DATASET"
    fi

    tar $TAR_PARAMS --strip-components=1 --directory=. -x -v -f $DATASET

12
benchmark/postgresql/benchmark.sh
Executable file
@ -0,0 +1,12 @@
#!/bin/bash

grep -v -P '^#' queries.sql | sed -e 's/{table}/hits_100m_pg/' | while read query; do

    echo 3 | sudo tee /proc/sys/vm/drop_caches

    echo "$query";
    for i in {1..3}; do
        # For some reason JIT does not work on my machine
        sudo -u postgres psql tutorial -t -c 'set jit = off' -c '\timing' -c "$query" | grep 'Time' | tee --append log
    done;
done;
142
benchmark/postgresql/instructions.md
Normal file
@ -0,0 +1,142 @@
Create a table in PostgreSQL:

```
CREATE TABLE hits_100m_pg
(
    WatchID BIGINT NOT NULL,
    JavaEnable SMALLINT NOT NULL,
    Title TEXT NOT NULL,
    GoodEvent SMALLINT NOT NULL,
    EventTime TIMESTAMP NOT NULL,
    EventDate Date NOT NULL,
    CounterID INTEGER NOT NULL,
    ClientIP INTEGER NOT NULL,
    RegionID INTEGER NOT NULL,
    UserID BIGINT NOT NULL,
    CounterClass SMALLINT NOT NULL,
    OS SMALLINT NOT NULL,
    UserAgent SMALLINT NOT NULL,
    URL TEXT NOT NULL,
    Referer TEXT NOT NULL,
    Refresh SMALLINT NOT NULL,
    RefererCategoryID SMALLINT NOT NULL,
    RefererRegionID INTEGER NOT NULL,
    URLCategoryID SMALLINT NOT NULL,
    URLRegionID INTEGER NOT NULL,
    ResolutionWidth SMALLINT NOT NULL,
    ResolutionHeight SMALLINT NOT NULL,
    ResolutionDepth SMALLINT NOT NULL,
    FlashMajor SMALLINT NOT NULL,
    FlashMinor SMALLINT NOT NULL,
    FlashMinor2 TEXT NOT NULL,
    NetMajor SMALLINT NOT NULL,
    NetMinor SMALLINT NOT NULL,
    UserAgentMajor SMALLINT NOT NULL,
    UserAgentMinor CHAR(2) NOT NULL,
    CookieEnable SMALLINT NOT NULL,
    JavascriptEnable SMALLINT NOT NULL,
    IsMobile SMALLINT NOT NULL,
    MobilePhone SMALLINT NOT NULL,
    MobilePhoneModel TEXT NOT NULL,
    Params TEXT NOT NULL,
    IPNetworkID INTEGER NOT NULL,
    TraficSourceID SMALLINT NOT NULL,
    SearchEngineID SMALLINT NOT NULL,
    SearchPhrase TEXT NOT NULL,
    AdvEngineID SMALLINT NOT NULL,
    IsArtifical SMALLINT NOT NULL,
    WindowClientWidth SMALLINT NOT NULL,
    WindowClientHeight SMALLINT NOT NULL,
    ClientTimeZone SMALLINT NOT NULL,
    ClientEventTime TIMESTAMP NOT NULL,
    SilverlightVersion1 SMALLINT NOT NULL,
    SilverlightVersion2 SMALLINT NOT NULL,
    SilverlightVersion3 INTEGER NOT NULL,
    SilverlightVersion4 SMALLINT NOT NULL,
    PageCharset TEXT NOT NULL,
    CodeVersion INTEGER NOT NULL,
    IsLink SMALLINT NOT NULL,
    IsDownload SMALLINT NOT NULL,
    IsNotBounce SMALLINT NOT NULL,
    FUniqID BIGINT NOT NULL,
    OriginalURL TEXT NOT NULL,
    HID INTEGER NOT NULL,
    IsOldCounter SMALLINT NOT NULL,
    IsEvent SMALLINT NOT NULL,
    IsParameter SMALLINT NOT NULL,
    DontCountHits SMALLINT NOT NULL,
    WithHash SMALLINT NOT NULL,
    HitColor CHAR NOT NULL,
    LocalEventTime TIMESTAMP NOT NULL,
    Age SMALLINT NOT NULL,
    Sex SMALLINT NOT NULL,
    Income SMALLINT NOT NULL,
    Interests SMALLINT NOT NULL,
    Robotness SMALLINT NOT NULL,
    RemoteIP INTEGER NOT NULL,
    WindowName INTEGER NOT NULL,
    OpenerName INTEGER NOT NULL,
    HistoryLength SMALLINT NOT NULL,
    BrowserLanguage TEXT NOT NULL,
    BrowserCountry TEXT NOT NULL,
    SocialNetwork TEXT NOT NULL,
    SocialAction TEXT NOT NULL,
    HTTPError SMALLINT NOT NULL,
    SendTiming INTEGER NOT NULL,
    DNSTiming INTEGER NOT NULL,
    ConnectTiming INTEGER NOT NULL,
    ResponseStartTiming INTEGER NOT NULL,
    ResponseEndTiming INTEGER NOT NULL,
    FetchTiming INTEGER NOT NULL,
    SocialSourceNetworkID SMALLINT NOT NULL,
    SocialSourcePage TEXT NOT NULL,
    ParamPrice BIGINT NOT NULL,
    ParamOrderID TEXT NOT NULL,
    ParamCurrency TEXT NOT NULL,
    ParamCurrencyID SMALLINT NOT NULL,
    OpenstatServiceName TEXT NOT NULL,
    OpenstatCampaignID TEXT NOT NULL,
    OpenstatAdID TEXT NOT NULL,
    OpenstatSourceID TEXT NOT NULL,
    UTMSource TEXT NOT NULL,
    UTMMedium TEXT NOT NULL,
    UTMCampaign TEXT NOT NULL,
    UTMContent TEXT NOT NULL,
    UTMTerm TEXT NOT NULL,
    FromTag TEXT NOT NULL,
    HasGCLID SMALLINT NOT NULL,
    RefererHash BIGINT NOT NULL,
    URLHash BIGINT NOT NULL,
    CLID INTEGER NOT NULL
);
```

Create a dump from ClickHouse:

```
SELECT WatchID::Int64, JavaEnable, replaceAll(replaceAll(replaceAll(toValidUTF8(Title), '\0', ''), '"', ''), '\\', ''), GoodEvent, EventTime, EventDate, CounterID::Int32, ClientIP::Int32, RegionID::Int32,
UserID::Int64, CounterClass, OS, UserAgent, replaceAll(replaceAll(replaceAll(toValidUTF8(URL), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(Referer), '\0', ''), '"', ''), '\\', ''), Refresh, RefererCategoryID::Int16, RefererRegionID::Int32,
URLCategoryID::Int16, URLRegionID::Int32, ResolutionWidth::Int16, ResolutionHeight::Int16, ResolutionDepth, FlashMajor, FlashMinor,
FlashMinor2, NetMajor, NetMinor, UserAgentMajor::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(UserAgentMinor::String), '\0', ''), '"', ''), '\\', ''), CookieEnable, JavascriptEnable, IsMobile, MobilePhone,
replaceAll(replaceAll(replaceAll(toValidUTF8(MobilePhoneModel), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(Params), '\0', ''), '"', ''), '\\', ''), IPNetworkID::Int32, TraficSourceID, SearchEngineID::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(SearchPhrase), '\0', ''), '"', ''), '\\', ''),
AdvEngineID, IsArtifical, WindowClientWidth::Int16, WindowClientHeight::Int16, ClientTimeZone, ClientEventTime,
SilverlightVersion1, SilverlightVersion2, SilverlightVersion3::Int32, SilverlightVersion4::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(PageCharset), '\0', ''), '"', ''), '\\', ''),
CodeVersion::Int32, IsLink, IsDownload, IsNotBounce, FUniqID::Int64, replaceAll(replaceAll(replaceAll(toValidUTF8(OriginalURL), '\0', ''), '"', ''), '\\', ''), HID::Int32, IsOldCounter, IsEvent,
IsParameter, DontCountHits, WithHash, replaceAll(replaceAll(replaceAll(toValidUTF8(HitColor::String), '\0', ''), '"', ''), '\\', ''), LocalEventTime, Age, Sex, Income, Interests::Int16, Robotness, RemoteIP::Int32,
WindowName, OpenerName, HistoryLength, replaceAll(replaceAll(replaceAll(toValidUTF8(BrowserLanguage::String), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(BrowserCountry::String), '\0', ''), '"', ''), '\\', ''),
replaceAll(replaceAll(replaceAll(toValidUTF8(SocialNetwork), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(SocialAction), '\0', ''), '"', ''), '\\', ''),
HTTPError, least(SendTiming, 30000), least(DNSTiming, 30000), least(ConnectTiming, 30000), least(ResponseStartTiming, 30000),
least(ResponseEndTiming, 30000), least(FetchTiming, 30000), SocialSourceNetworkID,
replaceAll(replaceAll(replaceAll(toValidUTF8(SocialSourcePage), '\0', ''), '"', ''), '\\', ''), ParamPrice, replaceAll(replaceAll(replaceAll(toValidUTF8(ParamOrderID), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(ParamCurrency::String), '\0', ''), '"', ''), '\\', ''),
ParamCurrencyID::Int16, OpenstatServiceName, OpenstatCampaignID, OpenstatAdID, OpenstatSourceID,
UTMSource, UTMMedium, UTMCampaign, UTMContent, UTMTerm, FromTag, HasGCLID, RefererHash::Int64, URLHash::Int64, CLID::Int32
FROM hits_100m_obfuscated
INTO OUTFILE 'dump.tsv'
FORMAT TSV
```

Insert data into PostgreSQL:

```
\copy hits_100m_pg FROM 'dump.tsv';
```
129
benchmark/postgresql/log
Normal file
@ -0,0 +1,129 @@
Time: 122020.258 ms (02:02.020)
Time: 5060.281 ms (00:05.060)
Time: 5052.692 ms (00:05.053)
Time: 129594.172 ms (02:09.594)
Time: 8079.623 ms (00:08.080)
Time: 7866.964 ms (00:07.867)
Time: 129584.717 ms (02:09.585)
Time: 8276.161 ms (00:08.276)
Time: 8153.295 ms (00:08.153)
Time: 123707.890 ms (02:03.708)
Time: 6835.297 ms (00:06.835)
Time: 6607.039 ms (00:06.607)
Time: 166640.676 ms (02:46.641)
Time: 75401.239 ms (01:15.401)
Time: 73526.027 ms (01:13.526)
Time: 272715.750 ms (04:32.716)
Time: 182721.613 ms (03:02.722)
Time: 182880.525 ms (03:02.881)
Time: 127108.191 ms (02:07.108)
Time: 6542.913 ms (00:06.543)
Time: 6339.887 ms (00:06.340)
Time: 127339.314 ms (02:07.339)
Time: 8376.381 ms (00:08.376)
Time: 7831.872 ms (00:07.832)
Time: 179176.439 ms (02:59.176)
Time: 58559.297 ms (00:58.559)
Time: 58139.265 ms (00:58.139)
Time: 182019.101 ms (03:02.019)
Time: 58435.027 ms (00:58.435)
Time: 58130.994 ms (00:58.131)
Time: 132449.502 ms (02:12.450)
Time: 11203.104 ms (00:11.203)
Time: 11048.435 ms (00:11.048)
Time: 128445.641 ms (02:08.446)
Time: 11602.145 ms (00:11.602)
Time: 11418.356 ms (00:11.418)
Time: 162831.387 ms (02:42.831)
Time: 41510.710 ms (00:41.511)
Time: 41682.899 ms (00:41.683)
Time: 171898.965 ms (02:51.899)
Time: 47379.274 ms (00:47.379)
Time: 47429.908 ms (00:47.430)
Time: 161607.811 ms (02:41.608)
Time: 41674.409 ms (00:41.674)
Time: 40854.340 ms (00:40.854)
Time: 175247.929 ms (02:55.248)
Time: 46721.776 ms (00:46.722)
Time: 46507.631 ms (00:46.508)
Time: 335961.271 ms (05:35.961)
Time: 248535.866 ms (04:08.536)
Time: 247383.678 ms (04:07.384)
Time: 132852.983 ms (02:12.853)
Time: 14939.304 ms (00:14.939)
Time: 14607.525 ms (00:14.608)
Time: 243461.844 ms (04:03.462)
Time: 157307.904 ms (02:37.308)
Time: 155093.101 ms (02:35.093)
Time: 122090.761 ms (02:02.091)
Time: 6411.266 ms (00:06.411)
Time: 6308.178 ms (00:06.308)
Time: 126584.819 ms (02:06.585)
Time: 8836.471 ms (00:08.836)
Time: 8532.176 ms (00:08.532)
Time: 125225.097 ms (02:05.225)
Time: 10236.910 ms (00:10.237)
Time: 9849.757 ms (00:09.850)
Time: 139140.064 ms (02:19.140)
Time: 21797.859 ms (00:21.798)
Time: 21559.214 ms (00:21.559)
Time: 124757.485 ms (02:04.757)
Time: 8728.403 ms (00:08.728)
Time: 8714.130 ms (00:08.714)
Time: 120687.258 ms (02:00.687)
Time: 8366.245 ms (00:08.366)
Time: 8146.856 ms (00:08.147)
Time: 122327.148 ms (02:02.327)
Time: 8698.359 ms (00:08.698)
Time: 8480.807 ms (00:08.481)
Time: 123958.614 ms (02:03.959)
Time: 8595.931 ms (00:08.596)
Time: 8241.773 ms (00:08.242)
Time: 128982.905 ms (02:08.983)
Time: 11252.783 ms (00:11.253)
Time: 10957.931 ms (00:10.958)
Time: 208455.385 ms (03:28.455)
Time: 102530.897 ms (01:42.531)
Time: 102049.298 ms (01:42.049)
Time: 131268.420 ms (02:11.268)
Time: 21094.466 ms (00:21.094)
Time: 20934.610 ms (00:20.935)
Time: 164084.134 ms (02:44.084)
Time: 77418.547 ms (01:17.419)
Time: 75422.290 ms (01:15.422)
Time: 174800.022 ms (02:54.800)
Time: 87859.594 ms (01:27.860)
Time: 85733.954 ms (01:25.734)
Time: 419357.463 ms (06:59.357)
Time: 339047.269 ms (05:39.047)
Time: 334808.230 ms (05:34.808)
Time: 475011.901 ms (07:55.012)
Time: 344406.246 ms (05:44.406)
Time: 347197.731 ms (05:47.198)
Time: 464657.732 ms (07:44.658)
Time: 332084.079 ms (05:32.084)
Time: 330921.322 ms (05:30.921)
Time: 152490.615 ms (02:32.491)
Time: 30954.343 ms (00:30.954)
Time: 31379.062 ms (00:31.379)
Time: 128539.127 ms (02:08.539)
Time: 12802.672 ms (00:12.803)
Time: 12494.088 ms (00:12.494)
Time: 125850.120 ms (02:05.850)
Time: 10318.773 ms (00:10.319)
Time: 9953.030 ms (00:09.953)
Time: 126602.092 ms (02:06.602)
Time: 8935.571 ms (00:08.936)
Time: 8711.184 ms (00:08.711)
Time: 133222.456 ms (02:13.222)
Time: 11848.869 ms (00:11.849)
Time: 11752.640 ms (00:11.753)
Time: 126950.067 ms (02:06.950)
Time: 11260.892 ms (00:11.261)
Time: 10943.649 ms (00:10.944)
Time: 128451.171 ms (02:08.451)
Time: 10984.980 ms (00:10.985)
Time: 10770.609 ms (00:10.771)
Time: 124621.000 ms (02:04.621)
Time: 8885.466 ms (00:08.885)
Time: 8857.296 ms (00:08.857)
43
benchmark/postgresql/queries.sql
Normal file
@ -0,0 +1,43 @@
SELECT count(*) FROM {table};
SELECT count(*) FROM {table} WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table};
SELECT sum(UserID) FROM {table};
SELECT COUNT(DISTINCT UserID) FROM {table};
SELECT COUNT(DISTINCT SearchPhrase) FROM {table};
SELECT min(EventDate), max(EventDate) FROM {table};
SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID FROM {table} WHERE UserID = -6101065172474983726;
SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%';
SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
|
||||
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
|
||||
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
|
11
benchmark/timescaledb/benchmark.sh
Executable file
@ -0,0 +1,11 @@
#!/bin/bash

# Strip comment lines, substitute the target table, and run each query.
# Note: read -r preserves backslashes in the queries (regex escapes such as
# '\.' and '\1'); without it the shell mangles those patterns.
grep -v -P '^#' queries.sql | sed -e 's/{table}/hits_100m_obfuscated/' | while read -r query; do

    # Drop the OS page cache so the first of the three runs is a cold run.
    echo 3 | sudo tee /proc/sys/vm/drop_caches

    echo "$query"
    for i in {1..3}; do
        sudo -u postgres psql tutorial -t -c 'set jit = off' -c '\timing' -c "$query" | grep 'Time' | tee --append log
    done
done
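As a quick sanity check on the raw log this script produces, the three timings per query can be reduced to a single number. A minimal sketch, assuming the log layout shown below (three `Time: ... ms` lines per query, in queries.sql order) and GNU grep/awk; the output path `log` matches the script above:

#!/bin/bash
# For each query, print the fastest of its three recorded runs.
grep -oP 'Time: \K[0-9.]+' log | awk '
{
    times[NR] = $1
}
END {
    # Every three consecutive measurements belong to one query.
    for (i = 1; i <= NR; i += 3) {
        best = times[i]
        if (i + 1 <= NR && times[i + 1] < best) best = times[i + 1]
        if (i + 2 <= NR && times[i + 2] < best) best = times[i + 2]
        printf "query %d: %.3f ms\n", (i + 2) / 3, best
    }
}'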
215
benchmark/timescaledb/log
Normal file
@ -0,0 +1,215 @@
3
SELECT count(*) FROM hits_100m_obfuscated;
Time: 3259.733 ms (00:03.260)
Time: 3135.484 ms (00:03.135)
Time: 3135.579 ms (00:03.136)
3
SELECT count(*) FROM hits_100m_obfuscated WHERE AdvEngineID != 0;
Time: 146854.557 ms (02:26.855)
Time: 6921.736 ms (00:06.922)
Time: 6619.892 ms (00:06.620)
3
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_100m_obfuscated;
Time: 146568.297 ms (02:26.568)
Time: 7481.610 ms (00:07.482)
Time: 7258.209 ms (00:07.258)
3
SELECT sum(UserID) FROM hits_100m_obfuscated;
Time: 146864.106 ms (02:26.864)
Time: 5690.024 ms (00:05.690)
Time: 5381.820 ms (00:05.382)
3
SELECT COUNT(DISTINCT UserID) FROM hits_100m_obfuscated;
Time: 227507.331 ms (03:47.507)
Time: 69165.471 ms (01:09.165)
Time: 72216.950 ms (01:12.217)
3
SELECT COUNT(DISTINCT SearchPhrase) FROM hits_100m_obfuscated;
Time: 323644.397 ms (05:23.644)
Time: 177578.740 ms (02:57.579)
Time: 175055.738 ms (02:55.056)
3
SELECT min(EventDate), max(EventDate) FROM hits_100m_obfuscated;
Time: 146147.843 ms (02:26.148)
Time: 5735.128 ms (00:05.735)
Time: 5428.638 ms (00:05.429)
3
SELECT AdvEngineID, count(*) FROM hits_100m_obfuscated WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
Time: 148658.450 ms (02:28.658)
Time: 7014.882 ms (00:07.015)
Time: 6599.736 ms (00:06.600)
3
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated GROUP BY RegionID ORDER BY u DESC LIMIT 10;
Time: 202423.122 ms (03:22.423)
Time: 54439.047 ms (00:54.439)
Time: 54800.354 ms (00:54.800)
3
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits_100m_obfuscated GROUP BY RegionID ORDER BY c DESC LIMIT 10;
Time: 201152.491 ms (03:21.152)
Time: 55875.854 ms (00:55.876)
Time: 55200.330 ms (00:55.200)
3
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
Time: 146042.603 ms (02:26.043)
Time: 9931.633 ms (00:09.932)
Time: 10037.032 ms (00:10.037)
3
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
Time: 150811.952 ms (02:30.812)
Time: 10320.230 ms (00:10.320)
Time: 9993.232 ms (00:09.993)
3
SELECT SearchPhrase, count(*) AS c FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 173071.218 ms (02:53.071)
Time: 34314.835 ms (00:34.315)
Time: 34420.919 ms (00:34.421)
3
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
Time: 172874.155 ms (02:52.874)
Time: 43704.494 ms (00:43.704)
Time: 43918.380 ms (00:43.918)
3
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 178484.822 ms (02:58.485)
Time: 36850.436 ms (00:36.850)
Time: 35789.029 ms (00:35.789)
3
SELECT UserID, count(*) FROM hits_100m_obfuscated GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
Time: 169720.759 ms (02:49.721)
Time: 24125.730 ms (00:24.126)
Time: 23782.745 ms (00:23.783)
3
SELECT UserID, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
Time: 182335.631 ms (03:02.336)
Time: 37324.563 ms (00:37.325)
Time: 37124.250 ms (00:37.124)
3
SELECT UserID, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, SearchPhrase LIMIT 10;
Time: 163799.714 ms (02:43.800)
Time: 18514.031 ms (00:18.514)
Time: 18968.524 ms (00:18.969)
3
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
Time: 294799.480 ms (04:54.799)
Time: 149592.992 ms (02:29.593)
Time: 149466.291 ms (02:29.466)
3
SELECT UserID FROM hits_100m_obfuscated WHERE UserID = -6101065172474983726;
Time: 140797.496 ms (02:20.797)
Time: 5312.321 ms (00:05.312)
Time: 5020.502 ms (00:05.021)
3
SELECT count(*) FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%';
Time: 143092.287 ms (02:23.092)
Time: 7893.874 ms (00:07.894)
Time: 7661.326 ms (00:07.661)
3
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 143682.424 ms (02:23.682)
Time: 9249.962 ms (00:09.250)
Time: 9073.876 ms (00:09.074)
3
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM hits_100m_obfuscated WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 150965.884 ms (02:30.966)
Time: 20350.812 ms (00:20.351)
Time: 20074.939 ms (00:20.075)
3
SELECT * FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
Time: 4674.669 ms (00:04.675)
Time: 4532.389 ms (00:04.532)
Time: 4555.457 ms (00:04.555)
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
Time: 5.177 ms
Time: 5.031 ms
Time: 4.419 ms
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
Time: 141152.210 ms (02:21.152)
Time: 7492.968 ms (00:07.493)
Time: 7300.428 ms (00:07.300)
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
Time: 30.736 ms
Time: 5.018 ms
Time: 5.132 ms
3
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_100m_obfuscated WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
Time: 144034.016 ms (02:24.034)
Time: 10701.672 ms (00:10.702)
Time: 10348.565 ms (00:10.349)
3
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits_100m_obfuscated WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
Time: 191575.080 ms (03:11.575)
Time: 97836.706 ms (01:37.837)
Time: 97673.219 ms (01:37.673)
3
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_100m_obfuscated;
Time: 143652.317 ms (02:23.652)
Time: 22185.656 ms (00:22.186)
Time: 21887.411 ms (00:21.887)
3
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 153481.944 ms (02:33.482)
Time: 17748.628 ms (00:17.749)
Time: 17551.116 ms (00:17.551)
3
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 167448.684 ms (02:47.449)
Time: 25902.961 ms (00:25.903)
Time: 25592.018 ms (00:25.592)
3
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 299183.443 ms (04:59.183)
Time: 145349.772 ms (02:25.350)
Time: 143214.688 ms (02:23.215)
3
SELECT URL, count(*) AS c FROM hits_100m_obfuscated GROUP BY URL ORDER BY c DESC LIMIT 10;
Time: 389851.369 ms (06:29.851)
Time: 228158.639 ms (03:48.159)
Time: 231811.118 ms (03:51.811)
3
SELECT 1, URL, count(*) AS c FROM hits_100m_obfuscated GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
Time: 407458.343 ms (06:47.458)
Time: 230125.530 ms (03:50.126)
Time: 230764.511 ms (03:50.765)
3
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_100m_obfuscated GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
Time: 174098.556 ms (02:54.099)
Time: 23503.975 ms (00:23.504)
Time: 24322.856 ms (00:24.323)
3
SELECT URL, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
Time: 145906.025 ms (02:25.906)
Time: 10824.695 ms (00:10.825)
Time: 10484.885 ms (00:10.485)
3
SELECT Title, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
Time: 144063.711 ms (02:24.064)
Time: 8947.980 ms (00:08.948)
Time: 8608.434 ms (00:08.608)
3
SELECT URL, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
Time: 141883.596 ms (02:21.884)
Time: 7977.257 ms (00:07.977)
Time: 7673.547 ms (00:07.674)
3
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
Time: 147100.084 ms (02:27.100)
Time: 9527.812 ms (00:09.528)
Time: 9457.663 ms (00:09.458)
3
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
Time: 144585.669 ms (02:24.586)
Time: 10815.223 ms (00:10.815)
Time: 10594.707 ms (00:10.595)
3
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
Time: 145738.341 ms (02:25.738)
Time: 10592.979 ms (00:10.593)
Time: 10181.477 ms (00:10.181)
3
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
Time: 145023.796 ms (02:25.024)
Time: 8035.337 ms (00:08.035)
Time: 7865.698 ms (00:07.866)
129
benchmark/timescaledb/log_compressed
Normal file
@ -0,0 +1,129 @@
Time: 1784.299 ms (00:01.784)
Time: 1223.461 ms (00:01.223)
Time: 1200.665 ms (00:01.201)
Time: 22730.141 ms (00:22.730)
Time: 1379.227 ms (00:01.379)
Time: 1361.595 ms (00:01.362)
Time: 29888.235 ms (00:29.888)
Time: 3160.611 ms (00:03.161)
Time: 3207.363 ms (00:03.207)
Time: 53922.569 ms (00:53.923)
Time: 2301.456 ms (00:02.301)
Time: 2277.009 ms (00:02.277)
Time: 45363.999 ms (00:45.364)
Time: 43765.848 ms (00:43.766)
Time: 44066.621 ms (00:44.067)
Time: 172945.633 ms (02:52.946)
Time: 136944.098 ms (02:16.944)
Time: 138268.413 ms (02:18.268)
Time: 16764.579 ms (00:16.765)
Time: 2579.907 ms (00:02.580)
Time: 2590.390 ms (00:02.590)
Time: 1498.034 ms (00:01.498)
Time: 1434.534 ms (00:01.435)
Time: 1448.123 ms (00:01.448)
Time: 113533.016 ms (01:53.533)
Time: 78465.335 ms (01:18.465)
Time: 80778.839 ms (01:20.779)
Time: 90456.388 ms (01:30.456)
Time: 87050.166 ms (01:27.050)
Time: 88426.851 ms (01:28.427)
Time: 45021.632 ms (00:45.022)
Time: 12486.342 ms (00:12.486)
Time: 12222.489 ms (00:12.222)
Time: 44246.843 ms (00:44.247)
Time: 15606.856 ms (00:15.607)
Time: 15251.554 ms (00:15.252)
Time: 29654.719 ms (00:29.655)
Time: 29441.858 ms (00:29.442)
Time: 29608.141 ms (00:29.608)
Time: 103547.383 ms (01:43.547)
Time: 104733.648 ms (01:44.734)
Time: 105779.016 ms (01:45.779)
Time: 29695.834 ms (00:29.696)
Time: 15395.447 ms (00:15.395)
Time: 15819.650 ms (00:15.820)
Time: 27841.552 ms (00:27.842)
Time: 29521.849 ms (00:29.522)
Time: 27508.521 ms (00:27.509)
Time: 56665.709 ms (00:56.666)
Time: 56459.321 ms (00:56.459)
Time: 56407.620 ms (00:56.408)
Time: 27488.888 ms (00:27.489)
Time: 25557.427 ms (00:25.557)
Time: 25634.140 ms (00:25.634)
Time: 97376.463 ms (01:37.376)
Time: 96047.902 ms (01:36.048)
Time: 99918.341 ms (01:39.918)
Time: 6294.887 ms (00:06.295)
Time: 6407.262 ms (00:06.407)
Time: 6376.369 ms (00:06.376)
Time: 40787.808 ms (00:40.788)
Time: 11206.256 ms (00:11.206)
Time: 11219.871 ms (00:11.220)
Time: 12420.227 ms (00:12.420)
Time: 12548.301 ms (00:12.548)
Time: 12468.458 ms (00:12.468)
Time: 57679.878 ms (00:57.680)
Time: 35466.123 ms (00:35.466)
Time: 35562.064 ms (00:35.562)
Time: 13551.276 ms (00:13.551)
Time: 13417.313 ms (00:13.417)
Time: 13645.287 ms (00:13.645)
Time: 150.297 ms
Time: 55.995 ms
Time: 55.796 ms
Time: 3059.796 ms (00:03.060)
Time: 3038.246 ms (00:03.038)
Time: 3041.210 ms (00:03.041)
Time: 4461.720 ms (00:04.462)
Time: 4446.691 ms (00:04.447)
Time: 4424.526 ms (00:04.425)
Time: 29275.463 ms (00:29.275)
Time: 17558.747 ms (00:17.559)
Time: 17438.621 ms (00:17.439)
Time: 203316.184 ms (03:23.316)
Time: 190037.946 ms (03:10.038)
Time: 189276.624 ms (03:09.277)
Time: 36921.542 ms (00:36.922)
Time: 36963.771 ms (00:36.964)
Time: 36660.406 ms (00:36.660)
Time: 38307.345 ms (00:38.307)
Time: 17597.355 ms (00:17.597)
Time: 17324.776 ms (00:17.325)
Time: 39857.567 ms (00:39.858)
Time: 26776.411 ms (00:26.776)
Time: 26592.819 ms (00:26.593)
Time: 162782.290 ms (02:42.782)
Time: 160722.582 ms (02:40.723)
Time: 162487.263 ms (02:42.487)
Time: 261494.290 ms (04:21.494)
Time: 263594.014 ms (04:23.594)
Time: 260436.201 ms (04:20.436)
Time: 265758.455 ms (04:25.758)
Time: 270087.523 ms (04:30.088)
Time: 266617.218 ms (04:26.617)
Time: 30677.159 ms (00:30.677)
Time: 28933.542 ms (00:28.934)
Time: 29815.271 ms (00:29.815)
Time: 19754.932 ms (00:19.755)
Time: 16851.157 ms (00:16.851)
Time: 16703.289 ms (00:16.703)
Time: 10379.500 ms (00:10.379)
Time: 10267.336 ms (00:10.267)
Time: 10287.944 ms (00:10.288)
Time: 17320.582 ms (00:17.321)
Time: 9786.410 ms (00:09.786)
Time: 9760.578 ms (00:09.761)
Time: 33487.352 ms (00:33.487)
Time: 26056.528 ms (00:26.057)
Time: 25958.258 ms (00:25.958)
Time: 28020.227 ms (00:28.020)
Time: 5609.725 ms (00:05.610)
Time: 5538.744 ms (00:05.539)
Time: 15119.473 ms (00:15.119)
Time: 5057.455 ms (00:05.057)
Time: 5063.154 ms (00:05.063)
Time: 3627.703 ms (00:03.628)
Time: 3645.232 ms (00:03.645)
Time: 3546.855 ms (00:03.547)
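The compressed-chunks log above contains only the psql timing lines, three per query, in the same order as queries.sql. A minimal hedged sketch to aggregate it, assuming GNU grep/awk and the file name `log_compressed` as committed:

#!/bin/bash
# Sum all recorded timings to get total wall time spent in queries.
grep -oP 'Time: \K[0-9.]+' log_compressed |
    awk '{ s += $1 } END { printf "%d runs, total %.1f s\n", NR, s / 1000 }'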
43
benchmark/timescaledb/queries.sql
Normal file
@ -0,0 +1,43 @@
SELECT count(*) FROM {table};
SELECT count(*) FROM {table} WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table};
SELECT sum(UserID) FROM {table};
SELECT COUNT(DISTINCT UserID) FROM {table};
SELECT COUNT(DISTINCT SearchPhrase) FROM {table};
SELECT min(EventDate), max(EventDate) FROM {table};
SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID FROM {table} WHERE UserID = -6101065172474983726;
SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%';
SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
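To try a single query from this file by hand, the `{table}` placeholder has to be substituted first. A minimal sketch, assuming the same `tutorial` database and `hits_100m_obfuscated` table used by benchmark.sh (the query number 5 is just an example):

#!/bin/bash
# Pull one query out of queries.sql, fill in the table name, and time it once.
QUERY=$(grep -v -P '^#' queries.sql | sed -n '5p' | sed -e 's/{table}/hits_100m_obfuscated/')
echo "$QUERY"
sudo -u postgres psql tutorial -t -c '\timing' -c "$QUERY"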
1663
benchmark/timescaledb/usability.md
Normal file
File diff suppressed because it is too large
@ -16,3 +16,7 @@ endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64")
set (ARCH_RISCV64 1)
endif ()

@ -2,11 +2,11 @@
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54456)
SET(VERSION_REVISION 54457)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 11)
SET(VERSION_MINOR 12)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7)
SET(VERSION_DESCRIBE v21.11.1.1-prestable)
SET(VERSION_STRING 21.11.1.1)
SET(VERSION_GITHASH 503a418dedf0011e9040c3a1b6913e0b5488be4c)
SET(VERSION_DESCRIBE v21.12.1.1-prestable)
SET(VERSION_STRING 21.12.1.1)
# end of autochange

@ -18,6 +18,10 @@ option (ENABLE_PCLMULQDQ "Use pclmulqdq instructions on x86_64" 1)
option (ENABLE_POPCNT "Use popcnt instructions on x86_64" 1)
option (ENABLE_AVX "Use AVX instructions on x86_64" 0)
option (ENABLE_AVX2 "Use AVX2 instructions on x86_64" 0)
option (ENABLE_AVX512 "Use AVX512 instructions on x86_64" 0)
option (ENABLE_BMI "Use BMI instructions on x86_64" 0)
option (ENABLE_AVX2_FOR_SPEC_OP "Use avx2 instructions for specific operations on x86_64" 0)
option (ENABLE_AVX512_FOR_SPEC_OP "Use avx512 instructions for specific operations on x86_64" 0)

option (ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries non-portable but more performant code may be generated. This option overrides ENABLE_* options for specific instruction set. Highly not recommended to use." 0)

@ -127,6 +131,57 @@ else ()
if (HAVE_AVX2 AND ENABLE_AVX2)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()

set (TEST_FLAG "-mavx512f -mavx512bw")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _mm512_setzero_epi32();
(void)a;
auto b = _mm512_add_epi16(__m512i(), __m512i());
(void)b;
return 0;
}
" HAVE_AVX512)
if (HAVE_AVX512 AND ENABLE_AVX512)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()

set (TEST_FLAG "-mbmi")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _blsr_u32(0);
(void)a;
return 0;
}
" HAVE_BMI)
if (HAVE_BMI AND ENABLE_BMI)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()

#Limit avx2/avx512 flag for specific source build
set (X86_INTRINSICS_FLAGS "")
if (ENABLE_AVX2_FOR_SPEC_OP)
if (HAVE_BMI)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
endif ()
if (HAVE_AVX AND HAVE_AVX2)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx -mavx2")
endif ()
endif ()

if (ENABLE_AVX512_FOR_SPEC_OP)
set (X86_INTRINSICS_FLAGS "")
if (HAVE_BMI)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
endif ()
if (HAVE_AVX512)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx512f -mavx512bw -mprefer-vector-width=256")
endif ()
endif ()
endif ()

cmake_pop_check_state ()
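For reference, these toggles are plain CMake cache options; a hypothetical configure invocation enabling the AVX2/BMI paths checked above might look like the following (the build directory is an assumption; the compile checks above still decide whether the flags are actually applied):

#!/bin/bash
# Configure a build with the optional x86_64 instruction-set extensions enabled.
# ENABLE_AVX2 / ENABLE_BMI / ENABLE_AVX2_FOR_SPEC_OP are the cache options
# introduced in the hunk above.
mkdir -p build && cd build
cmake .. -DENABLE_AVX2=1 -DENABLE_BMI=1 -DENABLE_AVX2_FOR_SPEC_OP=1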
@ -10,7 +10,7 @@ if (NOT ENABLE_AMQPCPP)
endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt")
message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal AMQP-CPP library")
set (USE_AMQPCPP 0)
return()

@ -13,7 +13,7 @@ option (USE_INTERNAL_AVRO_LIBRARY
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/avro/lang/c++/CMakeLists.txt")
if (USE_INTERNAL_AVRO_LIBRARY)
message(WARNING "submodule contrib/avro is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/avro is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot find internal avro")
set(USE_INTERNAL_AVRO_LIBRARY 0)
endif()

@ -10,11 +10,11 @@ endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/base64/LICENSE")
set (MISSING_INTERNAL_BASE64_LIBRARY 1)
message (WARNING "submodule contrib/base64 is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/base64 is missing. to fix try run: \n git submodule update --init")
endif ()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/base64")
message (WARNING "submodule contrib/base64 is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/base64 is missing. to fix try run: \n git submodule update --init")
else()
set (BASE64_LIBRARY base64)
set (USE_BASE64 1)

@ -16,7 +16,7 @@ endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/brotli/c/include/brotli/decode.h")
if (USE_INTERNAL_BROTLI_LIBRARY)
message (WARNING "submodule contrib/brotli is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/brotli is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot find internal brotli")
set (USE_INTERNAL_BROTLI_LIBRARY 0)
endif ()

@ -6,7 +6,7 @@ if (NOT ENABLE_BZIP2)
endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/bzip2/bzlib.h")
message (WARNING "submodule contrib/bzip2 is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/bzip2 is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal bzip2 library")
set (USE_NLP 0)
return()

@ -11,7 +11,7 @@ option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/capnproto/CMakeLists.txt")
if(USE_INTERNAL_CAPNP_LIBRARY)
message(WARNING "submodule contrib/capnproto is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/capnproto is missing. to fix try run: \n git submodule update --init")
message(${RECONFIGURE_MESSAGE_LEVEL} "cannot find internal capnproto")
set(USE_INTERNAL_CAPNP_LIBRARY 0)
endif()
@ -34,8 +34,6 @@ endif()
if (CAPNP_LIBRARIES)
set (USE_CAPNP 1)
elseif(NOT MISSING_INTERNAL_CAPNP_LIBRARY)
add_subdirectory(contrib/capnproto-cmake)

set (CAPNP_LIBRARIES capnpc)
set (USE_CAPNP 1)
set (USE_INTERNAL_CAPNP_LIBRARY 1)

@ -14,7 +14,7 @@ if (APPLE)
endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cassandra")
message (ERROR "submodule contrib/cassandra is missing. to fix try run: \n git submodule update --init --recursive")
message (ERROR "submodule contrib/cassandra is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal Cassandra")
set (USE_CASSANDRA 0)
return()

@ -17,7 +17,7 @@ option (USE_INTERNAL_LIBCXX_LIBRARY "Disable to use system libcxx and libcxxabi
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxx/CMakeLists.txt")
if (USE_INTERNAL_LIBCXX_LIBRARY)
message(WARNING "submodule contrib/libcxx is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/libcxx is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libcxx")
set(USE_INTERNAL_LIBCXX_LIBRARY 0)
endif()

@ -6,7 +6,7 @@ endif()
OPTION(ENABLE_CYRUS_SASL "Enable cyrus-sasl" ${DEFAULT_ENABLE_CYRUS_SASL})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cyrus-sasl/README")
message (WARNING "submodule contrib/cyrus-sasl is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/cyrus-sasl is missing. to fix try run: \n git submodule update --init")
set (ENABLE_CYRUS_SASL 0)
endif ()

@ -6,7 +6,7 @@ option (USE_INTERNAL_DATASKETCHES_LIBRARY "Set to FALSE to use system DataSketch
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/CMakeLists.txt")
if (USE_INTERNAL_DATASKETCHES_LIBRARY)
message(WARNING "submodule contrib/datasketches-cpp is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/datasketches-cpp is missing. to fix try run: \n git submodule update --init")
endif()
set(MISSING_INTERNAL_DATASKETCHES_LIBRARY 1)
set(USE_INTERNAL_DATASKETCHES_LIBRARY 0)

@ -1,5 +1,5 @@
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fast_float/include/fast_float/fast_float.h")
message (FATAL_ERROR "submodule contrib/fast_float is missing. to fix try run: \n git submodule update --init --recursive")
message (FATAL_ERROR "submodule contrib/fast_float is missing. to fix try run: \n git submodule update --init")
endif ()

set(FAST_FLOAT_LIBRARY fast_float)

@ -10,7 +10,7 @@ if(NOT ENABLE_FASTOPS)
endif()
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fastops/fastops/fastops.h")
message(WARNING "submodule contrib/fastops is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/fastops is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal fastops library")
set(MISSING_INTERNAL_FASTOPS_LIBRARY 1)
endif()
8
cmake/find/filelog.cmake
Normal file
@ -0,0 +1,8 @@
# StorageFileLog only support Linux platform
if (OS_LINUX)
set (USE_FILELOG 1)
message (STATUS "Using StorageFileLog = 1")
else()
message(STATUS "StorageFileLog is only supported on Linux")
endif ()

@ -26,7 +26,7 @@ option(USE_INTERNAL_GRPC_LIBRARY "Set to FALSE to use system gRPC library instea
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/grpc/CMakeLists.txt")
if(USE_INTERNAL_GRPC_LIBRARY)
message(WARNING "submodule contrib/grpc is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/grpc is missing. to fix try run: \n git submodule update --init")
message(${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal grpc")
set(USE_INTERNAL_GRPC_LIBRARY 0)
endif()

@ -4,7 +4,7 @@ option (USE_INTERNAL_GTEST_LIBRARY "Set to FALSE to use system Google Test inste
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest/CMakeLists.txt")
if (USE_INTERNAL_GTEST_LIBRARY)
message (WARNING "submodule contrib/googletest is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/googletest is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal gtest")
set (USE_INTERNAL_GTEST_LIBRARY 0)
endif ()

@ -11,7 +11,7 @@ option(USE_INTERNAL_H3_LIBRARY "Set to FALSE to use system h3 library instead of
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/h3/src/h3lib/include/h3Index.h")
if(USE_INTERNAL_H3_LIBRARY)
message(WARNING "submodule contrib/h3 is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/h3 is missing. to fix try run: \n git submodule update --init")
message(${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal h3 library")
set(USE_INTERNAL_H3_LIBRARY 0)
endif()

@ -16,7 +16,7 @@ option(USE_INTERNAL_HDFS3_LIBRARY "Set to FALSE to use system HDFS3 instead of b
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include/hdfs/hdfs.h")
if(USE_INTERNAL_HDFS3_LIBRARY)
message(WARNING "submodule contrib/libhdfs3 is missing. to fix try run: \n git submodule update --init --recursive")
message(WARNING "submodule contrib/libhdfs3 is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use internal HDFS3 library")
set(USE_INTERNAL_HDFS3_LIBRARY 0)
endif()

@ -16,7 +16,7 @@ option (USE_INTERNAL_ICU_LIBRARY "Set to FALSE to use system ICU library instead
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/LICENSE")
if (USE_INTERNAL_ICU_LIBRARY)
message (WARNING "submodule contrib/icu is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/icu is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal ICU")
set (USE_INTERNAL_ICU_LIBRARY 0)
endif ()

@ -1,7 +1,7 @@
OPTION(ENABLE_KRB5 "Enable krb5" ${ENABLE_LIBRARIES})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/krb5/README")
message (WARNING "submodule contrib/krb5 is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/krb5 is missing. to fix try run: \n git submodule update --init")
set (ENABLE_KRB5 0)
endif ()

@ -15,7 +15,7 @@ option (USE_INTERNAL_LDAP_LIBRARY "Set to FALSE to use system *LDAP library inst
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openldap/README")
if (USE_INTERNAL_LDAP_LIBRARY)
message (WARNING "Submodule contrib/openldap is missing. To fix try running:\n git submodule update --init --recursive")
message (WARNING "Submodule contrib/openldap is missing. To fix try running:\n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal LDAP library")
endif ()

@ -16,7 +16,7 @@ endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h")
if (USE_INTERNAL_LIBGSASL_LIBRARY)
message (WARNING "submodule contrib/libgsasl is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/libgsasl is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libgsasl")
set (USE_INTERNAL_LIBGSASL_LIBRARY 0)
endif ()

@ -5,14 +5,14 @@ if (NOT ENABLE_LIBPQXX)
endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libpqxx/CMakeLists.txt")
message (WARNING "submodule contrib/libpqxx is missing. to fix try run: \n git submodule update --init --recursive")
message (WARNING "submodule contrib/libpqxx is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libpqxx library")
set (USE_LIBPQXX 0)
return()
endif()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libpq/include")
message (ERROR "submodule contrib/libpq is missing. to fix try run: \n git submodule update --init --recursive")
message (ERROR "submodule contrib/libpq is missing. to fix try run: \n git submodule update --init")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libpq needed for libpqxx")
set (USE_LIBPQXX 0)
return()
11
cmake/find/libprotobuf-mutator.cmake
Normal file
@ -0,0 +1,11 @@
|
||||
option(USE_LIBPROTOBUF_MUTATOR "Enable libprotobuf-mutator" ${ENABLE_FUZZING})
|
||||
|
||||
if (NOT USE_LIBPROTOBUF_MUTATOR)
|
||||
return()
|
||||
endif()
|
||||
|
||||
set(LibProtobufMutator_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libprotobuf-mutator")
|
||||
|
||||
if (NOT EXISTS "${LibProtobufMutator_SOURCE_DIR}/README.md")
|
||||
message (ERROR "submodule contrib/libprotobuf-mutator is missing. to fix try run: \n git submodule update --init")
|
||||
endif()
|
@ -5,7 +5,7 @@ if (OS_DARWIN AND COMPILER_GCC)
|
||||
endif()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libuv")
|
||||
message (WARNING "submodule contrib/libuv is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/libuv is missing. to fix try run: \n git submodule update --init")
|
||||
SET(MISSING_INTERNAL_LIBUV_LIBRARY 1)
|
||||
return()
|
||||
endif()
|
||||
|
@ -2,7 +2,7 @@ option (USE_INTERNAL_LIBXML2_LIBRARY "Set to FALSE to use system libxml2 library
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libxml2/libxml.h")
|
||||
if (USE_INTERNAL_LIBXML2_LIBRARY)
|
||||
message (WARNING "submodule contrib/libxml2 is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/libxml2 is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libxml")
|
||||
set (USE_INTERNAL_LIBXML2_LIBRARY 0)
|
||||
endif ()
|
||||
|
@ -12,7 +12,7 @@ if (NOT ENABLE_EMBEDDED_COMPILER)
|
||||
endif()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "submodule /contrib/llvm is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "submodule /contrib/llvm is missing. to fix try run: \n git submodule update --init")
|
||||
endif ()
|
||||
|
||||
set (USE_EMBEDDED_COMPILER 1)
|
||||
|
@ -11,7 +11,7 @@ option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library
|
||||
|
||||
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include/msgpack.hpp")
|
||||
if(USE_INTERNAL_MSGPACK_LIBRARY)
|
||||
message(WARNING "Submodule contrib/msgpack-c is missing. To fix try run: \n git submodule update --init --recursive")
|
||||
message(WARNING "Submodule contrib/msgpack-c is missing. To fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use internal msgpack")
|
||||
set(USE_INTERNAL_MSGPACK_LIBRARY 0)
|
||||
endif()
|
||||
|
@ -16,7 +16,7 @@ option(USE_INTERNAL_MYSQL_LIBRARY "Set to FALSE to use system mysqlclient librar
|
||||
|
||||
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/README")
|
||||
if(USE_INTERNAL_MYSQL_LIBRARY)
|
||||
message(WARNING "submodule contrib/mariadb-connector-c is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message(WARNING "submodule contrib/mariadb-connector-c is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal mysql library")
|
||||
set(USE_INTERNAL_MYSQL_LIBRARY 0)
|
||||
endif()
|
||||
|
@ -7,7 +7,7 @@ if (NOT USE_INTERNAL_NANODBC_LIBRARY)
|
||||
endif ()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/CMakeLists.txt")
|
||||
message (FATAL_ERROR "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (FATAL_ERROR "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init")
|
||||
endif()
|
||||
|
||||
set (NANODBC_LIBRARY nanodbc)
|
||||
|
@ -7,21 +7,21 @@ if (NOT ENABLE_NLP)
|
||||
endif()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libstemmer_c/Makefile")
|
||||
message (WARNING "submodule contrib/libstemmer_c is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/libstemmer_c is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libstemmer_c library, NLP functions will be disabled")
|
||||
set (USE_NLP 0)
|
||||
return()
|
||||
endif ()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/wordnet-blast/CMakeLists.txt")
|
||||
message (WARNING "submodule contrib/wordnet-blast is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/wordnet-blast is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal wordnet-blast library, NLP functions will be disabled")
|
||||
set (USE_NLP 0)
|
||||
return()
|
||||
endif ()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/lemmagen-c/README.md")
|
||||
message (WARNING "submodule contrib/lemmagen-c is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/lemmagen-c is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal lemmagen-c library, NLP functions will be disabled")
|
||||
set (USE_NLP 0)
|
||||
return()
|
||||
|
@ -5,7 +5,7 @@ if (NOT ENABLE_NURAFT)
|
||||
endif()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/NuRaft/CMakeLists.txt")
|
||||
message (WARNING "submodule contrib/NuRaft is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/NuRaft is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal NuRaft library")
|
||||
set (USE_NURAFT 0)
|
||||
return()
|
||||
|
@ -18,7 +18,7 @@ include(cmake/find/snappy.cmake)
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/orc/c++/include/orc/OrcFile.hh")
|
||||
if(USE_INTERNAL_ORC_LIBRARY)
|
||||
message(WARNING "submodule contrib/orc is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message(WARNING "submodule contrib/orc is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal ORC")
|
||||
set(USE_INTERNAL_ORC_LIBRARY 0)
|
||||
endif()
|
||||
|
@ -20,7 +20,7 @@ endif()
|
||||
|
||||
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/CMakeLists.txt")
|
||||
if(USE_INTERNAL_PARQUET_LIBRARY)
|
||||
message(WARNING "submodule contrib/arrow (required for Parquet) is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message(WARNING "submodule contrib/arrow (required for Parquet) is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal parquet library")
|
||||
set(USE_INTERNAL_PARQUET_LIBRARY 0)
|
||||
endif()
|
||||
|
@ -15,7 +15,7 @@ option(USE_INTERNAL_PROTOBUF_LIBRARY "Set to FALSE to use system protobuf instea
|
||||
|
||||
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/protobuf/cmake/CMakeLists.txt")
|
||||
if(USE_INTERNAL_PROTOBUF_LIBRARY)
|
||||
message(WARNING "submodule contrib/protobuf is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message(WARNING "submodule contrib/protobuf is missing. to fix try run: \n git submodule update --init")
|
||||
message(${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal protobuf")
|
||||
set(USE_INTERNAL_PROTOBUF_LIBRARY 0)
|
||||
endif()
|
||||
|
@ -10,7 +10,7 @@ option(USE_INTERNAL_RAPIDJSON_LIBRARY "Set to FALSE to use system rapidjson libr
|
||||
|
||||
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/rapidjson/include/rapidjson/rapidjson.h")
|
||||
if(USE_INTERNAL_RAPIDJSON_LIBRARY)
|
||||
message(WARNING "submodule contrib/rapidjson is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message(WARNING "submodule contrib/rapidjson is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal rapidjson library")
|
||||
set(USE_INTERNAL_RAPIDJSON_LIBRARY 0)
|
||||
endif()
|
||||
|
@ -11,7 +11,7 @@ option (USE_INTERNAL_RDKAFKA_LIBRARY "Set to FALSE to use system librdkafka inst
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cppkafka/CMakeLists.txt")
|
||||
if(USE_INTERNAL_RDKAFKA_LIBRARY)
|
||||
message (WARNING "submodule contrib/cppkafka is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/cppkafka is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal cppkafka")
|
||||
set (USE_INTERNAL_RDKAFKA_LIBRARY 0)
|
||||
endif()
|
||||
@ -20,7 +20,7 @@ endif ()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/librdkafka/CMakeLists.txt")
|
||||
if(USE_INTERNAL_RDKAFKA_LIBRARY OR MISSING_INTERNAL_CPPKAFKA_LIBRARY)
|
||||
message (WARNING "submodule contrib/librdkafka is missing. to fix try run: \n git submodule update --init --recursive")
|
||||
message (WARNING "submodule contrib/librdkafka is missing. to fix try run: \n git submodule update --init")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal rdkafka")
|
||||
set (USE_INTERNAL_RDKAFKA_LIBRARY 0)
|
||||
endif()
|
||||
@@ -2,7 +2,7 @@ option (USE_INTERNAL_RE2_LIBRARY "Set to FALSE to use system re2 library instead

 if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/re2/CMakeLists.txt")
     if(USE_INTERNAL_RE2_LIBRARY)
-        message(WARNING "submodule contrib/re2 is missing. to fix try run: \n git submodule update --init --recursive")
+        message(WARNING "submodule contrib/re2 is missing. to fix try run: \n git submodule update --init")
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal re2 library")
     endif()
     set(USE_INTERNAL_RE2_LIBRARY 0)
@@ -15,7 +15,7 @@ option(USE_INTERNAL_ROCKSDB_LIBRARY "Set to FALSE to use system ROCKSDB library

 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/rocksdb/CMakeLists.txt")
     if (USE_INTERNAL_ROCKSDB_LIBRARY)
-        message (WARNING "submodule contrib is missing. to fix try run: \n git submodule update --init --recursive")
+        message (WARNING "submodule contrib is missing. to fix try run: \n git submodule update --init")
         message(${RECONFIGURE_MESSAGE_LEVEL} "cannot find internal rocksdb")
     endif()
     set (MISSING_INTERNAL_ROCKSDB 1)
@@ -3,7 +3,7 @@ option(ENABLE_S2_GEOMETRY "Enable S2 geometry library" ${ENABLE_LIBRARIES})

 if (ENABLE_S2_GEOMETRY)
     if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/s2geometry")
-        message (WARNING "submodule contrib/s2geometry is missing. to fix try run: \n git submodule update --init --recursive")
+        message (WARNING "submodule contrib/s2geometry is missing. to fix try run: \n git submodule update --init")
         set (ENABLE_S2_GEOMETRY 0)
         set (USE_S2_GEOMETRY 0)
     else()
@@ -23,7 +23,7 @@ if (NOT USE_INTERNAL_AWS_S3_LIBRARY)
 endif()

 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
-    message (WARNING "submodule contrib/aws is missing. to fix try run: \n git submodule update --init --recursive")
+    message (WARNING "submodule contrib/aws is missing. to fix try run: \n git submodule update --init")
     if (USE_INTERNAL_AWS_S3_LIBRARY)
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal S3 library")
     endif ()
@@ -2,7 +2,7 @@ set (SENTRY_LIBRARY "sentry")

 set (SENTRY_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/sentry-native/include")
 if (NOT EXISTS "${SENTRY_INCLUDE_DIR}/sentry.h")
-    message (WARNING "submodule contrib/sentry-native is missing. to fix try run: \n git submodule update --init --recursive")
+    message (WARNING "submodule contrib/sentry-native is missing. to fix try run: \n git submodule update --init")
     if (USE_SENTRY)
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal sentry library")
     endif()
@@ -1,7 +1,7 @@
 option (USE_SIMDJSON "Use simdjson" ${ENABLE_LIBRARIES})

 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include/simdjson.h")
-    message (WARNING "submodule contrib/simdjson is missing. to fix try run: \n git submodule update --init --recursive")
+    message (WARNING "submodule contrib/simdjson is missing. to fix try run: \n git submodule update --init")
     if (USE_SIMDJSON)
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal simdjson library")
     endif()
@@ -5,7 +5,7 @@ if (NOT ENABLE_SQLITE)
 endif()

 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/sqlite-amalgamation/sqlite3.c")
-    message (WARNING "submodule contrib/sqlite3-amalgamation is missing. to fix try run: \n git submodule update --init --recursive")
+    message (WARNING "submodule contrib/sqlite3-amalgamation is missing. to fix try run: \n git submodule update --init")
     message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal sqlite library")
     set (USE_SQLITE 0)
     return()
@@ -13,7 +13,7 @@ option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead

 if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/boringssl/README.md")
     if(USE_INTERNAL_SSL_LIBRARY)
-        message(WARNING "submodule contrib/boringssl is missing. to fix try run: \n git submodule update --init --recursive")
+        message(WARNING "submodule contrib/boringssl is missing. to fix try run: \n git submodule update --init")
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal ssl library")
     endif()
     set(USE_INTERNAL_SSL_LIBRARY 0)
@@ -53,12 +53,7 @@ endif ()
 if (NOT OPENSSL_FOUND AND NOT MISSING_INTERNAL_SSL_LIBRARY)
     set (USE_INTERNAL_SSL_LIBRARY 1)
     set (OPENSSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/boringssl")
-
-    if (ARCH_AMD64)
-        set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
-    elseif (ARCH_AARCH64)
-        set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
-    endif ()
+    set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
     set (OPENSSL_CRYPTO_LIBRARY crypto)
     set (OPENSSL_SSL_LIBRARY ssl)
     set (OPENSSL_FOUND 1)
@@ -2,11 +2,11 @@ option(ENABLE_STATS "Enable StatsLib library" ${ENABLE_LIBRARIES})

 if (ENABLE_STATS)
     if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/stats")
-        message (WARNING "submodule contrib/stats is missing. to fix try run: \n git submodule update --init --recursive")
+        message (WARNING "submodule contrib/stats is missing. to fix try run: \n git submodule update --init")
         set (ENABLE_STATS 0)
         set (USE_STATS 0)
     elseif (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/gcem")
-        message (WARNING "submodule contrib/gcem is missing. to fix try run: \n git submodule update --init --recursive")
+        message (WARNING "submodule contrib/gcem is missing. to fix try run: \n git submodule update --init")
         set (ENABLE_STATS 0)
         set (USE_STATS 0)
     else()
@@ -2,7 +2,7 @@ option (USE_INTERNAL_XZ_LIBRARY "Set to OFF to use system xz (lzma) library inst

 if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api/lzma.h")
     if(USE_INTERNAL_XZ_LIBRARY)
-        message(WARNING "submodule contrib/xz is missing. to fix try run: \n git submodule update --init --recursive")
+        message(WARNING "submodule contrib/xz is missing. to fix try run: \n git submodule update --init")
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal xz (lzma) library")
         set(USE_INTERNAL_XZ_LIBRARY 0)
     endif()
@@ -5,5 +5,5 @@ if (NOT USE_YAML_CPP)
 endif()

 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/yaml-cpp/README.md")
-    message (ERROR "submodule contrib/yaml-cpp is missing. to fix try run: \n git submodule update --init --recursive")
+    message (ERROR "submodule contrib/yaml-cpp is missing. to fix try run: \n git submodule update --init")
 endif()
@@ -12,7 +12,7 @@ endif ()

 if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/${INTERNAL_ZLIB_NAME}/zlib.h")
     if(USE_INTERNAL_ZLIB_LIBRARY)
-        message(WARNING "submodule contrib/${INTERNAL_ZLIB_NAME} is missing. to fix try run: \n git submodule update --init --recursive")
+        message(WARNING "submodule contrib/${INTERNAL_ZLIB_NAME} is missing. to fix try run: \n git submodule update --init")
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal zlib library")
     endif()
     set(USE_INTERNAL_ZLIB_LIBRARY 0)
@@ -2,7 +2,7 @@ option (USE_INTERNAL_ZSTD_LIBRARY "Set to FALSE to use system zstd library inste

 if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib/zstd.h")
     if(USE_INTERNAL_ZSTD_LIBRARY)
-        message(WARNING "submodule contrib/zstd is missing. to fix try run: \n git submodule update --init --recursive")
+        message(WARNING "submodule contrib/zstd is missing. to fix try run: \n git submodule update --init")
         message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal zstd library")
         set(USE_INTERNAL_ZSTD_LIBRARY 0)
     endif()
@@ -5,7 +5,7 @@ set (DEFAULT_LIBS "-nodefaultlibs")

 # We need builtins from Clang's RT even without libcxx - for ubsan+int128.
 # See https://bugs.llvm.org/show_bug.cgi?id=16404
-if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64))
+if (COMPILER_CLANG AND NOT CMAKE_CROSSCOMPILING)
     execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
 else ()
     set (BUILTINS_LIBRARY "-lgcc")
@@ -14,6 +14,8 @@ endif ()
 if (OS_ANDROID)
     # pthread and rt are included in libc
     set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl")
+elseif (USE_MUSL)
+    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -static -lc")
 else ()
     set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")
 endif ()
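Aside: for context, a minimal sketch of how DEFAULT_LIBS is consumed, assembled from the surrounding hunks (the CXX line is assumed symmetric to the C line shown in the next hunk's context). -nodefaultlibs strips the toolchain's implicit link set, and the computed list is then installed as the standard libraries for every final link, which is what lets the musl branch substitute a static libc:

# Sketch, assuming the surrounding variables:
set (DEFAULT_LIBS "-nodefaultlibs")
# Non-musl case; the musl branch above swaps in "-static -lc" instead.
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} -lc -lm -lrt -lpthread -ldl")
# Every C/C++ link line then receives exactly this list and nothing else.
set (CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set (CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})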
@@ -26,7 +28,7 @@ set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
 # glibc-compatibility library relies to constant version of libc headers
 # (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
 # This is for x86_64. For other architectures we have separate toolchains.
-if (ARCH_AMD64 AND NOT_UNBUNDLED)
+if (ARCH_AMD64 AND NOT_UNBUNDLED AND NOT CMAKE_CROSSCOMPILING)
     set(CMAKE_C_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
     set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
 endif ()
@@ -37,8 +39,10 @@ set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)

 if (NOT OS_ANDROID)
-    # Our compatibility layer doesn't build under Android, many errors in musl.
-    add_subdirectory(base/glibc-compatibility)
+    if (NOT USE_MUSL)
+        # Our compatibility layer doesn't build under Android, many errors in musl.
+        add_subdirectory(base/glibc-compatibility)
+    endif ()
     add_subdirectory(base/harmful)
 endif ()

32
cmake/linux/toolchain-ppc64le.cmake
Normal file
@@ -0,0 +1,32 @@
+set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+
+set (CMAKE_SYSTEM_NAME "Linux")
+set (CMAKE_SYSTEM_PROCESSOR "ppc64le")
+set (CMAKE_C_COMPILER_TARGET "ppc64le-linux-gnu")
+set (CMAKE_CXX_COMPILER_TARGET "ppc64le-linux-gnu")
+set (CMAKE_ASM_COMPILER_TARGET "ppc64le-linux-gnu")
+
+set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le")
+
+set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
+
+find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
+find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")
+
+set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
+set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)
+
+set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+
+set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
+
+set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
+set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
+
+set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
+
+set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
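Aside: a toolchain file like this is consumed at the very first configure run; the invocation below is an assumption for illustration, not something shown in this diff.

# Hypothetical usage:
#   cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-ppc64le.cmake -B build-ppc64le -S .
# CMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY makes try_compile() checks build
# static libraries instead of executables, avoiding link failures against the
# cross sysroot, and the HAS_*_EXITCODE cache entries pre-answer try_run()
# probes that cannot execute on the build host.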
32
cmake/linux/toolchain-riscv64.cmake
Normal file
@@ -0,0 +1,32 @@
+set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+
+set (CMAKE_SYSTEM_NAME "Linux")
+set (CMAKE_SYSTEM_PROCESSOR "riscv64")
+set (CMAKE_C_COMPILER_TARGET "riscv64-linux-gnu")
+set (CMAKE_CXX_COMPILER_TARGET "riscv64-linux-gnu")
+set (CMAKE_ASM_COMPILER_TARGET "riscv64-linux-gnu")
+
+set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-riscv64")
+
+set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
+
+find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
+find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")
+
+set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
+set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)
+
+set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+
+set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "" FORCE)
+
+set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
+set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=bfd")
+
+set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
+
+set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
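Aside: both toolchain files locate the LLVM binutils with an ordered candidate list; find_program() returns the first name found on PATH, so an unversioned llvm-ar wins over versioned fallbacks. A reduced sketch, with an explicit not-found guard added that the diff itself does not carry:

find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12")
if (NOT LLVM_AR_PATH)
    message (FATAL_ERROR "llvm-ar not found in PATH")  # added guard, not in the diff
endif ()
set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)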
35
cmake/linux/toolchain-x86_64-musl.cmake
Normal file
@@ -0,0 +1,35 @@
+set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+
+set (CMAKE_SYSTEM_NAME "Linux")
+set (CMAKE_SYSTEM_PROCESSOR "x86_64")
+set (CMAKE_C_COMPILER_TARGET "x86_64-linux-musl")
+set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-musl")
+set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-musl")
+
+set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64-musl")
+
+set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
+
+find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
+find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9")
+
+set (CMAKE_AR "${LLVM_AR_PATH}" CACHE FILEPATH "" FORCE)
+set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}" CACHE FILEPATH "" FORCE)
+
+set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+
+set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
+
+set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
+set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
+
+set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
+
+set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
+
+set (USE_MUSL 1)
+add_definitions(-DUSE_MUSL=1)
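Aside: the USE_MUSL flag set at the end of this toolchain file is what the earlier hunks branch on (the -static -lc link line and the glibc-compatibility exclusion), while add_definitions() additionally exposes it to C/C++ sources as a preprocessor macro. A reduced sketch of the flow, assuming the same variable names:

# In the toolchain file: declare the libc flavour ...
set (USE_MUSL 1)
add_definitions(-DUSE_MUSL=1)
# ... and in ordinary build scripts, branch on it later:
if (USE_MUSL)
    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -static -lc")
endif ()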
@@ -20,40 +20,30 @@ endif ()
 if (CMAKE_CROSSCOMPILING)
     if (OS_DARWIN)
         # FIXME: broken dependencies
         set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
         set (ENABLE_GRPC OFF CACHE INTERNAL "") # no protobuf -> no grpc

         set (USE_SNAPPY OFF CACHE INTERNAL "")
         set (ENABLE_PARQUET OFF CACHE INTERNAL "") # no snappy and protobuf -> no parquet
         set (ENABLE_ORC OFF CACHE INTERNAL "") # no arrow (parquet) -> no orc

         set (ENABLE_ICU OFF CACHE INTERNAL "")
         set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
     elseif (OS_LINUX OR OS_ANDROID)
         if (ARCH_AARCH64)
             # FIXME: broken dependencies
             set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
             set (ENABLE_GRPC OFF CACHE INTERNAL "")

             set (ENABLE_PARQUET OFF CACHE INTERNAL "")
             set (ENABLE_ORC OFF CACHE INTERNAL "")

             set (ENABLE_MYSQL OFF CACHE INTERNAL "")
             set (USE_SENTRY OFF CACHE INTERNAL "")
+        elseif (ARCH_PPC64LE)
+            set (ENABLE_GRPC OFF CACHE INTERNAL "")
+            set (USE_SENTRY OFF CACHE INTERNAL "")
         endif ()
     elseif (OS_FREEBSD)
         # FIXME: broken dependencies
         set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
-        set (ENABLE_PARQUET OFF CACHE INTERNAL "")
-        set (ENABLE_ORC OFF CACHE INTERNAL "")
         set (ENABLE_GRPC OFF CACHE INTERNAL "")

+        set (ENABLE_ORC OFF CACHE INTERNAL "") # no protobuf -> no parquet -> no orc

         set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
     else ()
         message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
     endif ()

     # Don't know why but CXX_STANDARD doesn't work for cross-compilation
-    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++20")

     message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
 endif ()
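Aside: the repeated `OFF CACHE INTERNAL ""` idiom above does two jobs: INTERNAL hides the entry from cmake-gui and makes the value sticky across reconfigures, so a feature disabled for a cross target cannot be silently re-enabled by a later option() default. A minimal sketch of that behaviour, with an illustrative variable name that is not part of this diff:

# ENABLE_EXAMPLE is an illustrative name, not a real option in this diff.
set (ENABLE_EXAMPLE OFF CACHE INTERNAL "")
# A later option() with the same name is a no-op: the cache entry wins.
option (ENABLE_EXAMPLE "Example feature" ON)
if (NOT ENABLE_EXAMPLE)
    message (STATUS "ENABLE_EXAMPLE stays OFF despite the option() default")
endif ()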
@@ -8,7 +8,7 @@ endif ()

 if (COMPILER_GCC)
     # Require minimum version of gcc
-    set (GCC_MINIMUM_VERSION 10)
+    set (GCC_MINIMUM_VERSION 11)
     if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION} AND NOT CMAKE_VERSION VERSION_LESS 2.8.9)
         message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.")
     endif ()
@@ -18,6 +18,10 @@ if (COMPILER_GCC)
 elseif (COMPILER_CLANG)
     # Require minimum version of clang/apple-clang
     if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
+        # If you are developer you can figure out what exact versions of AppleClang are Ok,
+        # simply remove the following line.
+        message (FATAL_ERROR "AppleClang is not supported, you should install clang from brew. See the instruction: https://clickhouse.com/docs/en/development/build-osx/")
+
         # AppleClang 10.0.1 (Xcode 10.2) corresponds to LLVM/Clang upstream version 7.0.0
         # AppleClang 11.0.0 (Xcode 11.0) corresponds to LLVM/Clang upstream version 8.0.0
         set (XCODE_MINIMUM_VERSION 10.2)
@@ -31,7 +35,7 @@ elseif (COMPILER_CLANG)
         set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t")
     endif ()
 else ()
-    set (CLANG_MINIMUM_VERSION 9)
+    set (CLANG_MINIMUM_VERSION 12)
     if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION})
         message (FATAL_ERROR "Clang version must be at least ${CLANG_MINIMUM_VERSION}.")
     endif ()
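Aside: both compiler gates follow one pattern — compare the reported compiler version against a minimum and abort configuration early with an actionable message. A reduced sketch, assuming the variables used above:

set (CLANG_MINIMUM_VERSION 12)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION})
    # Failing at configure time beats a cryptic compile error much later.
    message (FATAL_ERROR "Clang version must be at least ${CLANG_MINIMUM_VERSION}.")
endif ()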
94
contrib/CMakeLists.txt
vendored
@@ -1,16 +1,5 @@
 # Third-party libraries may have substandard code.

-# Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default.
-# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will
-# appear not in "contrib/" as originally planned here.
-get_filename_component (_current_dir_name "${CMAKE_CURRENT_LIST_DIR}" NAME)
-if (CMAKE_FOLDER)
-    set (CMAKE_FOLDER "${CMAKE_FOLDER}/${_current_dir_name}")
-else ()
-    set (CMAKE_FOLDER "${_current_dir_name}")
-endif ()
-unset (_current_dir_name)
-
 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")

@@ -49,6 +38,14 @@ add_subdirectory (replxx-cmake)
 add_subdirectory (unixodbc-cmake)
 add_subdirectory (nanodbc-cmake)

+if (USE_INTERNAL_CAPNP_LIBRARY AND NOT MISSING_INTERNAL_CAPNP_LIBRARY)
+    add_subdirectory(capnproto-cmake)
+endif ()
+
+if (ENABLE_FUZZING)
+    add_subdirectory (libprotobuf-mutator-cmake)
+endif()
+
 if (USE_YAML_CPP)
     add_subdirectory (yaml-cpp-cmake)
 endif()
@@ -278,7 +275,7 @@ if (USE_FASTOPS)
 endif()

 if (USE_AMQPCPP OR USE_CASSANDRA)
-    add_subdirectory (libuv)
+    add_subdirectory (libuv-cmake)
 endif()
 if (USE_AMQPCPP)
     add_subdirectory (amqpcpp-cmake)
@@ -348,3 +345,76 @@ endif()
 if (USE_S2_GEOMETRY)
     add_subdirectory(s2geometry-cmake)
 endif()
+
+# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
+# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
+# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,
+# instead of controlling it via CMAKE_FOLDER.
+
+function (ensure_target_rooted_in _target _folder)
+    # Skip INTERFACE library targets, since FOLDER property is not available for them.
+    get_target_property (_target_type "${_target}" TYPE)
+    if (_target_type STREQUAL "INTERFACE_LIBRARY")
+        return ()
+    endif ()
+
+    # Read the original FOLDER property value, if any.
+    get_target_property (_folder_prop "${_target}" FOLDER)
+
+    # Normalize that value, so we avoid possible repetitions in folder names.
+
+    if (NOT _folder_prop)
+        set (_folder_prop "")
+    endif ()
+
+    if (CMAKE_FOLDER AND _folder_prop MATCHES "^${CMAKE_FOLDER}/(.*)\$")
+        set (_folder_prop "${CMAKE_MATCH_1}")
+    endif ()
+
+    if (_folder AND _folder_prop MATCHES "^${_folder}/(.*)\$")
+        set (_folder_prop "${CMAKE_MATCH_1}")
+    endif ()
+
+    if (_folder)
+        set (_folder_prop "${_folder}/${_folder_prop}")
+    endif ()
+
+    if (CMAKE_FOLDER)
+        set (_folder_prop "${CMAKE_FOLDER}/${_folder_prop}")
+    endif ()
+
+    # Set the updated FOLDER property value back.
+    set_target_properties ("${_target}" PROPERTIES FOLDER "${_folder_prop}")
+endfunction ()
+
+function (ensure_own_targets_are_rooted_in _dir _folder)
+    get_directory_property (_targets DIRECTORY "${_dir}" BUILDSYSTEM_TARGETS)
+    foreach (_target IN LISTS _targets)
+        ensure_target_rooted_in ("${_target}" "${_folder}")
+    endforeach ()
+endfunction ()
+
+function (ensure_all_targets_are_rooted_in _dir _folder)
+    ensure_own_targets_are_rooted_in ("${_dir}" "${_folder}")
+
+    get_property (_sub_dirs DIRECTORY "${_dir}" PROPERTY SUBDIRECTORIES)
+    foreach (_sub_dir IN LISTS _sub_dirs)
+        ensure_all_targets_are_rooted_in ("${_sub_dir}" "${_folder}")
+    endforeach ()
+endfunction ()
+
+function (organize_ide_folders_2_level _dir)
+    get_filename_component (_dir_name "${_dir}" NAME)
+    ensure_own_targets_are_rooted_in ("${_dir}" "${_dir_name}")
+
+    # Note, that we respect only first two levels of nesting, we don't want to
+    # reorganize target folders further within each third-party dir.
+
+    get_property (_sub_dirs DIRECTORY "${_dir}" PROPERTY SUBDIRECTORIES)
+    foreach (_sub_dir IN LISTS _sub_dirs)
+        get_filename_component (_sub_dir_name "${_sub_dir}" NAME)
+        ensure_all_targets_are_rooted_in ("${_sub_dir}" "${_dir_name}/${_sub_dir_name}")
+    endforeach ()
+endfunction ()
+
+organize_ide_folders_2_level ("${CMAKE_CURRENT_LIST_DIR}")
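Aside: an illustration of what the helpers above accomplish (the target and folder names here are hypothetical, not from the diff). After organize_ide_folders_2_level() runs, every target defined under an immediate subdirectory of contrib/ carries a FOLDER property of the form "contrib/<subdir>", which GUI IDEs use to group targets in the project tree:

# Hypothetical target, for illustration only (example.c is assumed to exist):
add_library (example_lib STATIC example.c)
# What the helpers effectively arrange for a target from contrib/example-cmake:
set_target_properties (example_lib PROPERTIES FOLDER "contrib/example-cmake")
get_target_property (_f example_lib FOLDER)
message (STATUS "IDE folder for example_lib: ${_f}")   # -> contrib/example-cmake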
Some files were not shown because too many files have changed in this diff.