Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 01:22:04 +00:00)

Commit daceac5c0b: Merge branch 'master' into pr/content-type
.clang-tidy

@@ -144,6 +144,7 @@ Checks: '-*,
    clang-analyzer-cplusplus.SelfAssignment,
    clang-analyzer-deadcode.DeadStores,
    clang-analyzer-cplusplus.Move,
    clang-analyzer-optin.cplusplus.UninitializedObject,
    clang-analyzer-optin.cplusplus.VirtualCall,
    clang-analyzer-security.insecureAPI.UncheckedReturn,
    clang-analyzer-security.insecureAPI.bcmp,
@@ -163,6 +164,8 @@ Checks: '-*,
    clang-analyzer-unix.cstring.NullArg,

    boost-use-to-string,

    alpha.security.cert.env.InvalidPtr,
'
WarningsAsErrors: '*'
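The lines above extend the static-analysis allowlist in .clang-tidy. As a hedged illustration (not part of the commit), any one of the listed clang-analyzer checks can be run locally against a single translation unit via a CMake compile database; the file path below is illustrative:

    # sketch: run one allowlisted check against one file (file path is an example)
    cmake -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=ON .
    clang-tidy -p build --checks='-*,clang-analyzer-cplusplus.SelfAssignment' src/Common/Exception.cpp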
.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 changes)

@@ -1,4 +1,4 @@
-Changelog category (leave one):
+### Changelog category (leave one):
 - New Feature
 - Improvement
 - Bug Fix (user-visible misbehaviour in official stable or prestable release)
@@ -9,7 +9,7 @@ Changelog category (leave one):
 - Not for changelog (changelog entry is not required)


-Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
+### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
 ...
.github/workflows/backport_branches.yml (vendored, 43 changes)

@@ -9,6 +9,18 @@ on: # yamllint disable-line rule:truthy
     branches:
       - 'backport/**'
 jobs:
+  PythonUnitTests:
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Python unit tests
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 -m unittest discover -s . -p '*_test.py'
   DockerHubPushAarch64:
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
@@ -143,8 +155,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -184,8 +196,8 @@ jobs:
       - name: Upload build URLs to artifacts
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -229,8 +241,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -274,8 +286,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -319,8 +331,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -341,10 +353,15 @@ jobs:
     steps:
       - name: Set envs
         run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -360,7 +377,7 @@ jobs:
           sudo rm -fr "$TEMP_PATH"
           mkdir -p "$TEMP_PATH"
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
       - name: Cleanup
         if: always()
         run: |
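The new Set envs step counts the report job's upstream dependencies by serializing the needs context with toJSON and piping it into jq, which then reaches build_report_check.py as "$DEPENDENCIES". A standalone sketch of the same trick, with a sample JSON object standing in for ${{ toJSON(needs) }}:

    # count the keys of a JSON object with jq, as the workflow does for `needs`
    DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
    {"BuilderDebRelease": {"result": "success"}, "BuilderDebAsan": {"result": "success"}}
    EOF
    )
    echo "$DEPENDENCIES"  # prints 2: one per job listed in `needs`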
.github/workflows/docs_release.yml (vendored, new file, 121 lines)

@@ -0,0 +1,121 @@
name: DocsReleaseChecks

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

concurrency:
  group: master-release
  cancel-in-progress: true
on: # yamllint disable-line rule:truthy
  push:
    branches:
      - master
    paths:
      - 'docs/**'
      - 'website/**'
      - 'benchmark/**'
      - 'docker/**'
      - '.github/**'
  workflow_dispatch:
jobs:
  DockerHubPushAarch64:
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/changed_images.json
  DocsRelease:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/docs_release
          REPO_COPY=${{runner.temp}}/docs_release/ClickHouse
          CLOUDFLARE_TOKEN=${{secrets.CLOUDFLARE}}
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          EOF
      - name: Clear repository
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ env.TEMP_PATH }}
      - name: Docs Release
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 docs_release.py
      - name: Cleanup
        if: always()
        run: |
          docker kill "$(docker ps -q)" ||:
          docker rm -f "$(docker ps -a -q)" ||:
          sudo rm -fr "$TEMP_PATH"
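The DocsRelease job writes a multi-line secret into $GITHUB_ENV using GitHub's NAME<<DELIMITER syntax (the documentation link in the step comment describes it). A sketch of the resulting file format, with placeholder values instead of the real secret:

    # what the 'Set envs' step appends to $GITHUB_ENV (values are placeholders)
    cat >> "$GITHUB_ENV" << 'EOF'
    TEMP_PATH=/tmp/docs_release
    ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
    -----BEGIN OPENSSH PRIVATE KEY-----
    (key material placeholder)
    -----END OPENSSH PRIVATE KEY-----
    RCSK
    EOF
    # Everything between "ROBOT_CLICKHOUSE_SSH_KEY<<RCSK" and the closing "RCSK"
    # becomes one multi-line environment variable for later steps of the same job.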
.github/workflows/master.yml (vendored, 80 changes)

@@ -219,8 +219,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -260,8 +260,8 @@ jobs:
       - name: Upload build URLs to artifacts
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -305,8 +305,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -350,8 +350,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -395,8 +395,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -440,8 +440,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -485,8 +485,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -530,8 +530,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -575,8 +575,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -620,8 +620,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -668,8 +668,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -713,8 +713,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -758,8 +758,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -803,8 +803,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -848,8 +848,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -893,8 +893,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -938,8 +938,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -992,10 +992,16 @@ jobs:
     steps:
       - name: Set envs
         run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1011,7 +1017,7 @@ jobs:
           sudo rm -fr "$TEMP_PATH"
           mkdir -p "$TEMP_PATH"
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
       - name: Cleanup
         if: always()
         run: |
.github/workflows/nightly.yml (vendored, 50 changes)

@@ -72,3 +72,53 @@ jobs:
         with:
           name: changed_images
           path: ${{ runner.temp }}/changed_images.json
+  BuilderCoverity:
+    needs: DockerHubPush
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          BUILD_NAME=coverity
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          CHECK_NAME=ClickHouse build check (actions)
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          TEMP_PATH=${{runner.temp}}/build_check
+          EOF
+          echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
+      - name: Download changed images
+        uses: actions/download-artifact@v2
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        id: coverity-checkout
+        uses: actions/checkout@v2
+        with:
+          submodules: 'true'
+          fetch-depth: 0  # otherwise we will have no info about contributors
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
+      - name: Upload Coverity Analysis
+        if: ${{ success() || failure() }}
+        run: |
+          curl --form token="${COVERITY_TOKEN}" \
+            --form email='security+coverity@clickhouse.com' \
+            --form file="@$TEMP_PATH/$BUILD_NAME/clickhouse-scan.tgz" \
+            --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
+            --form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
+            https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
.github/workflows/pull_request.yml (vendored, 80 changes)

@@ -272,8 +272,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -317,8 +317,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -362,8 +362,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -404,8 +404,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -446,8 +446,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -491,8 +491,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -536,8 +536,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -581,8 +581,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -626,8 +626,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -671,8 +671,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -719,8 +719,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -764,8 +764,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -809,8 +809,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -854,8 +854,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -899,8 +899,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -944,8 +944,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -989,8 +989,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -1044,10 +1044,16 @@ jobs:
     steps:
      - name: Set envs
        run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1063,7 +1069,7 @@ jobs:
           sudo rm -fr "$TEMP_PATH"
           mkdir -p "$TEMP_PATH"
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
       - name: Cleanup
         if: always()
         run: |
.github/workflows/release.yml (vendored, 4 changes)

@@ -52,8 +52,8 @@ jobs:
       - name: Check docker clickhouse/clickhouse-server building
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 docker_server.py --release-type auto
-          python3 docker_server.py --release-type auto --no-ubuntu \
+          python3 docker_server.py --release-type auto --version "${{ github.ref }}"
+          python3 docker_server.py --release-type auto --version "${{ github.ref }}" --no-ubuntu \
             --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
       - name: Cleanup
         if: always()
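docker_server.py now receives the triggering git ref through --version. As a hedged, hypothetical illustration (not code from the commit) of how such a ref can be reduced to a docker tag in shell:

    GITHUB_REF="refs/tags/v22.4.1.2305-prestable"   # sample value only
    version="${GITHUB_REF#refs/tags/v}"             # -> 22.4.1.2305-prestable
    echo "clickhouse/clickhouse-server:${version}"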
.github/workflows/release_branches.yml (vendored, 40 changes)

@@ -146,8 +146,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -187,8 +187,8 @@ jobs:
       - name: Upload build URLs to artifacts
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -232,8 +232,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -277,8 +277,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -322,8 +322,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -367,8 +367,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -412,8 +412,8 @@ jobs:
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ env.BUILD_NAME }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
       - name: Cleanup
         if: always()
         run: |
@@ -436,10 +436,16 @@ jobs:
     steps:
       - name: Set envs
         run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -455,7 +461,7 @@ jobs:
           sudo rm -fr "$TEMP_PATH"
           mkdir -p "$TEMP_PATH"
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
       - name: Cleanup
         if: always()
         run: |
CMakeLists.txt

@@ -222,6 +222,12 @@ else ()
     set(NO_WHOLE_ARCHIVE --no-whole-archive)
 endif ()

+option(ENABLE_CURL_BUILD "Enable curl, azure, sentry build on by default except MacOS." ON)
+if (OS_DARWIN)
+    # Disable the curl, azure, senry build on MacOS
+    set (ENABLE_CURL_BUILD OFF)
+endif ()
+
 # Ignored if `lld` is used
 option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.")

@@ -294,14 +300,19 @@ include(cmake/cpu_features.cmake)
 # Enable it explicitly.
 set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")

-# Reproducible builds
-# If turned `ON`, remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE().
-option(ENABLE_BUILD_PATH_MAPPING "Enable remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). It's to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ON)
+# Reproducible builds.
+if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
+    set (ENABLE_BUILD_PATH_MAPPING_DEFAULT OFF)
+else ()
+    set (ENABLE_BUILD_PATH_MAPPING_DEFAULT ON)
+endif ()
+
+option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT})

 if (ENABLE_BUILD_PATH_MAPPING)
     set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
     set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
 endif()
 endif ()

 if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
     # CMake < 3.12 doesn't support setting 20 as a C++ standard version.
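The -ffile-prefix-map flag set under ENABLE_BUILD_PATH_MAPPING rewrites absolute source paths to checkout-relative ones in debug info, so builds from different directories produce byte-identical binaries; the change above turns it off by default for Debug builds, where real paths are more useful to debuggers. A hedged sketch of enabling and spot-checking it (the binary path below is an assumption):

    # sketch: configure with path mapping on, then look for absolute paths in DWARF
    cmake -B build -DENABLE_BUILD_PATH_MAPPING=ON .
    cmake --build build --target clickhouse
    # with mapping on, compilation dirs show as "." rather than /home/user/ClickHouse
    objdump --dwarf=info build/programs/clickhouse | grep -m1 DW_AT_comp_dir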
@@ -828,7 +828,6 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()

    /// Setup signal handlers.
    /// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.

    addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, &handled_signals);
    addSignalHandler({SIGHUP}, closeLogsSignalHandler, &handled_signals);
    addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);
@@ -197,7 +197,6 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log

    Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(color_enabled);
    Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel);
-   logger.warning("Logging " + console_log_level_string + " to console");
    log->setLevel(console_log_level);
    split->addChannel(log, "console");
}
contrib/CMakeLists.txt (vendored, 10 changes)

@@ -119,9 +119,13 @@ add_contrib (fastops-cmake fastops)
 add_contrib (libuv-cmake libuv)
 add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv
 add_contrib (cassandra-cmake cassandra) # requires: libuv
-add_contrib (curl-cmake curl)
-add_contrib (azure-cmake azure)
-add_contrib (sentry-native-cmake sentry-native) # requires: curl
+
+if (ENABLE_CURL_BUILD)
+    add_contrib (curl-cmake curl)
+    add_contrib (azure-cmake azure)
+    add_contrib (sentry-native-cmake sentry-native) # requires: curl
+endif()

 add_contrib (fmtlib-cmake fmtlib)
 add_contrib (krb5-cmake krb5)
 add_contrib (cyrus-sasl-cmake cyrus-sasl) # for krb5
contrib/arrow (vendored, submodule)

@@ -1 +1 @@
-Subproject commit 1d9cc51daa4e7e9fc6926320ef73759818bd736e
+Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
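Submodule bumps like this one change only the pinned commit hash recorded in the superproject. A generic sketch of how such a bump is made and verified locally, using the hash from this diff:

    # update the vendored submodule pointer and confirm what got pinned
    cd contrib/arrow
    git fetch origin && git checkout efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
    cd ../..
    git add contrib/arrow
    git submodule status contrib/arrow   # prints the pinned commit hash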
contrib/curl (vendored, submodule)

@@ -1 +1 @@
-Subproject commit 3b8bbbbd1609c638a3d3d0acb148a33dedb67be3
+Subproject commit 801bd5138ce31aa0d906fa4e2eabfc599d74e793
contrib/curl-cmake/CMakeLists.txt

@@ -32,7 +32,6 @@ set (SRCS
     "${LIBRARY_DIR}/lib/transfer.c"
     "${LIBRARY_DIR}/lib/strcase.c"
     "${LIBRARY_DIR}/lib/easy.c"
-    "${LIBRARY_DIR}/lib/security.c"
     "${LIBRARY_DIR}/lib/curl_fnmatch.c"
     "${LIBRARY_DIR}/lib/fileinfo.c"
     "${LIBRARY_DIR}/lib/wildcard.c"
@@ -115,6 +114,12 @@ set (SRCS
     "${LIBRARY_DIR}/lib/curl_get_line.c"
     "${LIBRARY_DIR}/lib/altsvc.c"
     "${LIBRARY_DIR}/lib/socketpair.c"
+    "${LIBRARY_DIR}/lib/bufref.c"
+    "${LIBRARY_DIR}/lib/dynbuf.c"
+    "${LIBRARY_DIR}/lib/hsts.c"
+    "${LIBRARY_DIR}/lib/http_aws_sigv4.c"
+    "${LIBRARY_DIR}/lib/mqtt.c"
+    "${LIBRARY_DIR}/lib/rename.c"
     "${LIBRARY_DIR}/lib/vauth/vauth.c"
     "${LIBRARY_DIR}/lib/vauth/cleartext.c"
     "${LIBRARY_DIR}/lib/vauth/cram.c"
@@ -131,8 +136,6 @@ set (SRCS
     "${LIBRARY_DIR}/lib/vtls/gtls.c"
     "${LIBRARY_DIR}/lib/vtls/vtls.c"
     "${LIBRARY_DIR}/lib/vtls/nss.c"
-    "${LIBRARY_DIR}/lib/vtls/polarssl.c"
-    "${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c"
     "${LIBRARY_DIR}/lib/vtls/wolfssl.c"
     "${LIBRARY_DIR}/lib/vtls/schannel.c"
     "${LIBRARY_DIR}/lib/vtls/schannel_verify.c"
@@ -141,6 +144,7 @@ set (SRCS
     "${LIBRARY_DIR}/lib/vtls/mbedtls.c"
     "${LIBRARY_DIR}/lib/vtls/mesalink.c"
     "${LIBRARY_DIR}/lib/vtls/bearssl.c"
+    "${LIBRARY_DIR}/lib/vtls/keylog.c"
     "${LIBRARY_DIR}/lib/vquic/ngtcp2.c"
     "${LIBRARY_DIR}/lib/vquic/quiche.c"
     "${LIBRARY_DIR}/lib/vssh/libssh2.c"
contrib/krb5-cmake/CMakeLists.txt

@@ -1,4 +1,4 @@
-set (ENABLE_KRB5_DEFAULT 1)
+set (ENABLE_KRB5_DEFAULT ${ENABLE_LIBRARIES})
 if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING))
     message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments")
     set (ENABLE_KRB5_DEFAULT 0)
@@ -16,6 +16,7 @@ if(NOT AWK_PROGRAM)
 endif()

 set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src")
+set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private")

 set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/util/et/et_name.c"
@@ -90,7 +91,6 @@ set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c"
-    "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c"
@@ -143,11 +143,12 @@ set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c"
-    "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c"
+    "${KRB5_ET_BIN_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
+    "${KRB5_ET_BIN_DIR}/lib/gssapi/generic/gssapi_err_generic.c"

     "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c"
@@ -256,8 +257,8 @@ set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/util/profile/prof_parse.c"
     "${KRB5_SOURCE_DIR}/util/profile/prof_get.c"
     "${KRB5_SOURCE_DIR}/util/profile/prof_set.c"
-    "${KRB5_SOURCE_DIR}/util/profile/prof_err.c"
     "${KRB5_SOURCE_DIR}/util/profile/prof_init.c"
+    "${KRB5_ET_BIN_DIR}/util/profile/prof_err.c"
     "${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c"
     "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c"
     "${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c"
@@ -450,13 +451,12 @@ set(ALL_SRCS

-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.c"

     "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c"
@@ -473,7 +473,7 @@ set(ALL_SRCS
 )

 add_custom_command(
-    OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et"
+    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
     COMMAND /bin/sh
     ./config_script
     ./compile_et.sh
@@ -481,50 +481,17 @@ add_custom_command(
     ${AWK_PROGRAM}
     sed
     >
-    compile_et
+    ${CMAKE_CURRENT_BINARY_DIR}/compile_et
     DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script"
     WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et"
 )

-file(GLOB_RECURSE ET_FILES
-    "${KRB5_SOURCE_DIR}/*.et"
-)
-
-function(preprocess_et out_var)
-    set(result)
-    foreach(in_f ${ARGN})
-        string(REPLACE
-            .et
-            .c
-            F_C
-            ${in_f}
-        )
-        string(REPLACE
-            .et
-            .h
-            F_H
-            ${in_f}
-        )
-
-        get_filename_component(ET_PATH ${in_f} DIRECTORY)
-
-        add_custom_command(OUTPUT ${F_C} ${F_H}
-            COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f}
-            DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et"
-            WORKING_DIRECTORY ${ET_PATH}
-            VERBATIM
-        )
-        list(APPEND result ${F_C})
-    endforeach()
-    set(${out_var} "${result}" PARENT_SCOPE)
-endfunction()
-
 add_custom_command(
-    OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
+    OUTPUT "${KRB5_ET_BIN_DIR}/error_map.h"
     COMMAND perl
     -I../../../util
     ../../../util/gen-map.pl
-    -oerror_map.h
+    -o${KRB5_ET_BIN_DIR}/error_map.h
     NAME=gsserrmap
     KEY=OM_uint32
     VALUE=char*
@@ -536,22 +503,21 @@ add_custom_command(

 add_custom_target(
     ERROR_MAP_H
-    DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
+    DEPENDS "${KRB5_ET_BIN_DIR}/error_map.h"
     VERBATIM
 )

 add_custom_command(
-    OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
-    COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
+    OUTPUT "${KRB5_ET_BIN_DIR}/errmap.h"
+    COMMAND perl -w -I../../../util ../../../util/gen.pl bimap ${KRB5_ET_BIN_DIR}/errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
     WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic"
 )

 add_custom_target(
     ERRMAP_H
-    DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
+    DEPENDS "${KRB5_ET_BIN_DIR}/errmap.h"
     VERBATIM
 )

 add_custom_target(
     KRB_5_H
     DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h"
@@ -567,7 +533,40 @@ add_dependencies(
     KRB_5_H
 )

-preprocess_et(processed_et_files ${ET_FILES})
+#
+# Generate error tables
+#
+function(preprocess_et et_path)
+    string(REPLACE .et .c F_C ${et_path})
+    string(REPLACE .et .h F_H ${et_path})
+    get_filename_component(et_dir ${et_path} DIRECTORY)
+    get_filename_component(et_name ${et_path} NAME_WLE)
+
+    add_custom_command(OUTPUT ${F_C} ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
+        COMMAND perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${et_path}
+        # for #include w/o path (via -iquote)
+        COMMAND ${CMAKE_COMMAND} -E create_symlink ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
+        DEPENDS ${et_path} "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
+        WORKING_DIRECTORY ${et_dir}
+        VERBATIM
+    )
+endfunction()
+
+function(generate_error_tables)
+    file(GLOB_RECURSE ET_FILES "${KRB5_SOURCE_DIR}/*.et")
+    foreach(et_path ${ET_FILES})
+        string(REPLACE ${KRB5_SOURCE_DIR} ${KRB5_ET_BIN_DIR} et_bin_path ${et_path})
+        string(REPLACE / _ et_target_name ${et_path})
+        get_filename_component(et_bin_dir ${et_bin_path} DIRECTORY)
+        add_custom_command(OUTPUT ${et_bin_path}
+            COMMAND ${CMAKE_COMMAND} -E make_directory ${et_bin_dir}
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${et_path} ${et_bin_path}
+            VERBATIM
+        )
+        preprocess_et(${et_bin_path})
+    endforeach()
+endfunction()
+generate_error_tables()

 if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
     add_custom_command(
@@ -634,12 +633,12 @@ file(MAKE_DIRECTORY

 SET(KRBHDEP
     "${KRB5_SOURCE_DIR}/include/krb5/krb5.hin"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.h"
 )

 # cmake < 3.18 does not have 'cat' command
@@ -656,6 +655,11 @@ target_include_directories(_krb5 SYSTEM BEFORE PUBLIC
     "${CMAKE_CURRENT_BINARY_DIR}/include"
 )

+target_compile_options(_krb5 PRIVATE
+    # For '#include "file.h"'
+    -iquote "${CMAKE_CURRENT_BINARY_DIR}/include_private"
+)
+
 target_include_directories(_krb5 PRIVATE
     "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers.
     ${KRB5_SOURCE_DIR}
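krb5 describes its error tables in .et files that the generated compile_et script turns into C sources and headers. The rewritten rules copy each .et file into KRB5_ET_BIN_DIR before running compile_et on the copy, so all generated files land in the build tree and the vendored source tree stays untouched. Roughly what the per-table custom command executes, with illustrative paths (the real command runs from the copied .et file's directory):

    # illustrative sketch only; paths are assumptions, not literal build output
    cp contrib/krb5/src/util/profile/prof_err.et build/include_private/util/profile/
    cd build/include_private/util/profile
    perl ../../../compile_et -d contrib/krb5/src/util/et prof_err.et  # emits prof_err.c and prof_err.h
    ln -sf prof_err.h ../../prof_err.h  # flat symlink so '#include "prof_err.h"' resolves via -iquote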
contrib/poco (vendored, submodule)

@@ -1 +1 @@
-Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1
+Subproject commit 008b16469471d55b176db181756c94e3f14dd2dc
contrib/replxx (vendored, submodule)

@@ -1 +1 @@
-Subproject commit 6f0b6f151ae2a044625ae93acd19ca365fcea64d
+Subproject commit 3fd0e3c9364a589447453d9906d854ebd8d385c5
contrib/unixodbc (vendored, submodule)

@@ -1 +1 @@
-Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168
+Subproject commit a2cd5395e8c7f7390025ec93af5bfebef3fb5fcd
@@ -1,4 +1,3 @@
-# rebuild in #33610
 # docker build -t clickhouse/docs-builder .
 FROM ubuntu:20.04

@@ -20,6 +20,8 @@ ENV LANG=en_US.UTF-8 \
 COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
 COPY --from=glibc-donor /etc/nsswitch.conf /etc/
+COPY entrypoint.sh /entrypoint.sh
+
 ARG TARGETARCH
 RUN arch=${TARGETARCH:-amd64} \
     && case $arch in \
         amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
docker/packager/binary/build.sh

@@ -25,13 +25,23 @@ read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
 env
 cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..

+if [ "coverity" == "$COMBINED_OUTPUT" ]
+then
+    mkdir -p /opt/cov-analysis
+
+    wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1
+    export PATH=$PATH:/opt/cov-analysis/bin
+    cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC"
+    SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int"
+fi
+
 cache_status
 # clear cache stats
 ccache --zero-stats ||:

-# No quotes because I want it to expand to nothing if empty.
-# shellcheck disable=SC2086
-ninja $NINJA_FLAGS clickhouse-bundle
+# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
+$SCAN_WRAPPER ninja $NINJA_FLAGS clickhouse-bundle

 cache_status

@@ -91,6 +101,12 @@ then
     mv "$COMBINED_OUTPUT.tgz" /output
 fi

+if [ "coverity" == "$COMBINED_OUTPUT" ]
+then
+    tar -cv -I pigz -f "coverity-scan.tgz" cov-int
+    mv "coverity-scan.tgz" /output
+fi
+
 # Also build fuzzers if any sanitizer specified
 # if [ -n "$SANITIZER" ]
 # then
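The Coverity integration wraps the normal ninja invocation with cov-build, which records the compilation for analysis, and the nightly workflow then posts the resulting archive to scan.coverity.com. A condensed sketch of the flow the script above adds:

    # configure the capture tool for clang, run the build under cov-build, package the result
    cov-configure --config coverity.config --template --comptype clangcc --compiler "$CC"
    cov-build --config coverity.config --dir cov-int ninja clickhouse-bundle
    tar -cv -I pigz -f coverity-scan.tgz cov-int  # archive later uploaded via curl --form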
docker/packager/packager

@@ -176,6 +176,9 @@ def parse_env_variables(
     if package_type == "performance":
         result.append("COMBINED_OUTPUT=performance")
         cmake_flags.append("-DENABLE_TESTS=0")
+    elif package_type == "coverity":
+        result.append("COMBINED_OUTPUT=coverity")
+        result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
     elif split_binary:
         result.append("COMBINED_OUTPUT=shared_build")

@@ -262,9 +265,8 @@ if __name__ == "__main__":
     # and configs to be used for performance test.
     parser.add_argument(
         "--package-type",
-        choices=("deb", "binary", "performance"),
+        choices=["deb", "binary", "performance", "coverity"],
         required=True,
         help="a build type",
     )
     parser.add_argument(
         "--clickhouse-repo-path",
@@ -330,7 +332,11 @@ if __name__ == "__main__":
     if not os.path.isabs(args.output_dir):
         args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir))

-    image_type = "binary" if args.package_type == "performance" else args.package_type
+    image_type = (
+        "binary"
+        if args.package_type in ("performance", "coverity")
+        else args.package_type
+    )
     image_name = "clickhouse/binary-builder"

     if not os.path.isabs(args.clickhouse_repo_path):
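With the new choice registered, the packager can be pointed at a Coverity build directly. A hypothetical invocation, assuming the flags defined above and an arbitrary output directory:

    # "coverity" reuses the clickhouse/binary-builder image, like the "performance" type
    ./packager --package-type coverity --output-dir /tmp/out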
docker/server/Dockerfile

@@ -94,8 +94,9 @@ RUN arch=${TARGETARCH:-amd64} \
     && apt-get update \
     && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
     && for package in ${PACKAGES}; do \
-        apt-get install --allow-unauthenticated --yes --no-install-recommends "${package}=${VERSION}" || exit 1 \
+        packages="${packages} ${package}=${VERSION}" \
     ; done \
+    && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
     ; fi \
     && clickhouse-local -q 'SELECT * FROM system.build_options' \
     && rm -rf \
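The change above collects all package names first and installs them in a single apt-get transaction, so dependency resolution happens once over a consistent set instead of once per package. A standalone sketch of the batching pattern, with an illustrative package list:

    # accumulate pinned package specs, then install them all at once
    PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
    VERSION="22.4.1.2305"   # sample version
    packages=""
    for package in ${PACKAGES}; do
        packages="${packages} ${package}=${VERSION}"
    done
    # shellcheck disable=SC2086  # intentional word splitting of $packages
    apt-get install --yes --no-install-recommends ${packages} || exit 1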
@@ -226,7 +226,6 @@ quit
     --receive_data_timeout_ms=10000 \
     --stacktrace \
     --query-fuzzer-runs=1000 \
-    --testmode \
     --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
     $NEW_TESTS_OPT \
     > >(tail -n 100000 > fuzzer.log) \
@@ -1,8 +1,10 @@
 # docker build -t clickhouse/mysql-js-client .
 # MySQL JavaScript client docker container

-FROM node:8
+FROM node:16.14.2

+WORKDIR /usr/app
+
 RUN npm install mysql

-COPY ./test.js test.js
+COPY ./test.js ./test.js
@@ -96,7 +96,7 @@ else
     clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
     clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
     clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
-    clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits"
+    clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
 fi

 clickhouse-client --query "SHOW TABLES FROM test"
@@ -139,7 +139,7 @@ pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhous
 # directly
 # - even though ci auto-compress some files (but not *.tsv) it does this only
 # for files >64MB, we want this files to be compressed explicitly
-for table in query_log zookeeper_log trace_log
+for table in query_log zookeeper_log trace_log transactions_info_log
 do
     clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz &
     if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@ -106,17 +106,6 @@ function stop()
|
||||
|
||||
function start()
|
||||
{
|
||||
# Rename existing log file - it will be more convenient to read separate files for separate server runs.
|
||||
if [ -f '/var/log/clickhouse-server/clickhouse-server.log' ]
|
||||
then
|
||||
log_file_counter=1
|
||||
while [ -f "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" ]
|
||||
do
|
||||
log_file_counter=$((log_file_counter + 1))
|
||||
done
|
||||
mv '/var/log/clickhouse-server/clickhouse-server.log' "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}"
|
||||
fi
|
||||
|
||||
counter=0
|
||||
until clickhouse-client --query "SELECT 1"
|
||||
do
|
||||
@ -190,6 +179,8 @@ clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordin
|
||||
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
|
||||
|
||||
start
|
||||
|
||||
clickhouse-client --query "SHOW TABLES FROM datasets"
|
||||
@ -205,6 +196,8 @@ clickhouse-client --query "SHOW TABLES FROM test"
|
||||
|| echo -e 'Test script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log
|
||||
|
||||
start
|
||||
|
||||
clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
||||

@ -263,10 +256,12 @@ mkdir previous_release_package_folder
clickhouse-client --query="SELECT version()" | ./download_previous_release && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv

stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log

if [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
then
echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv
stop

# Uninstall current packages
dpkg --remove clickhouse-client

@ -289,7 +284,7 @@ then
install_packages package_folder

mkdir tmp_stress_output

./stress --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
&& echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv

@ -297,8 +292,9 @@ then

clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables"

stop

stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log

# Start new server
configure
start 500

@ -310,8 +306,9 @@ then

# Let the server run for a while before checking log.
sleep 60

stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.clean.log

# Error messages (we should ignore some errors)
echo "Check for Error messages in server log:"

@ -332,8 +329,8 @@ then
-e "Code: 1000, e.code() = 111, Connection refused" \
-e "UNFINISHED" \
-e "Renaming unexpected part" \
/var/log/clickhouse-server/clickhouse-server.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
/var/log/clickhouse-server/clickhouse-server.backward.*.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Remove file bc_check_error_messages.txt if it's empty

@ -348,13 +345,13 @@ then
rm -f /test_output/tmp

# OOM
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
&& echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
&& echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Logical errors
echo "Check for Logical errors in server log:"
zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_logical_errors.txt \
zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
&& echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv

@ -362,19 +359,18 @@ then
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt

# Crash
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
&& echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv

# It also checks for crash without stacktrace (printed by watchdog)
echo "Check for Fatal message in server log:"
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_fatal_messages.txt \
&& echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
&& echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Remove file bc_check_fatal_messages.txt if it's empty
[ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt

else
echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv
fi

@ -10,7 +10,7 @@ import logging
import time

def get_options(i):
def get_options(i, backward_compatibility_check):
options = []
client_options = []
if 0 < i:

@ -19,7 +19,7 @@ def get_options(i):
if i % 3 == 1:
options.append("--db-engine=Ordinary")

if i % 3 == 2:
if i % 3 == 2 and not backward_compatibility_check:
options.append('''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
client_options.append('allow_experimental_database_replicated=1')

@ -57,7 +57,7 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_t
pipes = []
for i in range(0, len(output_paths)):
f = open(output_paths[i], 'w')
full_command = "{} {} {} {} {}".format(cmd, get_options(i), global_time_limit_option, skip_tests_option, backward_compatibility_check_option)
full_command = "{} {} {} {} {}".format(cmd, get_options(i, backward_compatibility_check), global_time_limit_option, skip_tests_option, backward_compatibility_check_option)
logging.info("Run func tests '%s'", full_command)
p = Popen(full_command, shell=True, stdout=f, stderr=f)
pipes.append(p)

@ -1,8 +0,0 @@
position: 50
label: 'Reference Guides'
collapsible: true
collapsed: true
link:
type: generated-index
title: Reference Guides
slug: /en

9
docs/en/commercial/cloud.md
Normal file
@ -0,0 +1,9 @@
---
toc_priority: 1
toc_title: Cloud
---

# ClickHouse Cloud Service {#clickhouse-cloud-service}

!!! info "Info"
    Detailed public description for ClickHouse cloud services is not ready yet, please [contact us](https://clickhouse.com/company/#contact) to learn more.

13
docs/en/commercial/index.md
Normal file
@ -0,0 +1,13 @@
---
toc_folder_title: Commercial
toc_priority: 70
toc_title: Introduction
---

# ClickHouse Commercial Services {#clickhouse-commercial-services}

Service categories:

- [Cloud](../commercial/cloud.md)
- [Support](../commercial/support.md)

9
docs/en/commercial/support.md
Normal file
@ -0,0 +1,9 @@
---
toc_priority: 3
toc_title: Support
---

# ClickHouse Commercial Support Service {#clickhouse-commercial-support-service}

!!! info "Info"
    Detailed public description for ClickHouse support services is not ready yet, please [contact us](https://clickhouse.com/company/#contact) to learn more.

@ -1,7 +0,0 @@
position: 100
label: 'Building ClickHouse'
collapsible: true
collapsed: true
link:
type: generated-index
title: Building ClickHouse

@ -1,9 +1,3 @@
---
sidebar_label: Adding Test Queries
sidebar_position: 63
description: Instructions on how to add a test case to ClickHouse continuous integration
---

# How to add test queries to ClickHouse CI

ClickHouse has hundreds (or even thousands) of features. Every commit gets checked by a complex set of tests containing many thousands of test cases.

@ -1,12 +1,11 @@
---
sidebar_label: Architecture Overview
sidebar_position: 62
toc_priority: 62
toc_title: Architecture Overview
---

# Overview of ClickHouse Architecture
# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}

ClickHouse is a true column-oriented DBMS. Data is stored by columns, and during the execution of arrays (vectors or chunks of columns).
Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called “vectorized query execution” and it helps lower the cost of actual data processing.
ClickHouse is a true column-oriented DBMS. Data is stored by columns, and during the execution of arrays (vectors or chunks of columns). Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called “vectorized query execution” and it helps lower the cost of actual data processing.

> This idea is nothing new. It dates back to the `APL` (A programming language, 1957) and its descendants: `A +` (APL dialect), `J` (1990), `K` (1993), and `Q` (programming language from Kx Systems, 2003). Array programming is used in scientific data processing. Neither is this idea something new in relational databases: for example, it is used in the `VectorWise` system (also known as Actian Vector Analytic Database by Actian Corporation).

@ -155,9 +154,8 @@ The server initializes the `Context` class with the necessary environment for qu

We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we do not want to maintain it eternally, and we are removing support for old versions after about one year.

:::note
For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical.
:::
!!! note "Note"
    For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical.

## Distributed Query Execution {#distributed-query-execution}

@ -195,8 +193,7 @@ Replication is physical: only compressed parts are transferred between nodes, no

Besides, each replica stores its state in ZooKeeper as the set of parts and its checksums. When the state on the local filesystem diverges from the reference state in ZooKeeper, the replica restores its consistency by downloading missing and broken parts from other replicas. When there is some unexpected or broken data in the local filesystem, ClickHouse does not remove it, but moves it to a separate directory and forgets it.

:::note
The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically.
:::
!!! note "Note"
    The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically.

[Original article](https://clickhouse.com/docs/en/development/architecture/)
{## [Original article](https://clickhouse.com/docs/en/development/architecture/) ##}

@ -1,13 +1,12 @@
---
sidebar_label: Source Code Browser
sidebar_position: 72
description: Various ways to browse and edit the source code
toc_priority: 72
toc_title: Source Code Browser
---

# Browse ClickHouse Source Code
# Browse ClickHouse Source Code {#browse-clickhouse-source-code}

You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
You can use **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.

Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.

If you’re interested what IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favorite IDE. Vim and Emacs also count.
If you’re interested what IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favourite IDE. Vim and Emacs also count.

@ -1,12 +1,11 @@
---
sidebar_position: 67
sidebar_label: Build on Linux for AARCH64 (ARM64)
toc_priority: 67
toc_title: Build on Linux for AARCH64 (ARM64)
---

# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture
# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}

This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture.
This is intended for continuous integration checks that run on Linux servers.
This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.

The cross-build for AARCH64 is based on the [Build instructions](../development/build.md), follow them first.

@ -1,12 +1,11 @@
---
sidebar_position: 66
sidebar_label: Build on Linux for Mac OS X
toc_priority: 66
toc_title: Build on Linux for Mac OS X
---

# How to Build ClickHouse on Linux for Mac OS X
# How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x}

This is for the case when you have a Linux machine and want to use it to build `clickhouse` binary that will run on OS X.
This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](../development/build-osx.md).
This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on OS X. This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](../development/build-osx.md).

The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first.

@ -1,9 +1,9 @@
---
sidebar_position: 68
sidebar_label: Build on Linux for RISC-V 64
toc_priority: 68
toc_title: Build on Linux for RISC-V 64
---

# How to Build ClickHouse on Linux for RISC-V 64 Architecture
# How to Build ClickHouse on Linux for RISC-V 64 Architecture {#how-to-build-clickhouse-on-linux-for-risc-v-64-architecture}

As of writing (11.11.2021) building for risc-v is considered to be highly experimental. Not all features can be enabled.

@ -1,21 +1,16 @@
---
sidebar_position: 65
sidebar_label: Build on Mac OS X
description: How to build ClickHouse on Mac OS X
toc_priority: 65
toc_title: Build on Mac OS X
---

# How to Build ClickHouse on Mac OS X
# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}

:::info You don't have to build ClickHouse yourself!
You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start). Follow **macOS (Intel)** or **macOS (Apple silicon)** installation instructions.
:::
!!! info "You don't have to build ClickHouse yourself"
    You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start).
    Follow `macOS (Intel)` or `macOS (Apple silicon)` installation instructions.

Build should work on x86_64 (Intel) and arm64 (Apple silicon) based macOS 10.15 (Catalina) and higher with Homebrew's vanilla Clang.
It is always recommended to use vanilla `clang` compiler.

:::note
It is possible to use XCode's `apple-clang` or `gcc`, but it's strongly discouraged.
:::
It is always recommended to use vanilla `clang` compiler. It is possible to use XCode's `apple-clang` or `gcc` but it's strongly discouraged.

## Install Homebrew {#install-homebrew}

@ -94,9 +89,8 @@ cmake --build . --config RelWithDebInfo

If you intend to run `clickhouse-server`, make sure to increase the system’s maxfiles variable.

:::note
You’ll need to use sudo.
:::
!!! info "Note"
    You’ll need to use sudo.

To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the following content:

@ -1,10 +1,9 @@
---
sidebar_position: 64
sidebar_label: Build on Linux
description: How to build ClickHouse on Linux
toc_priority: 64
toc_title: Build on Linux
---

# How to Build ClickHouse on Linux
# How to Build ClickHouse on Linux {#how-to-build-clickhouse-for-development}

Supported platforms:

@ -1,7 +1,6 @@
---
sidebar_position: 62
sidebar_label: Continuous Integration Checks
description: When you submit a pull request, some automated checks are ran for your code by the ClickHouse continuous integration (CI) system
toc_priority: 62
toc_title: Continuous Integration Checks
---

# Continuous Integration Checks

@ -72,6 +71,8 @@ This check means that the CI system started to process the pull request. When it
Performs some simple regex-based checks of code style, using the [`utils/check-style/check-style`](https://github.com/ClickHouse/ClickHouse/blob/master/utils/check-style/check-style) binary (note that it can be run locally).
If it fails, fix the style errors following the [code style guide](style.md).

Python code is checked with [black](https://github.com/psf/black/).

### Report Details
- [Status page example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check.html)
- `output.txt` contains the check resulting errors (invalid tabulation etc), blank page means no errors. [Successful result example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check/output.txt).

@ -151,7 +152,7 @@ checks page](../development/build.md#you-dont-have-to-build-clickhouse), or buil

## Functional Stateful Tests
Runs [stateful functional tests](tests.md#functional-tests). Treat them in the same way as the functional stateless tests. The difference is that they require `hits` and `visits` tables from the [clickstream dataset](../example-datasets/metrica.md) to run.
Runs [stateful functional tests](tests.md#functional-tests). Treat them in the same way as the functional stateless tests. The difference is that they require `hits` and `visits` tables from the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) to run.

## Integration Tests

@ -1,10 +1,9 @@
---
sidebar_position: 71
sidebar_label: Third-Party Libraries
description: A list of third-party libraries used
toc_priority: 71
toc_title: Third-Party Libraries Used
---

# Third-Party Libraries Used
# Third-Party Libraries Used {#third-party-libraries-used}

The list of third-party libraries:

@ -1,12 +1,11 @@
---
sidebar_position: 61
sidebar_label: Getting Started
description: Prerequisites and an overview of how to build ClickHouse
toc_priority: 61
toc_title: For Beginners
---

# Getting Started Guide for Building ClickHouse
# The Beginner ClickHouse Developer Instruction {#the-beginner-clickhouse-developer-instruction}

The building of ClickHouse is supported on Linux, FreeBSD and Mac OS X.
Building of ClickHouse is supported on Linux, FreeBSD and Mac OS X.

If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T.

@ -230,6 +229,25 @@ As simple code editors, you can use Sublime Text or Visual Studio Code, or Kate

Just in case, it is worth mentioning that CLion creates `build` path on its own, it also on its own selects `debug` for build type, for configuration it uses a version of CMake that is defined in CLion and not the one installed by you, and finally, CLion will use `make` to run build tasks instead of `ninja`. This is normal behaviour, just keep that in mind to avoid confusion.

## Debugging

Many graphical IDEs come with an integrated debugger, but you can also use a standalone debugger.

### GDB

### LLDB

# tell LLDB where to find the source code
settings set target.source-map /path/to/build/dir /path/to/source/dir

# configure LLDB to display code before/after currently executing line
settings set stop-line-count-before 10
settings set stop-line-count-after 10

target create ./clickhouse-client
# <set breakpoints here>
process launch -- --query="SELECT * FROM TAB"

## Writing Code {#writing-code}

The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/

10
docs/en/development/index.md
Normal file
@ -0,0 +1,10 @@
---
toc_folder_title: Development
toc_hidden: true
toc_priority: 58
toc_title: hidden
---

# ClickHouse Development {#clickhouse-development}

[Original article](https://clickhouse.com/docs/en/development/) <!--hide-->

@ -1,10 +1,9 @@
---
sidebar_position: 69
sidebar_label: C++ Guide
description: A list of recommendations regarding coding style, naming convention, formatting and more
toc_priority: 69
toc_title: C++ Guide
---

# How to Write C++ Code
# How to Write C++ Code {#how-to-write-c-code}

## General Recommendations {#general-recommendations}

@ -1,12 +1,11 @@
---
sidebar_position: 70
sidebar_label: Testing
description: Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.
toc_priority: 70
toc_title: Testing
---

# ClickHouse Testing
# ClickHouse Testing {#clickhouse-testing}

## Functional Tests
## Functional Tests {#functional-tests}

Functional tests are the most simple and convenient to use. Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.

@ -1,8 +0,0 @@
position: 30
label: 'Database & Table Engines'
collapsible: true
collapsed: true
link:
type: generated-index
title: Database & Table Engines
slug: /en/table-engines

@ -1,9 +1,9 @@
---
sidebar_label: Atomic
sidebar_position: 10
toc_priority: 32
toc_title: Atomic
---

# Atomic
# Atomic {#atomic}

It supports non-blocking [DROP TABLE](#drop-detach-table) and [RENAME TABLE](#rename-table) queries and atomic [EXCHANGE TABLES](#exchange-tables) queries. `Atomic` database engine is used by default.

@ -18,21 +18,14 @@ CREATE DATABASE test [ENGINE = Atomic];
### Table UUID {#table-uuid}

All tables in database `Atomic` have persistent [UUID](../../sql-reference/data-types/uuid.md) and store data in directory `/clickhouse_path/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`, where `xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy` is UUID of the table.
Usually, the UUID is generated automatically, but the user can also explicitly specify the UUID in the same way when creating the table (this is not recommended).

For example:
Usually, the UUID is generated automatically, but the user can also explicitly specify the UUID in the same way when creating the table (this is not recommended). To display the `SHOW CREATE` query with the UUID you can use setting [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil). For example:

```sql
CREATE TABLE name UUID '28f1c61c-2970-457a-bffe-454156ddcfef' (n UInt64) ENGINE = ...;
```

:::note
You can use the [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil) setting to display the UUID with the `SHOW CREATE` query.
:::
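
A minimal sketch of how this looks in practice (session-level; assuming the table created above):

```sql
-- enable UUID display in SHOW CREATE output
SET show_table_uuid_in_table_create_query_if_not_nil = 1;

-- the CREATE statement now includes the UUID clause
SHOW CREATE TABLE name;
```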

### RENAME TABLE {#rename-table}

[RENAME](../../sql-reference/statements/rename.md) queries are performed without changing the UUID or moving table data. These queries do not wait for the completion of queries using the table and are executed instantly.
[RENAME](../../sql-reference/statements/rename.md) queries are performed without changing UUID and moving table data. These queries do not wait for the completion of queries using the table and are executed instantly.

### DROP/DETACH TABLE {#drop-detach-table}

@ -6,11 +6,11 @@ toc_title: Introduction

# Database Engines {#database-engines}

Database engines allow you to work with tables. By default, ClickHouse uses the [Atomic](../../engines/database-engines/atomic.md) database engine, which provides configurable [table engines](../../engines/table-engines/index.md) and an [SQL dialect](../../sql-reference/syntax.md).
Database engines allow you to work with tables.

Here is a complete list of available database engines. Follow the links for more details:
By default, ClickHouse uses database engine [Atomic](../../engines/database-engines/atomic.md). It provides configurable [table engines](../../engines/table-engines/index.md) and an [SQL dialect](../../sql-reference/syntax.md).

- [Atomic](../../engines/database-engines/atomic.md)
You can also use the following database engines:

- [MySQL](../../engines/database-engines/mysql.md)

@ -18,6 +18,8 @@ Here is a complete list of available database engines. Follow the links for more

- [Lazy](../../engines/database-engines/lazy.md)

- [Atomic](../../engines/database-engines/atomic.md)

- [PostgreSQL](../../engines/database-engines/postgresql.md)

- [Replicated](../../engines/database-engines/replicated.md)

@ -1,6 +1,6 @@
---
sidebar_label: Lazy
sidebar_position: 20
toc_priority: 31
toc_title: Lazy
---

# Lazy {#lazy}

@ -1,15 +1,16 @@
---
sidebar_label: MaterializedMySQL
sidebar_position: 70
toc_priority: 29
toc_title: MaterializedMySQL
---

# [experimental] MaterializedMySQL
# [experimental] MaterializedMySQL {#materialized-mysql}

:::warning
This is an experimental feature that should not be used in production.
:::
!!! warning "Warning"
    This is an experimental feature that should not be used in production.

Creates a ClickHouse database with all the tables existing in MySQL, and all the data in those tables. The ClickHouse server works as MySQL replica. It reads `binlog` and performs DDL and DML queries.
Creates ClickHouse database with all the tables existing in MySQL, and all the data in those tables.

ClickHouse server works as MySQL replica. It reads binlog and performs DDL and DML queries.

## Creating a Database {#creating-a-database}

@ -30,6 +31,8 @@ ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'passwo

- `max_rows_in_buffer` — Maximum number of rows that data is allowed to cache in memory (for single table and the cache data unable to query). When this number is exceeded, the data will be materialized. Default: `65 505`.
- `max_bytes_in_buffer` — Maximum number of bytes that data is allowed to cache in memory (for single table and the cache data unable to query). When this number is exceeded, the data will be materialized. Default: `1 048 576`.
- `max_rows_in_buffers` — Maximum number of rows that data is allowed to cache in memory (for database and the cache data unable to query). When this number is exceeded, the data will be materialized. Default: `65 505`.
- `max_bytes_in_buffers` — Maximum number of bytes that data is allowed to cache in memory (for database and the cache data unable to query). When this number is exceeded, the data will be materialized. Default: `1 048 576`.
- `max_flush_data_time` — Maximum number of milliseconds that data is allowed to cache in memory (for database and the cache data unable to query). When this time is exceeded, the data will be materialized. Default: `1000`.
- `max_wait_time_when_mysql_unavailable` — Retry interval when MySQL is not available (milliseconds). Negative value disables retry. Default: `1000`.
- `allows_query_when_mysql_lost` — Allows to query a materialized table when MySQL is lost. Default: `0` (`false`).

@ -49,9 +52,8 @@ For the correct work of `MaterializedMySQL`, there are few mandatory `MySQL`-sid
- `default_authentication_plugin = mysql_native_password` since `MaterializedMySQL` can only authorize with this method.
- `gtid_mode = on` since GTID based logging is a mandatory for providing correct `MaterializedMySQL` replication.

:::note
While turning on `gtid_mode` you should also specify `enforce_gtid_consistency = on`.
:::
!!! attention "Attention"
    While turning on `gtid_mode` you should also specify `enforce_gtid_consistency = on`.

## Virtual Columns {#virtual-columns}

@ -74,7 +76,7 @@ When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](
| FLOAT | [Float32](../../sql-reference/data-types/float.md) |
| DOUBLE | [Float64](../../sql-reference/data-types/float.md) |
| DECIMAL, NEWDECIMAL | [Decimal](../../sql-reference/data-types/decimal.md) |
| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
| DATE, NEWDATE | [Date32](../../sql-reference/data-types/date32.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| YEAR | [UInt16](../../sql-reference/data-types/int-uint.md) |

@ -218,14 +220,13 @@ extra care needs to be taken.

You may specify overrides for tables that do not exist yet.

:::warning
It is easy to break replication with table overrides if not used with care. For example:
!!! warning "Warning"
    It is easy to break replication with table overrides if not used with care. For example:

* If an ALIAS column is added with a table override, and a column with the same name is later added to the source
  MySQL table, the converted ALTER TABLE query in ClickHouse will fail and replication stops.
* It is currently possible to add overrides that reference nullable columns where not-nullable are required, such as in
  `ORDER BY` or `PARTITION BY`. This will cause CREATE TABLE queries that will fail, also causing replication to stop.
:::
    * If an ALIAS column is added with a table override, and a column with the same name is later added to the source
      MySQL table, the converted ALTER TABLE query in ClickHouse will fail and replication stops.
    * It is currently possible to add overrides that reference nullable columns where not-nullable are required, such as in
      `ORDER BY` or `PARTITION BY`. This will cause CREATE TABLE queries that will fail, also causing replication to stop.

## Examples of Use {#examples-of-use}

@ -1,6 +1,6 @@
---
sidebar_label: MaterializedPostgreSQL
sidebar_position: 60
toc_priority: 30
toc_title: MaterializedPostgreSQL
---

# [experimental] MaterializedPostgreSQL {#materialize-postgresql}

@ -46,9 +46,7 @@ After `MaterializedPostgreSQL` database is created, it does not automatically de
ATTACH TABLE postgres_database.new_table;
```

:::warning
Before version 22.1, adding a table to replication left an unremoved temporary replication slot (named `{db_name}_ch_replication_slot_tmp`). If attaching tables in ClickHouse version before 22.1, make sure to delete it manually (`SELECT pg_drop_replication_slot('{db_name}_ch_replication_slot_tmp')`). Otherwise disk usage will grow. This issue is fixed in 22.1.
:::
Warning: before version 22.1 adding table to replication left unremoved temprorary replication slot (named `{db_name}_ch_replication_slot_tmp`). If attaching tables in clickhouse version before 22.1, make sure to delete it manually (`SELECT pg_drop_replication_slot('{db_name}_ch_replication_slot_tmp')`). Otherwise disk usage will grow. Issue is fixed in 22.1.

## Dynamically removing tables from replication {#dynamically-removing-table-from-replication}

@ -137,70 +135,69 @@ FROM pg_class
WHERE oid = 'postgres_table'::regclass;
```

:::warning
Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
:::
!!! warning "Warning"
    Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.

## Settings {#settings}

1. `materialized_postgresql_tables_list` {#materialized-postgresql-tables-list}
1. materialized_postgresql_tables_list {#materialized-postgresql-tables-list}

    Sets a comma-separated list of PostgreSQL database tables, which will be replicated via [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) database engine.

    Default value: empty list — means whole PostgreSQL database will be replicated.

2. `materialized_postgresql_schema` {#materialized-postgresql-schema}
2. materialized_postgresql_schema {#materialized-postgresql-schema}

    Default value: empty string. (Default schema is used)

3. `materialized_postgresql_schema_list` {#materialized-postgresql-schema-list}
3. materialized_postgresql_schema_list {#materialized-postgresql-schema-list}

    Default value: empty list. (Default schema is used)

4. `materialized_postgresql_allow_automatic_update` {#materialized-postgresql-allow-automatic-update}
4. materialized_postgresql_allow_automatic_update {#materialized-postgresql-allow-automatic-update}

    Do not use this setting before 22.1 version.

    Allows reloading table in the background, when schema changes are detected. DDL queries on the PostgreSQL side are not replicated via ClickHouse [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) engine, because it is not allowed with PostgreSQL logical replication protocol, but the fact of DDL changes is detected transactionally. In this case, the default behaviour is to stop replicating those tables once DDL is detected. However, if this setting is enabled, then, instead of stopping the replication of those tables, they will be reloaded in the background via database snapshot without data losses and replication will continue for them.

    Possible values:

    - 0 — The table is not automatically updated in the background, when schema changes are detected.
    - 1 — The table is automatically updated in the background, when schema changes are detected.

    Default value: `0`.

5. `materialized_postgresql_max_block_size` {#materialized-postgresql-max-block-size}
5. materialized_postgresql_max_block_size {#materialized-postgresql-max-block-size}

    Sets the number of rows collected in memory before flushing data into PostgreSQL database table.

    Possible values:

    - Positive integer.

    Default value: `65536`.

6. `materialized_postgresql_replication_slot` {#materialized-postgresql-replication-slot}
6. materialized_postgresql_replication_slot {#materialized-postgresql-replication-slot}

    A user-created replication slot. Must be used together with `materialized_postgresql_snapshot`.

7. `materialized_postgresql_snapshot` {#materialized-postgresql-snapshot}
7. materialized_postgresql_snapshot {#materialized-postgresql-snapshot}

    A text string identifying a snapshot, from which [initial dump of PostgreSQL tables](../../engines/database-engines/materialized-postgresql.md) will be performed. Must be used together with `materialized_postgresql_replication_slot`.

    ``` sql
    CREATE DATABASE database1
    ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
    SETTINGS materialized_postgresql_tables_list = 'table1,table2,table3';

    SELECT * FROM database1.table1;
    ```

The settings can be changed, if necessary, using a DDL query. But it is impossible to change the setting `materialized_postgresql_tables_list`. To update the list of tables in this setting use the `ATTACH TABLE` query.

``` sql
ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_block_size = <new_size>;
```

## Notes {#notes}

@ -216,47 +213,47 @@ Please note that this should be used only if it is actually needed. If there is

1. Configure replication slot in PostgreSQL.

    ```yaml
    apiVersion: "acid.zalan.do/v1"
    kind: postgresql
    metadata:
      name: acid-demo-cluster
    spec:
      numberOfInstances: 2
      postgresql:
        parameters:
          wal_level: logical
      patroni:
        slots:
          clickhouse_sync:
            type: logical
            database: demodb
            plugin: pgoutput
    ```

2. Wait for replication slot to be ready, then begin a transaction and export the transaction snapshot identifier:

    ```sql
    BEGIN;
    SELECT pg_export_snapshot();
    ```

3. In ClickHouse create database:

    ```sql
    CREATE DATABASE demodb
    ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
    SETTINGS
        materialized_postgresql_replication_slot = 'clickhouse_sync',
        materialized_postgresql_snapshot = '0000000A-0000023F-3',
        materialized_postgresql_tables_list = 'table1,table2,table3';
    ```

4. End the PostgreSQL transaction once replication to ClickHouse DB is confirmed. Verify that replication continues after failover:

    ```bash
    kubectl exec acid-demo-cluster-0 -c postgres -- su postgres -c 'patronictl failover --candidate acid-demo-cluster-1 --force'
    ```

### Required permissions
|
@ -1,9 +1,9 @@
|
||||
---
|
||||
sidebar_position: 50
|
||||
sidebar_label: MySQL
|
||||
toc_priority: 30
|
||||
toc_title: MySQL
|
||||
---
|
||||
|
||||
# MySQL
|
||||
# MySQL {#mysql}
|
||||
|
||||
Allows to connect to databases on a remote MySQL server and perform `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
|
||||
|
||||
@ -49,6 +49,8 @@ ENGINE = MySQL('host:port', ['database' | database], 'user', 'password')
|
||||
|
||||
All other MySQL data types are converted into [String](../../sql-reference/data-types/string.md).
|
||||
|
||||

Because the ClickHouse `Date` type has a different range from the MySQL `DATE` range, if a MySQL date is out of the ClickHouse `Date` range, you can use the setting `mysql_datatypes_support_level` to modify the mapping from the MySQL date type to the ClickHouse date type: `date2Date32` (convert MySQL `DATE` to ClickHouse `Date32`), `date2String` (convert MySQL `DATE` to ClickHouse `String`; this is usually used when your MySQL data contains dates before 1925), or `default` (convert MySQL `DATE` to ClickHouse `Date`).
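
A rough sketch of applying this setting (the database name and connection parameters below are illustrative, not from this page):

```sql
-- map MySQL DATE values to ClickHouse Date32 for this session
SET mysql_datatypes_support_level = 'date2Date32';

CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password');
```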

[Nullable](../../sql-reference/data-types/nullable.md) is supported.

## Global Variables Support {#global-variables-support}

@ -59,9 +61,8 @@ These variables are supported:
- `version`
- `max_allowed_packet`

:::warning
By now these variables are stubs and don't correspond to anything.
:::
!!! warning "Warning"
    By now these variables are stubs and don't correspond to anything.

Example:

@ -1,6 +1,6 @@
---
sidebar_position: 40
sidebar_label: PostgreSQL
toc_priority: 35
toc_title: PostgreSQL
---

# PostgreSQL {#postgresql}

@ -1,6 +1,6 @@
---
sidebar_position: 30
sidebar_label: Replicated
toc_priority: 36
toc_title: Replicated
---

# [experimental] Replicated {#replicated}

@ -20,9 +20,8 @@ One ClickHouse server can have multiple replicated databases running and updatin
- `shard_name` — Shard name. Database replicas are grouped into shards by `shard_name`.
- `replica_name` — Replica name. Replica names must be different for all replicas of the same shard.

:::warning
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables if no arguments provided, then default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Macro `{uuid}` is unfolded to table's uuid, `{shard}` and `{replica}` are unfolded to values from server config, not from database engine arguments. But in the future, it will be possible to use `shard_name` and `replica_name` of Replicated database.
:::
!!! note "Warning"
    For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables if no arguments provided, then default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Macro `{uuid}` is unfolded to table's uuid, `{shard}` and `{replica}` are unfolded to values from server config, not from database engine arguments. But in the future, it will be possible to use `shard_name` and `replica_name` of Replicated database.

## Specifics and Recommendations {#specifics-and-recommendations}

@ -1,6 +1,6 @@
---
sidebar_position: 55
sidebar_label: SQLite
toc_priority: 32
toc_title: SQLite
---

# SQLite {#sqlite}

15
docs/en/engines/index.md
Normal file
@ -0,0 +1,15 @@
---
toc_folder_title: Engines
toc_hidden: true
toc_priority: 25
toc_title: hidden
---

# ClickHouse Engines {#clickhouse-engines}

There are two key engine kinds in ClickHouse:

- [Table engines](../engines/table-engines/index.md)
- [Database engines](../engines/database-engines/index.md)

{## [Original article](https://clickhouse.com/docs/en/engines/) ##}

@ -1,6 +1,6 @@
---
sidebar_position: 12
sidebar_label: ExternalDistributed
toc_priority: 12
toc_title: ExternalDistributed
---

# ExternalDistributed {#externaldistributed}

@ -51,6 +51,3 @@ You can specify any number of shards and any number of replicas for each shard.
- [MySQL table engine](../../../engines/table-engines/integrations/mysql.md)
- [PostgreSQL table engine](../../../engines/table-engines/integrations/postgresql.md)
- [Distributed table engine](../../../engines/table-engines/special/distributed.md)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/ExternalDistributed/) <!--hide-->

@ -1,6 +1,6 @@
---
sidebar_position: 9
sidebar_label: EmbeddedRocksDB
toc_priority: 9
toc_title: EmbeddedRocksDB
---

# EmbeddedRocksDB Engine {#EmbeddedRocksDB-engine}

@ -1,6 +1,6 @@
---
sidebar_position: 6
sidebar_label: HDFS
toc_priority: 6
toc_title: HDFS
---

# HDFS {#table_engines-hdfs}

@ -98,9 +98,8 @@ Table consists of all the files in both directories (all files should satisfy fo
CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
```

:::warning
If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::
!!! warning "Warning"
    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.

**Example**

@ -1,6 +1,6 @@
---
sidebar_position: 4
sidebar_label: Hive
toc_priority: 4
toc_title: Hive
---

# Hive {#hive}

@ -137,7 +137,7 @@ CREATE TABLE test.test_orc
    `f_array_array_float` Array(Array(Float32)),
    `day` String
)
ENGINE = Hive('thrift://202.168.117.26:9083', 'test', 'test_orc')
ENGINE = Hive('thrift://localhost:9083', 'test', 'test_orc')
PARTITION BY day

```

@ -406,5 +406,3 @@ f_char: hello world
f_bool: true
day: 2021-09-18
```

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/hive/) <!--hide-->

@ -1,6 +1,6 @@
---
sidebar_position: 40
sidebar_label: Integrations
toc_folder_title: Integrations
toc_priority: 1
---

# Table Engines for Integrations {#table-engines-for-integrations}

@ -1,6 +1,6 @@
---
sidebar_position: 3
sidebar_label: JDBC
toc_priority: 3
toc_title: JDBC
---

# JDBC {#table-engine-jdbc}

@ -1,6 +1,6 @@
---
sidebar_position: 8
sidebar_label: Kafka
toc_priority: 8
toc_title: Kafka
---

# Kafka {#kafka}

@ -87,9 +87,8 @@ Examples:

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects. If possible, switch old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects. If possible, switch old projects to the method described above.

``` sql
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format

@ -134,7 +133,7 @@ Example:

    SELECT level, sum(total) FROM daily GROUP BY level;
```

To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#settings-max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings/#settings-max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
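
As a rough illustration, both thresholds can be tuned at the session level (the values below are illustrative, not recommendations):

```sql
SET max_insert_block_size = 1048576;   -- rows per block formed from Kafka messages
SET stream_flush_interval_ms = 7500;   -- flush an incomplete block after 7.5 seconds
```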
|
||||
|
||||
To stop receiving topic data or to change the conversion logic, detach the materialized view:
|
||||
|
||||
|
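A minimal sketch of the pipeline this hunk refers to (the `queue` and `consumer` names are assumptions; `daily`, `level`, and `total` appear in the truncated example above): a Kafka source table, a target table, and a materialized view that moves rows between them. Detaching the view is how consumption is paused.

``` sql
CREATE TABLE queue (timestamp UInt64, level String, message String)
    ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');

CREATE TABLE daily (day Date, level String, total UInt64)
    ENGINE = SummingMergeTree() ORDER BY (day, level);

-- Moves each consumed block from the topic into `daily`.
CREATE MATERIALIZED VIEW consumer TO daily
    AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() AS total
    FROM queue GROUP BY day, level;

-- Stops receiving topic data; ATTACH TABLE consumer resumes it.
DETACH TABLE consumer;
```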
@ -1,6 +1,6 @@
---
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
toc_priority: 12
toc_title: MaterializedPostgreSQL
---

# MaterializedPostgreSQL {#materialize-postgresql}

@ -52,8 +52,5 @@ PRIMARY KEY key;
SELECT key, value, _version FROM postgresql_db.postgresql_replica;
```

:::warning
Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
:::

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/materialized-postgresql) <!--hide-->
!!! warning "Warning"
    Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
@ -1,6 +1,6 @@
---
sidebar_position: 5
sidebar_label: MongoDB
toc_priority: 5
toc_title: MongoDB
---

# MongoDB {#mongodb}

@ -1,6 +1,6 @@
---
sidebar_position: 4
sidebar_label: MySQL
toc_priority: 4
toc_title: MySQL
---

# MySQL {#mysql}

@ -148,5 +148,3 @@ Default value: `16`.

- [The mysql table function](../../../sql-reference/table-functions/mysql.md)
- [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mysql/) <!--hide-->

@ -1,6 +1,6 @@
---
sidebar_position: 2
sidebar_label: ODBC
toc_priority: 2
toc_title: ODBC
---

# ODBC {#table-engine-odbc}
@ -1,6 +1,6 @@
---
sidebar_position: 11
sidebar_label: PostgreSQL
toc_priority: 11
toc_title: PostgreSQL
---

# PostgreSQL {#postgresql}

@ -73,9 +73,8 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp

PostgreSQL `Array` types are converted into ClickHouse arrays.

:::warning
Be careful: in PostgreSQL, array data created as `type_name[]` may contain multi-dimensional arrays of different dimensions in different table rows of the same column. In ClickHouse, multidimensional arrays in all table rows of the same column must have the same number of dimensions.
:::
!!! info "Note"
    Be careful - in PostgreSQL an array data, created like a `type_name[]`, may contain multi-dimensional arrays of different dimensions in different table rows in same column. But in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column.

Supports multiple replicas that must be listed by `|`. For example:
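A sketch of the replica syntax just mentioned (host names, database, table, and credentials are placeholders): the candidates for each address are separated by `|`.

``` sql
CREATE TABLE test_replicas (id UInt32, name String)
ENGINE = PostgreSQL('postgres{1|2|3}:5432', 'postgres_database', 'test_table', 'postgres', 'mypassword');
```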
@ -1,6 +1,6 @@
---
sidebar_position: 10
sidebar_label: RabbitMQ
toc_priority: 10
toc_title: RabbitMQ
---

# RabbitMQ Engine {#rabbitmq-engine}

@ -1,6 +1,6 @@
---
sidebar_position: 7
sidebar_label: S3
toc_priority: 7
toc_title: S3
---

# S3 Table Engine {#table-engine-s3}

@ -66,9 +66,8 @@ For more information about virtual columns see [here](../../../engines/table-eng

Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.

:::warning
If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::
!!! warning "Warning"
    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.

**Example with wildcards 1**

@ -159,5 +158,3 @@ The following settings can be specified in configuration file for given endpoint
## See also

- [s3 table function](../../../sql-reference/table-functions/s3.md)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/s3/) <!--hide-->
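For illustration, a sketch of the `{}` constructions and the leading-zeros rule together (the bucket URL and file layout are assumptions):

``` sql
-- {some,another}_prefix enumerates alternatives; {1..3} is a numeric
-- range. For file-000 ... file-999, {0..9}{0..9}{0..9} would be needed.
CREATE TABLE table_with_wildcards (name String, value UInt32)
    ENGINE = S3('https://storage.example.com/my-bucket/{some,another}_prefix/some_file_{1..3}', 'CSV');
```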
@ -1,6 +1,6 @@
---
sidebar_position: 7
sidebar_label: SQLite
toc_priority: 7
toc_title: SQLite
---

# SQLite {#sqlite}

@ -56,7 +56,4 @@ SELECT * FROM sqlite_db.table2 ORDER BY col1;
**See Also**

- [SQLite](../../../engines/database-engines/sqlite.md) engine
- [sqlite](../../../sql-reference/table-functions/sqlite.md) table function


[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/sqlite/) <!--hide-->
- [sqlite](../../../sql-reference/table-functions/sqlite.md) table function

@ -1,6 +1,7 @@
---
sidebar_position: 20
sidebar_label: Log Family
toc_folder_title: Log Family
toc_priority: 29
toc_title: Introduction
---

# Log Engine Family {#log-engine-family}

@ -10,6 +10,3 @@ The engine belongs to the family of `Log` engines. See the common properties of
`Log` differs from [TinyLog](../../../engines/table-engines/log-family/tinylog.md) in that a small file of "marks" resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other.
The `Log` engine does not support indexes. Likewise, if writing to a table fails, the table is broken, and reading from it returns an error. The `Log` engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes.

[Original article](https://clickhouse.com/docs/en/engines/table-engines/log-family/log/) <!--hide-->
@ -1,6 +1,6 @@
---
sidebar_position: 60
sidebar_label: AggregatingMergeTree
toc_priority: 35
toc_title: AggregatingMergeTree
---

# AggregatingMergeTree {#aggregatingmergetree}

@ -42,9 +42,8 @@ When creating a `AggregatingMergeTree` table the same [clauses](../../../engines

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
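-- For contrast with the deprecated form above, a sketch of the current
-- syntax (table and column names are illustrative, not from this diff):
-- AggregateFunction columns store intermediate aggregation states.
CREATE TABLE agg_visits
(
    day Date,
    uniq_users AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY day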
@ -1,6 +1,6 @@
---
sidebar_position: 70
sidebar_label: CollapsingMergeTree
toc_priority: 36
toc_title: CollapsingMergeTree
---

# CollapsingMergeTree {#table_engine-collapsingmergetree}

@ -42,9 +42,8 @@ When creating a `CollapsingMergeTree` table, the same [query clauses](../../../e

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
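-- For contrast, a sketch of the current syntax (illustrative names):
-- Sign = 1 marks a "state" row, Sign = -1 a cancelling row that
-- collapses with it during background merges.
CREATE TABLE uact
(
    UserID UInt64,
    PageViews UInt8,
    Duration UInt8,
    Sign Int8
)
ENGINE = CollapsingMergeTree(Sign)
ORDER BY UserID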
@ -1,15 +1,12 @@
---
sidebar_position: 30
sidebar_label: Custom Partitioning Key
toc_priority: 32
toc_title: Custom Partitioning Key
---

# Custom Partitioning Key {#custom-partitioning-key}

:::warning
In most cases you do not need a partition key, and in most other cases you do not need a partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression).

You should never use partitioning that is too granular. Don't partition your data by client identifiers or names. Instead, make a client identifier or name the first column in the ORDER BY expression.
:::
!!! warning "Warning"
    In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression).

Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well.

@ -43,9 +40,8 @@ By default, the floating-point partition key is not supported. To use it enable

When new data is inserted into a table, it is stored as a separate part (chunk) sorted by the primary key. Within 10-15 minutes after inserting, the parts of the same partition are merged into a single part.

:::info
A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn’t make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors.
:::
!!! info "Info"
    A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn’t make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors.

Use the [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) table to view the table parts and partitions. For example, let’s assume that we have a `visits` table with partitioning by month. Let’s perform the `SELECT` query for the `system.parts` table:

@ -82,9 +78,8 @@ Let’s break down the name of the part: `201901_1_9_2_11`:
- `2` is the chunk level (the depth of the merge tree it is formed from).
- `11` is the mutation version (if a part mutated).

:::info
The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).
:::
!!! info "Info"
    The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).

The `active` column shows the status of the part. `1` is active; `0` is inactive. The inactive parts are, for example, source parts remaining after merging to a larger part. The corrupted data parts are also indicated as inactive.
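A sketch of the monthly partitioning recommended above (the table and columns are illustrative):

``` sql
CREATE TABLE visits
(
    VisitDate Date,
    Hour UInt8,
    ClientID UUID
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(VisitDate)  -- one partition per month
ORDER BY Hour;
```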
@ -1,6 +1,6 @@
---
sidebar_position: 90
sidebar_label: GraphiteMergeTree
toc_priority: 38
toc_title: GraphiteMergeTree
---

# GraphiteMergeTree {#graphitemergetree}

@ -54,9 +54,8 @@ When creating a `GraphiteMergeTree` table, the same [clauses](../../../engines/t

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
@ -120,13 +119,12 @@ default
...
```

:::warning
Patterns must be strictly ordered:
!!! warning "Attention"
    Patterns must be strictly ordered:

1. Patterns without `function` or `retention`.
1. Patterns with both `function` and `retention`.
1. Pattern `default`.
:::
    1. Patterns without `function` or `retention`.
    1. Patterns with both `function` and `retention`.
    1. Pattern `default`.

When processing a row, ClickHouse checks the rules in the `pattern` sections. Each `pattern` section (including `default`) can contain a `function` parameter for aggregation, `retention` parameters, or both. If the metric name matches the `regexp`, the rules from the `pattern` section (or sections) are applied; otherwise, the rules from the `default` section are used.

@ -255,6 +253,7 @@ Valid values:
```


:::warning
Data rollup is performed during merges. Usually, for old partitions, merges are not started, so to roll up old data it is necessary to trigger an unscheduled merge using [optimize](../../../sql-reference/statements/optimize.md), or use additional tools, for example [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).
:::
!!! warning "Warning"
    Data rollup is performed during merges. Usually, for old partitions, merges are not started, so for rollup it is necessary to trigger an unscheduled merge using [optimize](../../../sql-reference/statements/optimize.md). Or use additional tools, for example [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).

[Original article](https://clickhouse.com/docs/en/operations/table_engines/graphitemergetree/) <!--hide-->
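A one-line sketch of triggering the unscheduled merge the warning mentions (the table name and partition value are placeholders):

``` sql
-- Force a merge so rollup is applied to an old partition.
OPTIMIZE TABLE graphite PARTITION 201902 FINAL;
```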
@ -1,6 +1,7 @@
---
sidebar_position: 10
sidebar_label: MergeTree Family
toc_folder_title: MergeTree Family
toc_priority: 28
toc_title: Introduction
---

# MergeTree Engine Family {#mergetree-engine-family}

@ -1,6 +1,6 @@
---
sidebar_position: 11
sidebar_label: MergeTree
toc_priority: 30
toc_title: MergeTree
---

# MergeTree {#table_engines-mergetree}

@ -27,9 +27,8 @@ Main features:

If necessary, you can set the data sampling method in the table.

:::info
The [Merge](../../../engines/table-engines/special/merge.md#merge) engine does not belong to the `*MergeTree` family.
:::
!!! info "Info"
    The [Merge](../../../engines/table-engines/special/merge.md#merge) engine does not belong to the `*MergeTree` family.

## Creating a Table {#table_engine-mergetree-creating-a-table}

@ -128,9 +127,8 @@ The `index_granularity` setting can be omitted because 8192 is the default value

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects. If possible, switch old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects. If possible, switch old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
@ -306,8 +304,8 @@ CREATE TABLE table_name
Indices from the example can be used by ClickHouse to reduce the amount of data to read from disk in the following queries:

``` sql
SELECT count() FROM table WHERE s < 'z'
SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234
```

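For context, a sketch of index definitions that those queries can exploit (a reconstruction with assumed names, following the truncated `CREATE TABLE table_name` example in this hunk):

``` sql
CREATE TABLE table_name
(
    u64 UInt64,
    i32 Int32,
    s String,
    -- minmax stores the value range of the expression per block of
    -- granules; set(1000) stores up to 1000 distinct values per block.
    INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3,
    INDEX b (u64 * length(s), i32) TYPE set(1000) GRANULARITY 4
)
ENGINE = MergeTree()
ORDER BY u64;
```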
#### Available Types of Indices {#available-types-of-indices}

@ -366,7 +364,7 @@ The `set` index can be used with all functions. Function subsets for other index
| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
|------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
@ -384,10 +382,8 @@ The `set` index can be used with all functions. Function subsets for other index

Functions with a constant argument that is less than ngram size can’t be used by `ngrambf_v1` for query optimization.

:::note
Bloom filters can have false positive matches, so the `ngrambf_v1`, `tokenbf_v1`, and `bloom_filter` indexes can not be used for optimizing queries where the result of a function is expected to be false.

For example:
!!! note "Note"
    Bloom filters can have false positive matches, so the `ngrambf_v1`, `tokenbf_v1`, and `bloom_filter` indexes can’t be used for optimizing queries where the result of a function is expected to be false, for example:

- Can be optimized:
    - `s LIKE '%test%'`
@ -395,13 +391,12 @@ For example:
    - `s = 1`
    - `NOT s != 1`
    - `startsWith(s, 'test')`
- Can not be optimized:
- Can’t be optimized:
    - `NOT s LIKE '%test%'`
    - `s NOT LIKE '%test%'`
    - `NOT s = 1`
    - `s != 1`
    - `NOT startsWith(s, 'test')`
:::

## Projections {#projections}
Projections are like [materialized views](../../../sql-reference/statements/create/view.md#materialized) but defined at the part level. They provide consistency guarantees along with automatic usage in queries.
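A brief sketch of what a part-level projection looks like (the names are illustrative; syntax as in recent ClickHouse releases):

``` sql
-- Store an alternate ordering inside every part; queries that filter
-- or sort by ClientID can use it automatically.
ALTER TABLE visits ADD PROJECTION by_client (SELECT * ORDER BY ClientID);
ALTER TABLE visits MATERIALIZE PROJECTION by_client;
```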
@ -1,6 +1,6 @@
---
sidebar_position: 40
sidebar_label: ReplacingMergeTree
toc_priority: 33
toc_title: ReplacingMergeTree
---

# ReplacingMergeTree {#replacingmergetree}

@ -29,9 +29,8 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

For a description of request parameters, see [statement description](../../../sql-reference/statements/create/table.md).

:::warning
Uniqueness of rows is determined by the `ORDER BY` table section, not `PRIMARY KEY`.
:::
!!! note "Attention"
    Uniqueness of rows is determined by the `ORDER BY` table section, not `PRIMARY KEY`.

**ReplacingMergeTree Parameters**

@ -50,9 +49,8 @@ When creating a `ReplacingMergeTree` table the same [clauses](../../../engines/t

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
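-- For contrast, a sketch of the current syntax (illustrative names):
-- with the optional `ver` argument, the row with the maximum version
-- per ORDER BY key survives deduplication.
CREATE TABLE hits_dedup
(
    key UInt64,
    value String,
    ver UInt32
)
ENGINE = ReplacingMergeTree(ver)
ORDER BY key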
@ -1,6 +1,6 @@
---
sidebar_position: 20
sidebar_label: Data Replication
toc_priority: 31
toc_title: Data Replication
---

# Data Replication {#table_engines-replication}

@ -31,9 +31,8 @@ ClickHouse uses [Apache ZooKeeper](https://zookeeper.apache.org) for storing rep

To use replication, set parameters in the [zookeeper](../../../operations/server-configuration-parameters/settings.md#server-settings_zookeeper) server configuration section.

:::warning
Don’t neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.
:::
!!! attention "Attention"
    Don’t neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.

Example of setting the addresses of the ZooKeeper cluster:
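The ZooKeeper address block itself is truncated in this hunk. Separately, a sketch of the table-level side of replication (the path and the `{shard}`/`{replica}` macros are conventional placeholders filled in from the server config):

``` sql
CREATE TABLE replicated_events
(
    EventDate Date,
    EventID UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated_events', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY EventID;
```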
@ -1,6 +1,6 @@
---
sidebar_position: 50
sidebar_label: SummingMergeTree
toc_priority: 34
toc_title: SummingMergeTree
---

# SummingMergeTree {#summingmergetree}

@ -41,9 +41,8 @@ When creating a `SummingMergeTree` table the same [clauses](../../../engines/tab

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects and, if possible, switch the old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

@ -1,6 +1,6 @@
---
sidebar_position: 80
sidebar_label: VersionedCollapsingMergeTree
toc_priority: 37
toc_title: VersionedCollapsingMergeTree
---

# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}

@ -53,9 +53,8 @@ When creating a `VersionedCollapsingMergeTree` table, the same [clauses](../../.

<summary>Deprecated Method for Creating a Table</summary>

:::warning
Do not use this method in new projects. If possible, switch old projects to the method described above.
:::
!!! attention "Attention"
    Do not use this method in new projects. If possible, switch the old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
@ -1,6 +1,6 @@
---
sidebar_position: 120
sidebar_label: Buffer
toc_priority: 45
toc_title: Buffer
---

# Buffer Table Engine {#buffer}

@ -54,9 +54,8 @@ If the set of columns in the Buffer table does not match the set of columns in a
If the types do not match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log, and the buffer is cleared.
The same thing happens if the subordinate table does not exist when the buffer is flushed.

:::warning
Running ALTER on the Buffer table in releases made before 26 Oct 2021 will cause a `Block structure mismatch` error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117) and [#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)), so deleting the Buffer table and then recreating is the only option. It is advisable to check that this error is fixed in your release before trying to run ALTER on the Buffer table.
:::
!!! attention "Attention"
    Running ALTER on the Buffer table in releases made before 26 Oct 2021 will cause a `Block structure mismatch` error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117) and [#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)), so deleting the Buffer table and then recreating is the only option. It is advisable to check that this error is fixed in your release before trying to run ALTER on the Buffer table.

If the server is restarted abnormally, the data in the buffer is lost.

@ -74,4 +73,4 @@ A Buffer table is used when too many INSERTs are received from a large number of

Note that it does not make sense to insert data one row at a time, even for Buffer tables. This will only produce a speed of a few thousand rows per second, while inserting larger blocks of data can produce over a million rows per second (see the section “Performance”).

[Original article](https://clickhouse.com/docs/en/engines/table-engines/special/buffer/) <!--hide-->
[Original article](https://clickhouse.com/docs/en/operations/table_engines/buffer/) <!--hide-->
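A sketch of the canonical Buffer setup (the database, table, and thresholds are illustrative): inserts accumulate in RAM and are flushed to the subordinate `merge.hits` table once the time, row, or byte conditions are met.

``` sql
-- 16 independent buffers; each is flushed when all minimums (10 s,
-- 10000 rows, 10 MB) are reached or any maximum (100 s, 1M rows,
-- 100 MB) is exceeded.
CREATE TABLE merge.hits_buffer AS merge.hits
ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000);
```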
@ -1,6 +1,6 @@
---
sidebar_position: 20
sidebar_label: Dictionary
toc_priority: 35
toc_title: Dictionary
---

# Dictionary Table Engine {#dictionary}

@ -97,5 +97,3 @@ select * from products limit 1;
**See Also**

- [Dictionary function](../../../sql-reference/table-functions/dictionary.md#dictionary-function)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/special/dictionary/) <!--hide-->
@ -1,6 +1,6 @@
---
sidebar_position: 10
sidebar_label: Distributed
toc_priority: 33
toc_title: Distributed
---

# Distributed Table Engine {#distributed}

@ -64,19 +64,19 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2

- `monitor_max_sleep_time_ms` - same as [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms)

:::note
**Durability settings** (`fsync_...`):
!!! note "Note"

- Affect only asynchronous INSERTs (i.e. `insert_distributed_sync=false`), when data is first stored on the initiator node disk and later asynchronously sent to the shards.
- May significantly decrease insert performance.
- Affect writing the data stored inside the Distributed table folder into the **node which accepted your insert**. If you need guarantees of writing data to the underlying MergeTree tables, see the durability settings (`...fsync...`) in `system.merge_tree_settings`.
    **Durability settings** (`fsync_...`):

For **Insert limit settings** (`..._insert`) see also:
    - Affect only asynchronous INSERTs (i.e. `insert_distributed_sync=false`) when data first stored on the initiator node disk and later asynchronously send to shards.
    - May significantly decrease the inserts' performance
    - Affect writing the data stored inside Distributed table folder into the **node which accepted your insert**. If you need to have guarantees of writing data to underlying MergeTree tables - see durability settings (`...fsync...`) in `system.merge_tree_settings`

- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
- [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) setting
- `bytes_to_throw_insert` is handled before `bytes_to_delay_insert`, so you should not set it to a value less than `bytes_to_delay_insert`
:::
    For **Insert limit settings** (`..._insert`) see also:

    - [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
    - [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) setting
    - `bytes_to_throw_insert` handled before `bytes_to_delay_insert`, so you should not set it to the value less then `bytes_to_delay_insert`

**Example**

@ -215,9 +215,8 @@ To learn more about how distributed `in` and `global in` queries are processed, r

- `_shard_num` — Contains the `shard_num` value from the table `system.clusters`. Type: [UInt32](../../../sql-reference/data-types/int-uint.md).

:::note
Since [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create temporary Distributed table, `_shard_num` is available there too.
:::
!!! note "Note"
    Since [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create temporary Distributed table, `_shard_num` is available there too.

**See Also**

@ -226,4 +225,3 @@ Since [remote](../../../sql-reference/table-functions/remote.md) and [cluster](.
- [shardNum()](../../../sql-reference/functions/other-functions.md#shard-num) and [shardCount()](../../../sql-reference/functions/other-functions.md#shard-count) functions

[Original article](https://clickhouse.com/docs/en/engines/table-engines/special/distributed/) <!--hide-->
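A short sketch of the engine and the `_shard_num` virtual column discussed above (the cluster, database, and table names are placeholders for entries in the server config):

``` sql
CREATE TABLE hits_all AS hits
ENGINE = Distributed(logs, default, hits, rand());  -- shard rows by rand()

-- Rows per shard, via the virtual column:
SELECT _shard_num, count() FROM hits_all GROUP BY _shard_num;
```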
@ -1,6 +1,6 @@
---
sidebar_position: 130
sidebar_label: External Data
toc_priority: 45
toc_title: External Data
---

# External Data for Query Processing {#external-data-for-query-processing}

@ -63,3 +63,4 @@ $ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+coun

For distributed query processing, the temporary tables are sent to all the remote servers.

[Original article](https://clickhouse.com/docs/en/operations/table_engines/external_data/) <!--hide-->

@ -1,6 +1,6 @@
---
sidebar_position: 40
sidebar_label: File
toc_priority: 37
toc_title: File
---

# File Table Engine {#table_engines-file}

@ -30,9 +30,8 @@ When creating table using `File(Format)` it creates empty subdirectory in that f

You may manually create this subfolder and file in the server filesystem and then [ATTACH](../../../sql-reference/statements/attach.md) it to table information with a matching name, so you can query data from that file.

:::warning
Be careful with this functionality, because ClickHouse does not keep track of external changes to such files. The result of simultaneous writes via ClickHouse and outside of ClickHouse is undefined.
:::
!!! warning "Warning"
    Be careful with this functionality, because ClickHouse does not keep track of external changes to such files. The result of simultaneous writes via ClickHouse and outside of ClickHouse is undefined.

## Example {#example}

@ -86,4 +85,4 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
- Indices
- Replication

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/file/) <!--hide-->
[Original article](https://clickhouse.com/docs/en/operations/table_engines/file/) <!--hide-->
@ -1,6 +1,6 @@
---
sidebar_position: 140
sidebar_label: GenerateRandom
toc_priority: 46
toc_title: GenerateRandom
---

# GenerateRandom Table Engine {#table_engines-generate}

@ -56,4 +56,4 @@ SELECT * FROM generate_engine_table LIMIT 3
- Indices
- Replication

[Original article](https://clickhouse.com/docs/en/engines/table-engines/special/generate/) <!--hide-->
[Original article](https://clickhouse.com/docs/en/operations/table_engines/generate/) <!--hide-->

@ -1,6 +1,6 @@
---
sidebar_position: 50
sidebar_label: Special
toc_folder_title: Special
toc_priority: 31
---

# Special Table Engines {#special-table-engines}
@ -1,15 +1,14 @@
---
sidebar_position: 70
sidebar_label: Join
toc_priority: 40
toc_title: Join
---

# Join Table Engine {#join}

Optional prepared data structure for usage in [JOIN](../../../sql-reference/statements/select/join.md#select-join) operations.

:::note
This is not an article about the [JOIN clause](../../../sql-reference/statements/select/join.md#select-join) itself.
:::
!!! note "Note"
    This is not an article about the [JOIN clause](../../../sql-reference/statements/select/join.md#select-join) itself.

## Creating a Table {#creating-a-table}

@ -126,5 +125,3 @@ ALTER TABLE id_val_join DELETE WHERE id = 3;
│ 1 │ 21 │
└────┴─────┘
```

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/join/) <!--hide-->
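A sketch of the prepared-join structure (the column list is assumed; the `id_val_join` name comes from the truncated example above). The strictness, kind, and key columns passed to the engine must match the JOINs the table is used in:

``` sql
CREATE TABLE id_val_join (`id` UInt32, `val` UInt8)
ENGINE = Join(ANY, LEFT, id);

-- Used as the right-hand table of a matching join:
SELECT * FROM (SELECT toUInt32(1) AS id) AS t
ANY LEFT JOIN id_val_join USING (id);
```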
@ -1,10 +1,10 @@
---
sidebar_position: 100
sidebar_label: MaterializedView
toc_priority: 43
toc_title: MaterializedView
---

# MaterializedView Table Engine {#materializedview}

Used for implementing materialized views (for more information, see [CREATE VIEW](../../../sql-reference/statements/create/view.md#materialized)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses that engine.

[Original article](https://clickhouse.com/docs/en/engines/table-engines/special/materializedview/) <!--hide-->
[Original article](https://clickhouse.com/docs/en/operations/table_engines/materializedview/) <!--hide-->

@ -1,6 +1,6 @@
---
sidebar_position: 110
sidebar_label: Memory
toc_priority: 44
toc_title: Memory
---

# Memory Table Engine {#memory}

@ -15,4 +15,4 @@ Normally, using this table engine is not justified. However, it can be used for

The Memory engine is used by the system for temporary tables with external query data (see the section “External data for processing a query”), and for implementing `GLOBAL IN` (see the section “IN operators”).

[Original article](https://clickhouse.com/docs/en/engines/table-engines/special/memory/) <!--hide-->
[Original article](https://clickhouse.com/docs/en/operations/table_engines/memory/) <!--hide-->
@ -1,6 +1,6 @@
---
sidebar_position: 30
sidebar_label: Merge
toc_priority: 36
toc_title: Merge
---

# Merge Table Engine {#merge}

@ -12,7 +12,7 @@ Reading is automatically parallelized. Writing to a table is not supported. When
## Creating a Table {#creating-a-table}

``` sql
CREATE TABLE ... Engine=Merge(db_name, tables_regexp)
```

**Engine Parameters**

@ -81,5 +81,3 @@ SELECT * FROM WatchLog;

- [Virtual columns](../../../engines/table-engines/special/index.md#table_engines-virtual_columns)
- [merge](../../../sql-reference/table-functions/merge.md) table function

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/merge/) <!--hide-->
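A sketch of the parameters shown above (the database, structure, and regexp are illustrative; the `WatchLog` tables come from the truncated example in this hunk):

``` sql
-- Reads from every table in `default` whose name starts with WatchLog;
-- the regexp matches table names only, and writes are not supported.
CREATE TABLE all_watch_logs (date Date, UserId Int64, EventType String)
ENGINE = Merge(default, '^WatchLog');
```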
@ -1,15 +1,13 @@
---
sidebar_position: 50
sidebar_label: 'Null'
toc_priority: 38
toc_title: 'Null'
---

# Null Table Engine {#null}

When writing to a `Null` table, data is ignored. When reading from a `Null` table, the response is empty.

:::note
If you are wondering why this is useful, note that you can create a materialized view on a `Null` table. So the data written to the table will end up affecting the view, but the original raw data will still be discarded.
:::
!!! info "Hint"
    However, you can create a materialized view on a `Null` table. So the data written to the table will end up affecting the view, but original raw data will still be discarded.


[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/null/) <!--hide-->
[Original article](https://clickhouse.com/docs/en/operations/table_engines/null/) <!--hide-->
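A sketch of the pattern the note describes (the names are illustrative): writes to the `Null` table are discarded, yet a materialized view defined over it still processes every inserted block.

``` sql
CREATE TABLE raw (x UInt64) ENGINE = Null;

-- Sees every block inserted into `raw`, even though `raw` keeps nothing.
CREATE MATERIALIZED VIEW sums ENGINE = SummingMergeTree() ORDER BY tuple()
    AS SELECT sum(x) AS total FROM raw;
```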