Merge branch 'master' into fix_ignore_hdfs_error

commit df869c5b9c
mergify[bot] authored 2022-01-18 07:05:24 +00:00, committed by GitHub (GPG key ID: 4AEE18F83AFDEB23)
324 changed files with 13123 additions and 5235 deletions

.github/actionlint.yml (new file, 8 lines)

@ -0,0 +1,8 @@
self-hosted-runner:
labels:
- builder
- fuzzer-unit-tester
- stress-tester
- style-checker
- func-tester-aarch64
- func-tester
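
The config above whitelists the self-hosted runner labels that the workflows reference. As a quick local sanity check (a sketch, assuming the actionlint CLI is installed; run from the repository root it picks up .github/actionlint.yml automatically):

  go install github.com/rhysd/actionlint/cmd/actionlint@latest
  actionlint    # lints .github/workflows/*.yml against the labels declared above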


@ -33,11 +33,11 @@ jobs:
- name: Cherry pick
run: |
sudo pip install GitPython
-cd $GITHUB_WORKSPACE/tests/ci
+cd "$GITHUB_WORKSPACE/tests/ci"
python3 cherry_pick.py
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"


@ -9,23 +9,68 @@ on: # yamllint disable-line rule:truthy
branches:
- 'backport/**'
jobs:
-DockerHubPush:
+DockerHubPushAarch64:
-runs-on: [self-hosted, style-checker]
+runs-on: [self-hosted, func-tester-aarch64]
steps:
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
-cd $GITHUB_WORKSPACE/tests/ci
+cd "$GITHUB_WORKSPACE/tests/ci"
-python3 docker_images_check.py
+python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images
-path: ${{ runner.temp }}/docker_images_check/changed_images.json
+path: ${{ runner.temp }}/changed_images.json
CompatibilityCheck:
needs: [BuilderDebRelease]
runs-on: [self-hosted, style-checker]
@ -39,7 +84,7 @@ jobs:
EOF
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Download json reports
@ -48,16 +93,16 @@ jobs:
path: ${{ env.REPORTS_PATH }}
- name: CompatibilityCheck
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci && python3 compatibility_check.py
+cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
@ -82,7 +127,7 @@ jobs:
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
@ -90,10 +135,10 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
+cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@ -103,9 +148,50 @@ jobs:
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH $CACHES_PATH
+sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
CHECK_NAME=ClickHouse build check (actions)
BUILD_NAME=package_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v2
with:
name: changed_images
path: ${{ runner.temp }}/images_path
- name: Check out repository code
uses: actions/checkout@v2
with:
submodules: 'true'
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
name: ${{ env.BUILD_NAME }}
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
@ -127,7 +213,7 @@ jobs:
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
@ -135,10 +221,10 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
+cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@ -148,9 +234,9 @@ jobs:
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH $CACHES_PATH
+sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
@ -172,7 +258,7 @@ jobs:
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
@ -180,10 +266,10 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
+cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@ -193,9 +279,9 @@ jobs:
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH $CACHES_PATH
+sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
@ -217,7 +303,7 @@ jobs:
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
@ -225,10 +311,10 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
+cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@ -238,15 +324,16 @@ jobs:
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH $CACHES_PATH
+sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
needs:
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebDebug
@ -265,21 +352,21 @@ jobs:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Report Builder
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cd $GITHUB_WORKSPACE/tests/ci
+cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAl STATELESS TESTS #######################################
##############################################################################################
@ -302,22 +389,22 @@ jobs:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci
+cd "$REPO_COPY/tests/ci"
-python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################
@ -340,22 +427,22 @@ jobs:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci
+cd "$REPO_COPY/tests/ci"
-python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
@ -381,22 +468,22 @@ jobs:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Stress test
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci
+cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
@ -418,22 +505,22 @@ jobs:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Integration test
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci
+cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:
- DockerHubPush
@ -447,10 +534,10 @@ jobs:
steps:
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Finish label
run: |
-cd $GITHUB_WORKSPACE/tests/ci
+cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py

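Across these workflows the single DockerHubPush job is split into per-architecture jobs (DockerHubPushAmd64, DockerHubPushAarch64) whose per-arch image lists are then combined by docker_manifests_merge.py. Conceptually the merge is what the Docker CLI does with manifest lists; a hand-written sketch with an illustrative tag name, not the script itself:

  docker manifest create clickhouse/binary-builder:latest \
      clickhouse/binary-builder:latest-amd64 \
      clickhouse/binary-builder:latest-aarch64
  docker manifest push clickhouse/binary-builder:latest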

@ -6,7 +6,7 @@ env:
on: # yamllint disable-line rule:truthy
workflow_run:
-workflows: ["CIGithubActions", "ReleaseCI", "DocsCheck", "BackportPR"]
+workflows: ["PullRequestCI", "ReleaseCI", "DocsCheck", "BackportPR"]
types:
- requested
jobs:


@ -21,31 +21,77 @@ jobs:
steps:
- name: Clear repository
run: |
-sudo rm -rf $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Labels check
run: |
-cd $GITHUB_WORKSPACE/tests/ci
+cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
-DockerHubPush:
+DockerHubPushAarch64:
needs: CheckLabels
runs-on: [self-hosted, func-tester-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
-sudo rm -rf $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
-cd $GITHUB_WORKSPACE/tests/ci
+cd "$GITHUB_WORKSPACE/tests/ci"
-python3 docker_images_check.py
+python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images
-path: ${{ runner.temp }}/docker_images_check/changed_images.json
+path: ${{ runner.temp }}/changed_images.json
DocsCheck:
needs: DockerHubPush
runs-on: [self-hosted, func-tester]
@ -63,17 +109,17 @@ jobs:
path: ${{ env.TEMP_PATH }}
- name: Clear repository
run: |
-sudo rm -rf $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Docs Check
run: |
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci
+cd "$REPO_COPY/tests/ci"
python3 docs_check.py
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"


@ -8,7 +8,7 @@ on: # yamllint disable-line rule:truthy
schedule:
- cron: '0 */6 * * *'
workflow_run:
-workflows: ["CIGithubActions"]
+workflows: ["PullRequestCI"]
types:
- completed
workflow_dispatch:
@ -24,21 +24,21 @@ jobs:
EOF
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Jepsen Test
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci
+cd "$REPO_COPY/tests/ci"
python3 keeper_jepsen_check.py
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -19,23 +19,68 @@ on: # yamllint disable-line rule:truthy
- '.github/**'
workflow_dispatch:
jobs:
-DockerHubPush:
+DockerHubPushAarch64:
-runs-on: [self-hosted, style-checker]
+runs-on: [self-hosted, func-tester-aarch64]
steps:
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
-cd $GITHUB_WORKSPACE/tests/ci
+cd "$GITHUB_WORKSPACE/tests/ci"
-python3 docker_images_check.py
+python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
with:
name: changed_images
-path: ${{ runner.temp }}/docker_images_check/changed_images.json
+path: ${{ runner.temp }}/changed_images.json
DocsRelease:
needs: DockerHubPush
runs-on: [self-hosted, func-tester]
@ -53,7 +98,7 @@ jobs:
EOF
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Download changed images
@ -63,14 +108,14 @@ jobs:
path: ${{ env.TEMP_PATH }}
- name: Docs Release
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci
+cd "$REPO_COPY/tests/ci"
python3 docs_release.py
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"

File diff suppressed because it is too large.


@ -23,20 +23,20 @@ jobs:
EOF
- name: Clear repository
run: |
-sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
submodules: 'true'
- name: Codebrowser
run: |
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"
-mkdir -p $TEMP_PATH
+mkdir -p "$TEMP_PATH"
-cp -r $GITHUB_WORKSPACE $TEMP_PATH
+cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-cd $REPO_COPY/tests/ci && python3 codebrowser_check.py
+cd "$REPO_COPY/tests/ci" && python3 codebrowser_check.py
- name: Cleanup
if: always()
run: |
-docker kill $(docker ps -q) ||:
+docker kill "$(docker ps -q)" ||:
-docker rm -f $(docker ps -a -q) ||:
+docker rm -f "$(docker ps -a -q)" ||:
-sudo rm -fr $TEMP_PATH
+sudo rm -fr "$TEMP_PATH"

.gitmodules (3 lines added)

@ -247,6 +247,9 @@
[submodule "contrib/sysroot"] [submodule "contrib/sysroot"]
path = contrib/sysroot path = contrib/sysroot
url = https://github.com/ClickHouse-Extras/sysroot.git url = https://github.com/ClickHouse-Extras/sysroot.git
[submodule "contrib/hive-metastore"]
path = contrib/hive-metastore
url = https://github.com/ClickHouse-Extras/hive-metastore
[submodule "contrib/azure"] [submodule "contrib/azure"]
path = contrib/azure path = contrib/azure
url = https://github.com/ClickHouse-Extras/azure-sdk-for-cpp.git url = https://github.com/ClickHouse-Extras/azure-sdk-for-cpp.git

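With the new contrib/hive-metastore entry above, an existing checkout needs the submodule fetched before the Hive integration can build, e.g.:

  git submodule update --init contrib/hive-metastore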
File diff suppressed because it is too large.


@ -511,6 +511,7 @@ include (cmake/find/h3.cmake)
include (cmake/find/libxml2.cmake)
include (cmake/find/brotli.cmake)
include (cmake/find/protobuf.cmake)
include (cmake/find/thrift.cmake)
include (cmake/find/grpc.cmake)
include (cmake/find/pdqsort.cmake)
include (cmake/find/miniselect.cmake)
@ -520,7 +521,7 @@ include (cmake/find/curl.cmake)
include (cmake/find/s3.cmake)
include (cmake/find/blob_storage.cmake)
include (cmake/find/base64.cmake)
-include (cmake/find/parquet.cmake)
+include (cmake/find/parquet.cmake) # uses protobuf and thrift
include (cmake/find/simdjson.cmake)
include (cmake/find/fast_float.cmake)
include (cmake/find/rapidjson.cmake)
@ -548,6 +549,7 @@ include (cmake/find/cassandra.cmake)
include (cmake/find/sentry.cmake)
include (cmake/find/datasketches.cmake)
include (cmake/find/libprotobuf-mutator.cmake)
include (cmake/find/hive-metastore.cmake)
set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "")
find_contrib_lib(cityhash)


@ -2,11 +2,11 @@
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54458)
+SET(VERSION_REVISION 54459)
-SET(VERSION_MAJOR 21)
+SET(VERSION_MAJOR 22)
-SET(VERSION_MINOR 13)
+SET(VERSION_MINOR 2)
SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 4cc45c1e15912ee300bca7cc8b8da2b888a70e2a)
+SET(VERSION_GITHASH dfe64a2789bbf51046bb6b5476f874f7b59d124c)
-SET(VERSION_DESCRIBE v21.13.1.1-prestable)
+SET(VERSION_DESCRIBE v22.2.1.1-prestable)
-SET(VERSION_STRING 21.13.1.1)
+SET(VERSION_STRING 22.2.1.1)
# end of autochange


@ -0,0 +1,26 @@
option(ENABLE_HIVE "Enable Hive" ${ENABLE_LIBRARIES})
if (NOT ENABLE_HIVE)
message("Hive disabled")
return()
endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/hive-metastore")
message(WARNING "submodule contrib/hive-metastore is missing. to fix try run: \n git submodule update --init")
set(USE_HIVE 0)
elseif (NOT USE_THRIFT)
message(WARNING "Thrift is not found, which is needed by Hive")
set(USE_HIVE 0)
elseif (NOT USE_HDFS)
message(WARNING "HDFS is not found, which is needed by Hive")
set(USE_HIVE 0)
elseif (NOT USE_ORC OR NOT USE_ARROW OR NOT USE_PARQUET)
message(WARNING "ORC/Arrow/Parquet is not found, which are needed by Hive")
set(USE_HIVE 0)
else()
set(USE_HIVE 1)
set(HIVE_METASTORE_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/hive-metastore)
set(HIVE_METASTORE_LIBRARY hivemetastore)
endif()
message (STATUS "Using_Hive=${USE_HIVE}: ${HIVE_METASTORE_INCLUDE_DIR} : ${HIVE_METASTORE_LIBRARY}")

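The new find module above only sets USE_HIVE=1 when Thrift, HDFS and ORC/Arrow/Parquet are all available; otherwise it warns and disables Hive. Opting out explicitly at configure time is just (a sketch of the cmake invocation):

  cmake -DENABLE_HIVE=0 ..   # skip hive-metastore even when ENABLE_LIBRARIES is ON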

@ -34,7 +34,6 @@ endif()
if(NOT USE_INTERNAL_PARQUET_LIBRARY)
find_package(Arrow)
find_package(Parquet)
find_library(THRIFT_LIBRARY thrift)
find_library(UTF8_PROC_LIBRARY utf8proc)
find_package(BZip2)
@ -145,12 +144,10 @@ if(NOT EXTERNAL_PARQUET_FOUND AND NOT MISSING_INTERNAL_PARQUET_LIBRARY AND NOT O
set(FLATBUFFERS_LIBRARY flatbuffers)
set(ARROW_LIBRARY arrow_static)
set(PARQUET_LIBRARY parquet_static)
set(THRIFT_LIBRARY thrift_static)
else()
set(FLATBUFFERS_LIBRARY flatbuffers_shared)
set(ARROW_LIBRARY arrow_shared)
set(PARQUET_LIBRARY parquet_shared)
set(THRIFT_LIBRARY thrift)
endif()
set(USE_PARQUET 1)

cmake/find/thrift.cmake (new file, 34 lines)

@ -0,0 +1,34 @@
option(ENABLE_THRIFT "Enable Thrift" ${ENABLE_LIBRARIES})
if (NOT ENABLE_THRIFT)
message (STATUS "thrift disabled")
set(USE_INTERNAL_THRIFT_LIBRARY 0)
return()
endif()
option(USE_INTERNAL_THRIFT_LIBRARY "Set to FALSE to use system thrift library instead of bundled" ON)
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/thrift")
if (USE_INTERNAL_THRIFT_LIBRARY)
message (WARNING "submodule contrib/thrift is missing. to fix try run: \n git submodule update --init --recursive")
set(USE_INTERNAL_THRIFT_LIBRARY 0)
endif ()
endif()
if (USE_INTERNAL_THRIFT_LIBRARY)
if (MAKE_STATIC_LIBRARIES)
set(THRIFT_LIBRARY thrift_static)
else()
set(THRIFT_LIBRARY thrift)
endif()
set (THRIFT_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src")
set(USE_THRIFT 1)
else()
find_library(THRIFT_LIBRARY thrift)
if (NOT THRIFT_LIBRARY)
set(USE_THRIFT 0)
else()
set(USE_THRIFT 1)
endif()
endif ()
message (STATUS "Using thrift=${USE_THRIFT}: ${THRIFT_INCLUDE_DIR} : ${THRIFT_LIBRARY}")

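cmake/find/thrift.cmake follows the usual pattern: the bundled contrib/thrift by default, a system libthrift as a fallback. A configure sketch that forces the system library (assuming libthrift is installed):

  cmake -DUSE_INTERNAL_THRIFT_LIBRARY=OFF ..   # falls back to find_library(thrift); USE_THRIFT stays 0 if it is not found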

@ -150,7 +150,6 @@ if (USE_INTERNAL_PARQUET_LIBRARY)
# The library is large - avoid bloat.
target_compile_options (${ARROW_LIBRARY} PRIVATE -g0)
target_compile_options (${THRIFT_LIBRARY} PRIVATE -g0)
target_compile_options (${PARQUET_LIBRARY} PRIVATE -g0)
endif()
@ -206,6 +205,10 @@ if (USE_INTERNAL_PROTOBUF_LIBRARY)
add_subdirectory(protobuf-cmake)
endif ()
if (USE_INTERNAL_THRIFT_LIBRARY)
add_subdirectory(thrift-cmake)
endif ()
if (USE_INTERNAL_HDFS3_LIBRARY)
add_subdirectory(libhdfs3-cmake)
endif ()
@ -299,6 +302,10 @@ if (USE_S2_GEOMETRY)
add_subdirectory(s2geometry-cmake)
endif()
if (USE_HIVE)
add_subdirectory (hive-metastore-cmake)
endif()
# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,

contrib/arrow (submodule updated)

@ -1 +1 @@
-Subproject commit aa9a7a698e33e278abe053f4634170b3b026e48e
+Subproject commit 1d9cc51daa4e7e9fc6926320ef73759818bd736e


@ -17,57 +17,8 @@ else()
set(ARROW_FULL_SO_VERSION "${ARROW_SO_VERSION}.${ARROW_VERSION_PATCH}.0")
endif()
# === thrift
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp")
# contrib/thrift/lib/cpp/CMakeLists.txt
set(thriftcpp_SOURCES
"${LIBRARY_DIR}/src/thrift/TApplicationException.cpp"
"${LIBRARY_DIR}/src/thrift/TOutput.cpp"
"${LIBRARY_DIR}/src/thrift/async/TAsyncChannel.cpp"
"${LIBRARY_DIR}/src/thrift/async/TAsyncProtocolProcessor.cpp"
"${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.h"
"${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/ThreadManager.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/TimerManager.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Util.cpp"
"${LIBRARY_DIR}/src/thrift/processor/PeekProcessor.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TBase64Utils.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TDebugProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TJSONProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TMultiplexedProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TTransportException.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TFDTransport.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TSimpleFileTransport.cpp"
"${LIBRARY_DIR}/src/thrift/transport/THttpTransport.cpp"
"${LIBRARY_DIR}/src/thrift/transport/THttpClient.cpp"
"${LIBRARY_DIR}/src/thrift/transport/THttpServer.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TSocket.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TSocketPool.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TServerSocket.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TTransportUtils.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TBufferTransports.cpp"
"${LIBRARY_DIR}/src/thrift/server/TConnectedClient.cpp"
"${LIBRARY_DIR}/src/thrift/server/TServerFramework.cpp"
"${LIBRARY_DIR}/src/thrift/server/TSimpleServer.cpp"
"${LIBRARY_DIR}/src/thrift/server/TThreadPoolServer.cpp"
"${LIBRARY_DIR}/src/thrift/server/TThreadedServer.cpp"
)
set(thriftcpp_threads_SOURCES
"${LIBRARY_DIR}/src/thrift/concurrency/ThreadFactory.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Thread.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Monitor.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp"
)
add_library(${THRIFT_LIBRARY} ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES})
set_target_properties(${THRIFT_LIBRARY} PROPERTIES CXX_STANDARD 14) # REMOVE after https://github.com/apache/thrift/pull/1641
target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src")
target_link_libraries (${THRIFT_LIBRARY} PRIVATE boost::headers_only)
# === orc
set(ORC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/orc/c++")
set(ORC_INCLUDE_DIR "${ORC_SOURCE_DIR}/include")
set(ORC_SOURCE_SRC_DIR "${ORC_SOURCE_DIR}/src")
@ -463,49 +414,6 @@ set(PARQUET_SRCS
#list(TRANSFORM PARQUET_SRCS PREPEND "${LIBRARY_DIR}/") # cmake 3.12
add_library(${PARQUET_LIBRARY} ${PARQUET_SRCS})
target_include_directories(${PARQUET_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src" "${CMAKE_CURRENT_SOURCE_DIR}/cpp/src" PRIVATE ${OPENSSL_INCLUDE_DIR})
set (HAVE_ARPA_INET_H 1)
set (HAVE_FCNTL_H 1)
set (HAVE_GETOPT_H 1)
set (HAVE_INTTYPES_H 1)
set (HAVE_NETDB_H 1)
set (HAVE_NETINET_IN_H 1)
set (HAVE_SIGNAL_H 1)
set (HAVE_STDINT_H 1)
set (HAVE_UNISTD_H 1)
set (HAVE_PTHREAD_H 1)
set (HAVE_SYS_IOCTL_H 1)
set (HAVE_SYS_PARAM_H 1)
set (HAVE_SYS_RESOURCE_H 1)
set (HAVE_SYS_SOCKET_H 1)
set (HAVE_SYS_STAT_H 1)
set (HAVE_SYS_TIME_H 1)
set (HAVE_SYS_UN_H 1)
set (HAVE_POLL_H 1)
set (HAVE_SYS_POLL_H 1)
set (HAVE_SYS_SELECT_H 1)
set (HAVE_SCHED_H 1)
set (HAVE_STRING_H 1)
set (HAVE_STRINGS_H 1)
set (HAVE_GETHOSTBYNAME 1)
set (HAVE_STRERROR_R 1)
set (HAVE_SCHED_GET_PRIORITY_MAX 1)
set (HAVE_SCHED_GET_PRIORITY_MIN 1)
if (OS_LINUX AND NOT USE_MUSL)
set (STRERROR_R_CHAR_P 1)
endif ()
#set(PACKAGE ${PACKAGE_NAME})
#set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
#set(VERSION ${thrift_VERSION})
# generate a config.h file
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build/cmake/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/thrift/config.h")
include_directories("${CMAKE_CURRENT_BINARY_DIR}")
target_link_libraries(${PARQUET_LIBRARY} PUBLIC ${ARROW_LIBRARY} PRIVATE ${THRIFT_LIBRARY} boost::headers_only boost::regex ${OPENSSL_LIBRARIES})
if (SANITIZE STREQUAL "undefined")

contrib/hive-metastore (new submodule)

@ -0,0 +1 @@
Subproject commit 809a77d435ce218d9b000733f19489c606fc567b


@ -0,0 +1,9 @@
set (SRCS
${ClickHouse_SOURCE_DIR}/contrib/hive-metastore/hive_metastore_constants.cpp
${ClickHouse_SOURCE_DIR}/contrib/hive-metastore/hive_metastore_types.cpp
${ClickHouse_SOURCE_DIR}/contrib/hive-metastore/ThriftHiveMetastore.cpp
)
add_library(${HIVE_METASTORE_LIBRARY} ${SRCS})
target_link_libraries(${HIVE_METASTORE_LIBRARY} PUBLIC ${THRIFT_LIBRARY})
target_include_directories(${HIVE_METASTORE_LIBRARY} SYSTEM PUBLIC ${HIVE_METASTORE_INCLUDE_DIR})


@ -0,0 +1,87 @@
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp")
set(thriftcpp_SOURCES
"${LIBRARY_DIR}/src/thrift/TApplicationException.cpp"
"${LIBRARY_DIR}/src/thrift/TOutput.cpp"
"${LIBRARY_DIR}/src/thrift/async/TAsyncChannel.cpp"
"${LIBRARY_DIR}/src/thrift/async/TAsyncProtocolProcessor.cpp"
"${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.h"
"${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/ThreadManager.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/TimerManager.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Util.cpp"
"${LIBRARY_DIR}/src/thrift/processor/PeekProcessor.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TBase64Utils.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TDebugProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TJSONProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TMultiplexedProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/protocol/TProtocol.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TTransportException.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TFDTransport.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TSimpleFileTransport.cpp"
"${LIBRARY_DIR}/src/thrift/transport/THttpTransport.cpp"
"${LIBRARY_DIR}/src/thrift/transport/THttpClient.cpp"
"${LIBRARY_DIR}/src/thrift/transport/THttpServer.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TSocket.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TSocketPool.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TServerSocket.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TTransportUtils.cpp"
"${LIBRARY_DIR}/src/thrift/transport/TBufferTransports.cpp"
"${LIBRARY_DIR}/src/thrift/server/TConnectedClient.cpp"
"${LIBRARY_DIR}/src/thrift/server/TServerFramework.cpp"
"${LIBRARY_DIR}/src/thrift/server/TSimpleServer.cpp"
"${LIBRARY_DIR}/src/thrift/server/TThreadPoolServer.cpp"
"${LIBRARY_DIR}/src/thrift/server/TThreadedServer.cpp"
)
set(thriftcpp_threads_SOURCES
"${LIBRARY_DIR}/src/thrift/concurrency/ThreadFactory.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Thread.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Monitor.cpp"
"${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp"
)
include("${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake") # makes config.h
set (HAVE_ARPA_INET_H 1)
set (HAVE_FCNTL_H 1)
set (HAVE_GETOPT_H 1)
set (HAVE_INTTYPES_H 1)
set (HAVE_NETDB_H 1)
set (HAVE_NETINET_IN_H 1)
set (HAVE_SIGNAL_H 1)
set (HAVE_STDINT_H 1)
set (HAVE_UNISTD_H 1)
set (HAVE_PTHREAD_H 1)
set (HAVE_SYS_IOCTL_H 1)
set (HAVE_SYS_PARAM_H 1)
set (HAVE_SYS_RESOURCE_H 1)
set (HAVE_SYS_SOCKET_H 1)
set (HAVE_SYS_STAT_H 1)
set (HAVE_SYS_TIME_H 1)
set (HAVE_SYS_UN_H 1)
set (HAVE_POLL_H 1)
set (HAVE_SYS_POLL_H 1)
set (HAVE_SYS_SELECT_H 1)
set (HAVE_SCHED_H 1)
set (HAVE_STRING_H 1)
set (HAVE_STRINGS_H 1)
set (HAVE_GETHOSTBYNAME 1)
set (HAVE_STRERROR_R 1)
set (HAVE_SCHED_GET_PRIORITY_MAX 1)
set (HAVE_SCHED_GET_PRIORITY_MIN 1)
if (OS_LINUX AND NOT USE_MUSL)
set (STRERROR_R_CHAR_P 1)
endif ()
#set(PACKAGE ${PACKAGE_NAME})
#set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
#set(VERSION ${thrift_VERSION})
# generate a config.h file
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build/cmake/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/thrift/config.h")
include_directories("${CMAKE_CURRENT_BINARY_DIR}")
add_library(${THRIFT_LIBRARY} ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES})
target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC "${THRIFT_INCLUDE_DIR}" ${CMAKE_CURRENT_BINARY_DIR})
target_link_libraries (${THRIFT_LIBRARY} PUBLIC boost::headers_only)


@ -0,0 +1 @@
../../../thrift/build/cmake/config.h.in

debian/changelog (2 lines changed)

@ -1,4 +1,4 @@
-clickhouse (21.13.1.1) unstable; urgency=low
+clickhouse (22.1.1.1) unstable; urgency=low
* Modified source code


@ -5,7 +5,7 @@ ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
-ARG version=21.13.1.*
+ARG version=22.1.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \


@ -1,4 +1,5 @@
-# docker build -t clickhouse/docs-build .
+# rebuild in #33610
# docker build -t clickhouse/docs-builder .
FROM ubuntu:20.04
# ARG for quick switch to a given ubuntu mirror # ARG for quick switch to a given ubuntu mirror
@ -9,8 +10,6 @@ ENV LANG=C.UTF-8
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
python3-setuptools \
virtualenv \
wget \
bash \
python \


@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/docs-check .
-FROM clickhouse/docs-builder
+ARG FROM_TAG=latest
FROM clickhouse/docs-builder:$FROM_TAG
COPY run.sh /


@ -2,6 +2,7 @@
set -euo pipefail
cd $REPO_PATH/docs/tools
rm -rf venv
mkdir venv
virtualenv -p $(which python3) venv
source venv/bin/activate


@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/docs-release .
-FROM clickhouse/docs-builder
+ARG FROM_TAG=latest
FROM clickhouse/docs-builder:$FROM_TAG
COPY run.sh /


@ -1,3 +1,4 @@
# rebuild in #33610
# docker build -t clickhouse/binary-builder .
FROM ubuntu:20.04


@ -1,3 +1,4 @@
# rebuild in #33610
# docker build -t clickhouse/deb-builder .
FROM ubuntu:20.04
@ -28,12 +29,14 @@ RUN apt-get update \
software-properties-common \
--yes --no-install-recommends
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
-RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
+RUN arch=${TARGETARCH:-amd64} \
-&& chmod +x dpkg-deb \
+&& curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}
&& cp dpkg-deb /usr/bin
RUN apt-get update \
&& apt-get install \

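The ARG TARGETARCH added to the builder images is filled in automatically by BuildKit/buildx, so the dpkg-deb download above picks the right binary per platform. A multi-platform build sketch (the tag name is illustrative):

  docker buildx build --platform linux/amd64,linux/arm64 -t clickhouse/deb-builder --push .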

@ -5,8 +5,7 @@ ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
-ARG version=21.13.1.*
+ARG version=22.1.1.*
ARG gosu_ver=1.10
# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
@ -30,6 +29,23 @@ ARG DEBIAN_FRONTEND=noninteractive
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
# To drop privileges, we need 'su' command, that simply changes uid and gid.
# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux:
# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
# It has to mitigate this drawback of Linux, and to do this, 'su' command is creating it's own pseudo-terminal
# and forwarding commands. Due to some ridiculous curcumstances, it does not work in Docker (or it does)
# and for these reasons people are using alternatives to the 'su' command in Docker,
# that don't mess with the terminal, don't care about closing the opened files, etc...
# but can only be safe to drop privileges inside Docker.
# The question - what implementation of 'su' command to use.
# It should be a simple script doing about just two syscalls.
# Some people tend to use 'gosu' tool that is written in Go.
# It is not used for several reasons:
# 1. Dependency on some foreign code in yet another programming language - does not sound alright.
# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners.
COPY su-exec.c /su-exec.c
RUN groupadd -r clickhouse --gid=101 \
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
&& apt-get update \
@ -68,8 +84,12 @@ RUN groupadd -r clickhouse --gid=101 \
clickhouse-client=$version \
clickhouse-server=$version ; \
fi \
-&& wget --progress=bar:force:noscroll "https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-$(dpkg --print-architecture)" -O /bin/gosu \
+&& apt-get install -y --no-install-recommends tcc libc-dev && \
-&& chmod +x /bin/gosu \
+tcc /su-exec.c -o /bin/su-exec && \
chown root:root /bin/su-exec && \
chmod 0755 /bin/su-exec && \
rm /su-exec.c && \
apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
&& clickhouse-local -q 'SELECT * FROM system.build_options' \
&& rm -rf \
/var/lib/apt/lists/* \
@ -100,4 +120,3 @@ VOLUME /var/lib/clickhouse
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
ENTRYPOINT ["/entrypoint.sh"]

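In the image above, su-exec replaces gosu for dropping privileges in the entrypoint. Its interface is the usage string from su-exec.c ("user-spec command [args]"); an illustrative invocation, not the exact entrypoint line:

  su-exec clickhouse:clickhouse /usr/bin/clickhouse-server --config-file=/etc/clickhouse-server/config.xml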

@ -6,7 +6,7 @@
# Middle steps are performed by the bash script.
FROM ubuntu:18.04 as clickhouse-server-base
-ARG gosu_ver=1.10
+ARG gosu_ver=1.14
VOLUME /packages/

docker/server/su-exec.c (new file, 138 lines)

@ -0,0 +1,138 @@
/*
https://github.com/ncopa/su-exec
The file is copy-pasted verbatim to avoid supply chain attacks.
The MIT License (MIT)
Copyright (c) 2015 ncopa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/* set user and group id and exec */
#include <sys/types.h>
#include <err.h>
#include <errno.h>
#include <grp.h>
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static char *argv0;
static void usage(int exitcode)
{
printf("Usage: %s user-spec command [args]\n", argv0);
exit(exitcode);
}
int main(int argc, char *argv[])
{
char *user, *group, **cmdargv;
char *end;
uid_t uid = getuid();
gid_t gid = getgid();
argv0 = argv[0];
if (argc < 3)
usage(0);
user = argv[1];
group = strchr(user, ':');
if (group)
*group++ = '\0';
cmdargv = &argv[2];
struct passwd *pw = NULL;
if (user[0] != '\0') {
uid_t nuid = strtol(user, &end, 10);
if (*end == '\0')
uid = nuid;
else {
pw = getpwnam(user);
if (pw == NULL)
err(1, "getpwnam(%s)", user);
}
}
if (pw == NULL) {
pw = getpwuid(uid);
}
if (pw != NULL) {
uid = pw->pw_uid;
gid = pw->pw_gid;
}
setenv("HOME", pw != NULL ? pw->pw_dir : "/", 1);
if (group && group[0] != '\0') {
/* group was specified, ignore grouplist for setgroups later */
pw = NULL;
gid_t ngid = strtol(group, &end, 10);
if (*end == '\0')
gid = ngid;
else {
struct group *gr = getgrnam(group);
if (gr == NULL)
err(1, "getgrnam(%s)", group);
gid = gr->gr_gid;
}
}
if (pw == NULL) {
if (setgroups(1, &gid) < 0)
err(1, "setgroups(%i)", gid);
} else {
int ngroups = 0;
gid_t *glist = NULL;
while (1) {
int r = getgrouplist(pw->pw_name, gid, glist, &ngroups);
if (r >= 0) {
if (setgroups(ngroups, glist) < 0)
err(1, "setgroups");
break;
}
glist = realloc(glist, ngroups * sizeof(gid_t));
if (glist == NULL)
err(1, "malloc");
}
}
if (setgid(gid) < 0)
err(1, "setgid(%i)", gid);
if (setuid(uid) < 0)
err(1, "setuid(%i)", uid);
execvp(cmdargv[0], cmdargv);
err(1, "%s", cmdargv[0]);
return 1;
}
View File
@ -1,7 +1,7 @@
FROM ubuntu:18.04 FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/" ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=21.13.1.* ARG version=22.1.1.*
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y apt-transport-https dirmngr && \ apt-get install -y apt-transport-https dirmngr && \
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/test-base . # docker build -t clickhouse/test-base .
FROM clickhouse/test-util ARG FROM_TAG=latest
FROM clickhouse/test-util:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror # ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com" ARG apt_archive="http://archive.ubuntu.com"
@ -28,12 +30,14 @@ RUN apt-get update \
software-properties-common \ software-properties-common \
--yes --no-install-recommends --yes --no-install-recommends
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip. # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems # Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \ RUN arch=${TARGETARCH:-amd64} \
&& chmod +x dpkg-deb \ && curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}
&& cp dpkg-deb /usr/bin
RUN apt-get update \ RUN apt-get update \
&& apt-get install \ && apt-get install \
View File
@ -1,12 +1,14 @@
# rebuild in #33610
# docker build --network=host -t clickhouse/codebrowser . # docker build --network=host -t clickhouse/codebrowser .
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser # docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser
FROM clickhouse/binary-builder ARG FROM_TAG=latest
FROM clickhouse/binary-builder:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror # ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com" ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-13 libllvm13 libclang-13-dev RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-13 libllvm13 libclang-13-dev libmlir-13-dev
# repo versions doesn't work correctly with C++17 # repo versions doesn't work correctly with C++17
# also we push reports to s3, so we add index.html to subfolder urls # also we push reports to s3, so we add index.html to subfolder urls
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/fasttest . # docker build -t clickhouse/fasttest .
FROM clickhouse/test-util ARG FROM_TAG=latest
FROM clickhouse/test-util:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror # ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com" ARG apt_archive="http://archive.ubuntu.com"
@ -28,12 +30,14 @@ RUN apt-get update \
software-properties-common \ software-properties-common \
--yes --no-install-recommends --yes --no-install-recommends
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip. # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems # Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \ RUN arch=${TARGETARCH:-amd64} \
&& chmod +x dpkg-deb \ && curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}
&& cp dpkg-deb /usr/bin
RUN apt-get update \ RUN apt-get update \
&& apt-get install \ && apt-get install \
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/fuzzer . # docker build -t clickhouse/fuzzer .
FROM clickhouse/test-base ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror # ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com" ARG apt_archive="http://archive.ubuntu.com"
View File
@ -1,44 +1,57 @@
# rebuild in #33610
# docker build -t clickhouse/integration-test . # docker build -t clickhouse/integration-test .
FROM clickhouse/test-base ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN apt-get update \ RUN apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get -y install \ && env DEBIAN_FRONTEND=noninteractive apt-get -y install \
tzdata \
python3 \
libicu-dev \
bsdutils \ bsdutils \
curl \
default-jre \
g++ \
gdb \ gdb \
unixodbc \ iproute2 \
odbcinst \ krb5-user \
libicu-dev \
libsqlite3-dev \ libsqlite3-dev \
libsqliteodbc \ libsqliteodbc \
odbc-postgresql \
sqlite3 \
curl \
tar \
lz4 \
krb5-user \
iproute2 \
lsof \ lsof \
g++ \ lz4 \
default-jre odbc-postgresql \
odbcinst \
python3 \
rpm2cpio \
sqlite3 \
tar \
tzdata \
unixodbc \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN rm -rf \ # Architecture of the image when BuildKit/buildx is used
/var/lib/apt/lists/* \ ARG TARGETARCH
/var/cache/debconf \
/tmp/* \
RUN apt-get clean
# Install MySQL ODBC driver # Install MySQL ODBC driver from RHEL rpm
RUN curl 'https://downloads.mysql.com/archives/get/p/10/file/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --location --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) rarch=x86_64 ;; \
arm64) rarch=aarch64 ;; \
esac \
&& cd /tmp \
&& curl -o mysql-odbc.rpm "https://cdn.mysql.com/Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.27-1.el8.${rarch}.rpm" \
&& rpm2archive mysql-odbc.rpm \
&& tar xf mysql-odbc.rpm.tgz -C / ./usr/lib64/ \
&& LINK_DIR=$(dpkg -L libodbc1 | grep '^/usr/lib/.*-linux-gnu/odbc$') \
&& ln -s /usr/lib64/libmyodbc8a.so "$LINK_DIR" \
&& ln -s /usr/lib64/libmyodbc8a.so "$LINK_DIR"/libmyodbc.so
# Unfortunately this is required for a single test for conversion data from zookeeper to clickhouse-keeper. # Unfortunately this is required for a single test for conversion data from zookeeper to clickhouse-keeper.
# ZooKeeper is not started by default, but consumes some space in containers. # ZooKeeper is not started by default, but consumes some space in containers.
# 777 perms used to allow anybody to start/stop ZooKeeper # 777 perms used to allow anybody to start/stop ZooKeeper
ENV ZOOKEEPER_VERSION='3.6.3' ENV ZOOKEEPER_VERSION='3.6.3'
RUN curl -O "https://mirrors.estointernet.in/apache/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz" RUN curl -O "https://dlcdn.apache.org/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz"
RUN tar -zxvf apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz && mv apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper && chmod -R 777 /opt/zookeeper && rm apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz RUN tar -zxvf apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz && mv apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper && chmod -R 777 /opt/zookeeper && rm apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz
RUN echo $'tickTime=2500 \n\ RUN echo $'tickTime=2500 \n\
tickTime=2500 \n\ tickTime=2500 \n\
View File
@ -0,0 +1,47 @@
FROM ubuntu:20.04
MAINTAINER lgbo-ustc <lgbo.ustc@gmail.com>
RUN apt-get update
RUN apt-get install -y wget openjdk-8-jre
RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-3.1.0/hadoop-3.1.0.tar.gz && \
tar -xf hadoop-3.1.0.tar.gz && rm -rf hadoop-3.1.0.tar.gz
RUN wget https://dlcdn.apache.org/hive/hive-2.3.9/apache-hive-2.3.9-bin.tar.gz && \
tar -xf apache-hive-2.3.9-bin.tar.gz && rm -rf apache-hive-2.3.9-bin.tar.gz
RUN apt install -y vim
RUN apt install -y openssh-server openssh-client
RUN apt install -y mysql-server
RUN mkdir -p /root/.ssh && \
ssh-keygen -t rsa -b 2048 -P '' -f /root/.ssh/id_rsa && \
cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys && \
cp /root/.ssh/id_rsa /etc/ssh/ssh_host_rsa_key && \
cp /root/.ssh/id_rsa.pub /etc/ssh/ssh_host_rsa_key.pub
RUN wget https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.27.tar.gz &&\
tar -xf mysql-connector-java-8.0.27.tar.gz && \
mv mysql-connector-java-8.0.27/mysql-connector-java-8.0.27.jar /apache-hive-2.3.9-bin/lib/ && \
rm -rf mysql-connector-java-8.0.27.tar.gz mysql-connector-java-8.0.27
RUN apt install -y iputils-ping net-tools
ENV JAVA_HOME=/usr
ENV HADOOP_HOME=/hadoop-3.1.0
ENV HDFS_NAMENODE_USER=root
ENV HDFS_DATANODE_USER=root HDFS_SECONDARYNAMENODE_USER=root YARN_RESOURCEMANAGER_USER=root YARN_NODEMANAGER_USER=root HDFS_DATANODE_SECURE_USER=hdfs
COPY hdfs-site.xml /hadoop-3.1.0/etc/hadoop
COPY mapred-site.xml /hadoop-3.1.0/etc/hadoop
COPY yarn-site.xml /hadoop-3.1.0/etc/hadoop
COPY hadoop-env.sh /hadoop-3.1.0/etc/hadoop/
#COPY core-site.xml /hadoop-3.1.0/etc/hadoop
COPY core-site.xml.template /hadoop-3.1.0/etc/hadoop
COPY hive-site.xml /apache-hive-2.3.9-bin/conf
COPY prepare_hive_data.sh /
COPY demo_data.txt /
ENV PATH=/apache-hive-2.3.9-bin/bin:/hadoop-3.1.0/bin:/hadoop-3.1.0/sbin:$PATH
COPY start.sh /
View File
@ -0,0 +1,14 @@
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://HOSTNAME:9000</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
</configuration>
View File
@ -0,0 +1,6 @@
abc,1,2021-11-16
abd,15,2021-11-05
aaa,22,2021-11-16
dda,0,2021-11-01
dfb,11,2021-11-05
jhn,89,2021-11-11
View File
@ -0,0 +1,422 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hadoop-specific environment variables here.
##
## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS. THEREFORE,
## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
##
## Precedence rules:
##
## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
##
## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
##
# Many of the options here are built from the perspective that users
# may want to provide OVERWRITING values on the command line.
# For example:
#
JAVA_HOME=/usr/
#
# Therefore, the vast majority (BUT NOT ALL!) of these defaults
# are configured for substitution and not append. If append
# is preferable, modify this file accordingly.
###
# Generic settings for HADOOP
###
# Technically, the only required environment variable is JAVA_HOME.
# All others are optional. However, the defaults are probably not
# preferred. Many sites configure these options outside of Hadoop,
# such as in /etc/profile.d
# The java implementation to use. By default, this environment
# variable is REQUIRED on ALL platforms except OS X!
# export JAVA_HOME=
# Location of Hadoop. By default, Hadoop will attempt to determine
# this location based upon its execution path.
# export HADOOP_HOME=
# Location of Hadoop's configuration information. i.e., where this
# file is living. If this is not defined, Hadoop will attempt to
# locate it based upon its execution path.
#
# NOTE: It is recommended that this variable not be set here but in
# /etc/profile.d or equivalent. Some options (such as
# --config) may react strangely otherwise.
#
# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
# The maximum amount of heap to use (Java -Xmx). If no unit
# is provided, it will be converted to MB. Daemons will
# prefer any Xmx setting in their respective _OPT variable.
# There is no default; the JVM will autoscale based upon machine
# memory size.
# export HADOOP_HEAPSIZE_MAX=
# The minimum amount of heap to use (Java -Xms). If no unit
# is provided, it will be converted to MB. Daemons will
# prefer any Xms setting in their respective _OPT variable.
# There is no default; the JVM will autoscale based upon machine
# memory size.
# export HADOOP_HEAPSIZE_MIN=
# Enable extra debugging of Hadoop's JAAS binding, used to set up
# Kerberos security.
# export HADOOP_JAAS_DEBUG=true
# Extra Java runtime options for all Hadoop commands. We don't support
# IPv6 yet/still, so by default the preference is set to IPv4.
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
# For Kerberos debugging, an extended option set logs more information
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
# Some parts of the shell code may do special things dependent upon
# the operating system. We have to set this here. See the next
# section as to why....
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
# Under certain conditions, Java on OS X will throw SCDynamicStore errors
# in the system logs.
# See HADOOP-8719 for more information. If one needs Kerberos
# support on OS X, one will want to change/remove this extra bit.
case ${HADOOP_OS_TYPE} in
Darwin*)
export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.realm= "
export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.kdc= "
export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.conf= "
;;
esac
# Extra Java runtime options for some Hadoop commands
# and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for
# such commands. In most cases, # this should be left empty and
# let users supply it on the command line.
# export HADOOP_CLIENT_OPTS=""
#
# A note about classpaths.
#
# By default, Apache Hadoop overrides Java's CLASSPATH
# environment variable. It is configured such
# that it starts out blank with new entries added after passing
# a series of checks (file/dir exists, not already listed aka
# de-duplication). During de-duplication, wildcards and/or
# directories are *NOT* expanded to keep it simple. Therefore,
# if the computed classpath has two specific mentions of
# awesome-methods-1.0.jar, only the first one added will be seen.
# If two directories are in the classpath that both contain
# awesome-methods-1.0.jar, then Java will pick up both versions.
# An additional, custom CLASSPATH. Site-wide configs should be
# handled via the shellprofile functionality, utilizing the
# hadoop_add_classpath function for greater control and much
# harder for apps/end-users to accidentally override.
# Similarly, end users should utilize ${HOME}/.hadooprc .
# This variable should ideally only be used as a short-cut,
# interactive way for temporary additions on the command line.
# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
# Should HADOOP_CLASSPATH be first in the official CLASSPATH?
# export HADOOP_USER_CLASSPATH_FIRST="yes"
# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along
# with the main jar are handled by a separate isolated
# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job'
# is utilized. If it is set, HADOOP_CLASSPATH and
# HADOOP_USER_CLASSPATH_FIRST are ignored.
# export HADOOP_USE_CLIENT_CLASSLOADER=true
# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
# is enabled. Names ending in '.' (period) are treated as package names, and
# names starting with a '-' are treated as negative matches. For example,
# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
# Enable optional, bundled Hadoop features
# This is a comma delimited list. It may NOT be overridden via .hadooprc
# Entries may be added/removed as needed.
# export HADOOP_OPTIONAL_TOOLS="hadoop-openstack,hadoop-aliyun,hadoop-azure,hadoop-azure-datalake,hadoop-aws,hadoop-kafka"
###
# Options for remote shell connectivity
###
# There are some optional components of hadoop that allow for
# command and control of remote hosts. For example,
# start-dfs.sh will attempt to bring up all NNs, DNS, etc.
# Options to pass to SSH when one of the "log into a host and
# start/stop daemons" scripts is executed
# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
# The built-in ssh handler will limit itself to 10 simultaneous connections.
# For pdsh users, this sets the fanout size ( -f )
# Change this to increase/decrease as necessary.
# export HADOOP_SSH_PARALLEL=10
# Filename which contains all of the hosts for any remote execution
# helper scripts # such as workers.sh, start-dfs.sh, etc.
# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
###
# Options for all daemons
###
#
#
# Many options may also be specified as Java properties. It is
# very common, and in many cases, desirable, to hard-set these
# in daemon _OPTS variables. Where applicable, the appropriate
# Java property is also identified. Note that many are re-used
# or set differently in certain contexts (e.g., secure vs
# non-secure)
#
# Where (primarily) daemon log files are stored.
# ${HADOOP_HOME}/logs by default.
# Java property: hadoop.log.dir
# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
# A string representing this instance of hadoop. $USER by default.
# This is used in writing log and pid files, so keep that in mind!
# Java property: hadoop.id.str
# export HADOOP_IDENT_STRING=$USER
# How many seconds to pause after stopping a daemon
# export HADOOP_STOP_TIMEOUT=5
# Where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/tmp
# Default log4j setting for interactive commands
# Java property: hadoop.root.logger
# export HADOOP_ROOT_LOGGER=INFO,console
# Default log4j setting for daemons spawned explicitly by
# --daemon option of hadoop, hdfs, mapred and yarn command.
# Java property: hadoop.root.logger
# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
# Default log level and output location for security-related messages.
# You will almost certainly want to change this on a per-daemon basis via
# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the
# defaults for the NN and 2NN override this by default.)
# Java property: hadoop.security.logger
# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
# Default process priority level
# Note that sub-processes will also run at this level!
# export HADOOP_NICENESS=0
# Default name for the service level authorization file
# Java property: hadoop.policy.file
# export HADOOP_POLICYFILE="hadoop-policy.xml"
#
# NOTE: this is not used by default! <-----
# You can define variables right here and then re-use them later on.
# For example, it is common to use the same garbage collection settings
# for all the daemons. So one could define:
#
# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
#
# .. and then use it as per the b option under the namenode.
###
# Secure/privileged execution
###
#
# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
# on privileged ports. This functionality can be replaced by providing
# custom functions. See hadoop-functions.sh for more information.
#
# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol. Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
# export JSVC_HOME=/usr/bin
#
# This directory contains pids for secure and privileged processes.
#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
#
# This directory contains the logs for secure and privileged processes.
# Java property: hadoop.log.dir
# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
#
# When running a secure daemon, the default value of HADOOP_IDENT_STRING
# ends up being a bit bogus. Therefore, by default, the code will
# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER. If one wants
# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
# export HADOOP_SECURE_IDENT_PRESERVE="true"
###
# NameNode specific parameters
###
# Default log level and output location for file system related change
# messages. For non-namenode daemons, the Java property must be set in
# the appropriate _OPTS if one wants something other than INFO,NullAppender
# Java property: hdfs.audit.logger
# export HDFS_AUDIT_LOGGER=INFO,NullAppender
# Specify the JVM options to be used when starting the NameNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# a) Set JMX options
# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
#
# b) Set garbage collection logs
# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
#
# c) ... or set them directly
# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
# this is the default:
# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
###
# SecondaryNameNode specific parameters
###
# Specify the JVM options to be used when starting the SecondaryNameNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# This is the default:
# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
###
# DataNode specific parameters
###
# Specify the JVM options to be used when starting the DataNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# This is the default:
# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol. This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
# This will replace the hadoop.id.str Java property in secure mode.
# export HDFS_DATANODE_SECURE_USER=hdfs
# Supplemental options for secure datanodes
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
###
# NFS3 Gateway specific parameters
###
# Specify the JVM options to be used when starting the NFS3 Gateway.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_NFS3_OPTS=""
# Specify the JVM options to be used when starting the Hadoop portmapper.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_PORTMAP_OPTS="-Xmx512m"
# Supplemental options for privileged gateways
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
# On privileged gateways, user to run the gateway as after dropping privileges
# This will replace the hadoop.id.str Java property in secure mode.
# export HDFS_NFS3_SECURE_USER=nfsserver
###
# ZKFailoverController specific parameters
###
# Specify the JVM options to be used when starting the ZKFailoverController.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_ZKFC_OPTS=""
###
# QuorumJournalNode specific parameters
###
# Specify the JVM options to be used when starting the QuorumJournalNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_JOURNALNODE_OPTS=""
###
# HDFS Balancer specific parameters
###
# Specify the JVM options to be used when starting the HDFS Balancer.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_BALANCER_OPTS=""
###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_MOVER_OPTS=""
###
# Router-based HDFS Federation specific parameters
# Specify the JVM options to be used when starting the RBF Routers.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_DFSROUTER_OPTS=""
###
###
# Advanced Users Only!
###
#
# When building Hadoop, one can add the class paths to the commands
# via this special env var:
# export HADOOP_ENABLE_BUILD_PATHS="true"
#
# To prevent accidents, shell commands be (superficially) locked
# to only allow certain users to execute certain subcommands.
# It uses the format of (command)_(subcommand)_USER.
#
# For example, to limit who can execute the namenode command,
# export HDFS_NAMENODE_USER=hdfs
View File
@ -0,0 +1,6 @@
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
View File
@ -0,0 +1,35 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost/hcatalog?createDatabaseIfNotExist=true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>test</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>test</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
</configuration>
View File
@ -0,0 +1,6 @@
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
View File
@ -0,0 +1,6 @@
#!/bin/bash
hive -e "create database test"
hive -e "create table test.demo(id string, score int) PARTITIONED BY(day string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'; create table test.demo_orc(id string, score int) PARTITIONED BY(day string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'; "
hive -e "create table test.demo_text(id string, score int, day string)row format delimited fields terminated by ','; load data local inpath '/demo_data.txt' into table test.demo_text "
hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.demo partition(day) select * from test.demo_text; insert into test.demo_orc partition(day) select * from test.demo_text"
View File
@ -0,0 +1,12 @@
service ssh start
sed s/HOSTNAME/$HOSTNAME/ /hadoop-3.1.0/etc/hadoop/core-site.xml.template > /hadoop-3.1.0/etc/hadoop/core-site.xml
hadoop namenode -format
start-all.sh
service mysql start
mysql -u root -e "CREATE USER \"test\"@\"localhost\" IDENTIFIED BY \"test\""
mysql -u root -e "GRANT ALL ON * . * TO 'test'@'localhost'"
schematool -initSchema -dbType mysql
#nohup hiveserver2 &
nohup hive --service metastore &
bash /prepare_hive_data.sh
while true; do sleep 1000; done
View File
@ -0,0 +1,32 @@
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.application.classpath</name>
<value>/hadoop-3.1.0/etc/hadoop,/hadoop-3.1.0/share/hadoop/common/*,/hadoop-3.1.0/share/hadoop/common/lib/*,/hadoop-3.1.0/share/hadoop/hdfs/*, /hadoop-3.1.0/share/hadoop/hdfs/lib/*, /hadoop-3.1.0/share/hadoop/mapreduce/*, /hadoop-3.1.0/share/hadoop/mapreduce/lib/*, /hadoop-3.1.0/share/hadoop/yarn/*, /hadoop-3.1.0/share/hadoop/yarn/lib/*</value>
</property>
<property>
<description>
Number of seconds after an application finishes before the nodemanager's
DeletionService will delete the application's localized file directory
and log directory.
To diagnose Yarn application problems, set this property's value large
enough (for example, to 600 = 10 minutes) to permit examination of these
directories. After changing the property's value, you must restart the
nodemanager in order for it to have an effect.
The roots of Yarn applications' work directories is configurable with
the yarn.nodemanager.local-dirs property (see below), and the roots
of the Yarn applications' log directories is configurable with the
yarn.nodemanager.log-dirs property (see also below).
</description>
<name>yarn.nodemanager.delete.debug-delay-sec</name>
<value>600</value>
</property>
</configuration>
View File
@ -45,7 +45,7 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV DOCKER_CHANNEL stable ENV DOCKER_CHANNEL stable
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" RUN add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}"
RUN apt-get update \ RUN apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
@ -58,7 +58,9 @@ RUN apt-get update \
RUN dockerd --version; docker --version RUN dockerd --version; docker --version
RUN python3 -m pip install \ ARG TARGETARCH
# FIXME: psycopg2-binary is not available for aarch64, we skip it for now
RUN test x$TARGETARCH = xarm64 || ( python3 -m pip install \
PyMySQL \ PyMySQL \
aerospike==4.0.0 \ aerospike==4.0.0 \
avro==1.10.2 \ avro==1.10.2 \
@ -88,7 +90,7 @@ RUN python3 -m pip install \
urllib3 \ urllib3 \
requests-kerberos \ requests-kerberos \
pyhdfs \ pyhdfs \
azure-storage-blob azure-storage-blob )
COPY modprobe.sh /usr/local/bin/modprobe COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/ COPY dockerd-entrypoint.sh /usr/local/bin/
@ -102,8 +104,6 @@ RUN set -x \
&& echo 'dockremap:165536:65536' >> /etc/subuid \ && echo 'dockremap:165536:65536' >> /etc/subuid \
&& echo 'dockremap:165536:65536' >> /etc/subgid && echo 'dockremap:165536:65536' >> /etc/subgid
RUN echo '127.0.0.1 localhost test.com' >> /etc/hosts
EXPOSE 2375 EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"] ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "pytest $PYTEST_OPTS"] CMD ["sh", "-c", "pytest $PYTEST_OPTS"]
View File
@ -0,0 +1,7 @@
version: '2.3'
services:
hdfs1:
image: lgboustc/hive_test:v1.0
hostname: hivetest
restart: always
entrypoint: bash /start.sh
View File
@ -14,4 +14,4 @@ services:
image: mongo:5.0 image: mongo:5.0
restart: always restart: always
ports: ports:
- "27018:27017" - ${MONGO_NO_CRED_EXTERNAL_PORT}:${MONGO_NO_CRED_INTERNAL_PORT}
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/keeper-jepsen-test . # docker build -t clickhouse/keeper-jepsen-test .
FROM clickhouse/test-base ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814 ENV CLOJURE_VERSION=1.10.3.814
View File
@ -1,8 +1,14 @@
# rebuild in #33610
# docker build -t clickhouse/pvs-test . # docker build -t clickhouse/pvs-test .
FROM clickhouse/binary-builder ARG FROM_TAG=latest
FROM clickhouse/binary-builder:$FROM_TAG
RUN apt-get update --yes \ # PVS studio doesn't support aarch64/arm64, so there is a check for it everywhere
# We'll produce an empty image for arm64
ARG TARGETARCH
RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
&& apt-get install \ && apt-get install \
bash \ bash \
wget \ wget \
@ -15,7 +21,7 @@ RUN apt-get update --yes \
libprotoc-dev \ libprotoc-dev \
libgrpc++-dev \ libgrpc++-dev \
libc-ares-dev \ libc-ares-dev \
--yes --no-install-recommends --yes --no-install-recommends )
#RUN wget -nv -O - http://files.viva64.com/etc/pubkey.txt | sudo apt-key add - #RUN wget -nv -O - http://files.viva64.com/etc/pubkey.txt | sudo apt-key add -
#RUN sudo wget -nv -O /etc/apt/sources.list.d/viva64.list http://files.viva64.com/etc/viva64.list #RUN sudo wget -nv -O /etc/apt/sources.list.d/viva64.list http://files.viva64.com/etc/viva64.list
@ -27,7 +33,7 @@ RUN apt-get update --yes \
ENV PKG_VERSION="pvs-studio-latest" ENV PKG_VERSION="pvs-studio-latest"
RUN set -x \ RUN test x$TARGETARCH = xarm64 || ( set -x \
&& export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \ && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \
&& wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \ && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \
&& echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \ && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \
@ -35,7 +41,7 @@ RUN set -x \
&& wget -nv "https://files.viva64.com/${PKG_VERSION}.deb" \ && wget -nv "https://files.viva64.com/${PKG_VERSION}.deb" \
&& { debsig-verify ${PKG_VERSION}.deb \ && { debsig-verify ${PKG_VERSION}.deb \
|| echo "WARNING: Some file was just downloaded from the internet without any validation and we are installing it into the system"; } \ || echo "WARNING: Some file was just downloaded from the internet without any validation and we are installing it into the system"; } \
&& dpkg -i "${PKG_VERSION}.deb" && dpkg -i "${PKG_VERSION}.deb" )
ENV CCACHE_DIR=/test_output/ccache ENV CCACHE_DIR=/test_output/ccache
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/split-build-smoke-test . # docker build -t clickhouse/split-build-smoke-test .
FROM clickhouse/binary-builder ARG FROM_TAG=latest
FROM clickhouse/binary-builder:$FROM_TAG
COPY run.sh /run.sh COPY run.sh /run.sh
COPY process_split_build_smoke_test_result.py / COPY process_split_build_smoke_test_result.py /
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/stateful-test . # docker build -t clickhouse/stateful-test .
FROM clickhouse/stateless-test ARG FROM_TAG=latest
FROM clickhouse/stateless-test:$FROM_TAG
RUN apt-get update -y \ RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \ && env DEBIAN_FRONTEND=noninteractive \
View File
@ -1,11 +1,10 @@
# rebuild in #33610
# docker build -t clickhouse/stateless-test . # docker build -t clickhouse/stateless-test .
FROM clickhouse/test-base ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz" ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
RUN echo "deb [trusted=yes] http://repo.mysql.com/apt/ubuntu/ bionic mysql-5.7" >> /etc/apt/sources.list \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8C718D3B5072E1F5
RUN apt-get update -y \ RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \ && env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \ apt-get install --yes --no-install-recommends \
@ -30,7 +29,7 @@ RUN apt-get update -y \
tree \ tree \
unixodbc \ unixodbc \
wget \ wget \
mysql-client=5.7* \ mysql-client=8.0* \
postgresql-client \ postgresql-client \
sqlite3 sqlite3
@ -49,10 +48,13 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV NUM_TRIES=1 ENV NUM_TRIES=1
ENV MAX_RUN_TIME=0 ENV MAX_RUN_TIME=0
ARG TARGETARCH
# Download Minio-related binaries # Download Minio-related binaries
RUN wget 'https://dl.min.io/server/minio/release/linux-amd64/minio' \ RUN arch=${TARGETARCH:-amd64} \
&& wget "https://dl.min.io/server/minio/release/linux-${arch}/minio" \
&& chmod +x ./minio \ && chmod +x ./minio \
&& wget 'https://dl.min.io/client/mc/release/linux-amd64/mc' \ && wget "https://dl.min.io/client/mc/release/linux-${arch}/mc" \
&& chmod +x ./mc && chmod +x ./mc
ENV MINIO_ROOT_USER="clickhouse" ENV MINIO_ROOT_USER="clickhouse"
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/stateless-pytest . # docker build -t clickhouse/stateless-pytest .
FROM clickhouse/test-base ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
RUN apt-get update -y && \ RUN apt-get update -y && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/stress-test . # docker build -t clickhouse/stress-test .
FROM clickhouse/stateful-test ARG FROM_TAG=latest
FROM clickhouse/stateful-test:$FROM_TAG
RUN apt-get update -y \ RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \ && env DEBIAN_FRONTEND=noninteractive \
View File
@ -146,6 +146,7 @@ handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass handle SIG$RTMIN nostop noprint pass
info signals info signals
continue continue
gcore
backtrace full backtrace full
info locals info locals
info registers info registers
@ -263,3 +264,10 @@ done
# Write check result into check_status.tsv # Write check result into check_status.tsv
clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv
[ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
# Core dumps (see gcore)
# Default filename is 'core.PROCESS_ID'
for core in core.*; do
pigz $core
mv $core.gz /output/
done
View File
@ -1,19 +1,41 @@
# docker build -t clickhouse/style-test . # docker build -t clickhouse/style-test .
FROM ubuntu:20.04 FROM ubuntu:20.04
ARG ACT_VERSION=0.2.25
ARG ACTIONLINT_VERSION=1.6.8
# ARG for quick switch to a given ubuntu mirror # ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com" ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
shellcheck \ curl \
libxml2-utils \
git \ git \
python3-pip \ libxml2-utils \
pylint \ pylint \
python3-pip \
shellcheck \
yamllint \ yamllint \
&& pip3 install codespell PyGithub boto3 unidiff dohq-artifactory && pip3 install codespell PyGithub boto3 unidiff dohq-artifactory
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
# Get act and actionlint from releases
RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) act_arch=x86_64 ;; \
arm64) act_arch=$arch ;; \
esac \
&& curl -o /tmp/act.tgz -L \
"https://github.com/nektos/act/releases/download/v${ACT_VERSION}/act_Linux_${act_arch}.tar.gz" \
&& tar xf /tmp/act.tgz -C /usr/bin act \
&& rm /tmp/act.tgz \
&& curl -o /tmp/actiolint.zip -L \
"https://github.com/rhysd/actionlint/releases/download/v${ACTIONLINT_VERSION}/actionlint_${ACTIONLINT_VERSION}_linux_${arch}.tar.gz" \
&& tar xf /tmp/actiolint.zip -C /usr/bin actionlint \
&& rm /tmp/actiolint.zip
COPY run.sh / COPY run.sh /
COPY process_style_check_result.py / COPY process_style_check_result.py /
CMD ["/bin/bash", "/run.sh"] CMD ["/bin/bash", "/run.sh"]
View File
@ -11,40 +11,7 @@ def process_result(result_folder):
description = "" description = ""
test_results = [] test_results = []
style_log_path = '{}/style_output.txt'.format(result_folder) duplicate_log_path = "{}/duplicate_output.txt".format(result_folder)
if not os.path.exists(style_log_path):
logging.info("No style check log on path %s", style_log_path)
return "exception", "No style check log", []
elif os.stat(style_log_path).st_size != 0:
description += "Style check failed. "
test_results.append(("Style check", "FAIL"))
status = "failure" # Disabled for now
else:
test_results.append(("Style check", "OK"))
typos_log_path = '{}/typos_output.txt'.format(result_folder)
if not os.path.exists(style_log_path):
logging.info("No typos check log on path %s", style_log_path)
return "exception", "No typos check log", []
elif os.stat(typos_log_path).st_size != 0:
description += "Typos check failed. "
test_results.append(("Typos check", "FAIL"))
status = "failure"
else:
test_results.append(("Typos check", "OK"))
whitespaces_log_path = '{}/whitespaces_output.txt'.format(result_folder)
if not os.path.exists(style_log_path):
logging.info("No whitespaces check log on path %s", style_log_path)
return "exception", "No whitespaces check log", []
elif os.stat(whitespaces_log_path).st_size != 0:
description += "Whitespaces check failed. "
test_results.append(("Whitespaces check", "FAIL"))
status = "failure"
else:
test_results.append(("Whitespaces check", "OK"))
duplicate_log_path = '{}/duplicate_output.txt'.format(result_folder)
if not os.path.exists(duplicate_log_path): if not os.path.exists(duplicate_log_path):
logging.info("No header duplicates check log on path %s", duplicate_log_path) logging.info("No header duplicates check log on path %s", duplicate_log_path)
return "exception", "No header duplicates check log", [] return "exception", "No header duplicates check log", []
@ -55,7 +22,7 @@ def process_result(result_folder):
else: else:
test_results.append(("Header duplicates check", "OK")) test_results.append(("Header duplicates check", "OK"))
shellcheck_log_path = '{}/shellcheck_output.txt'.format(result_folder) shellcheck_log_path = "{}/shellcheck_output.txt".format(result_folder)
if not os.path.exists(shellcheck_log_path): if not os.path.exists(shellcheck_log_path):
logging.info("No shellcheck log on path %s", shellcheck_log_path) logging.info("No shellcheck log on path %s", shellcheck_log_path)
return "exception", "No shellcheck log", [] return "exception", "No shellcheck log", []
@ -66,6 +33,50 @@ def process_result(result_folder):
else: else:
test_results.append(("Shellcheck", "OK")) test_results.append(("Shellcheck", "OK"))
style_log_path = "{}/style_output.txt".format(result_folder)
if not os.path.exists(style_log_path):
logging.info("No style check log on path %s", style_log_path)
return "exception", "No style check log", []
elif os.stat(style_log_path).st_size != 0:
description += "Style check failed. "
test_results.append(("Style check", "FAIL"))
status = "failure"
else:
test_results.append(("Style check", "OK"))
typos_log_path = "{}/typos_output.txt".format(result_folder)
if not os.path.exists(typos_log_path):
logging.info("No typos check log on path %s", typos_log_path)
return "exception", "No typos check log", []
elif os.stat(typos_log_path).st_size != 0:
description += "Typos check failed. "
test_results.append(("Typos check", "FAIL"))
status = "failure"
else:
test_results.append(("Typos check", "OK"))
whitespaces_log_path = "{}/whitespaces_output.txt".format(result_folder)
if not os.path.exists(whitespaces_log_path):
logging.info("No whitespaces check log on path %s", whitespaces_log_path)
return "exception", "No whitespaces check log", []
elif os.stat(whitespaces_log_path).st_size != 0:
description += "Whitespaces check failed. "
test_results.append(("Whitespaces check", "FAIL"))
status = "failure"
else:
test_results.append(("Whitespaces check", "OK"))
workflows_log_path = "{}/workflows_output.txt".format(result_folder)
if not os.path.exists(workflows_log_path):
logging.info("No workflows check log on path %s", style_log_path)
return "exception", "No workflows check log", []
elif os.stat(workflows_log_path).st_size != 0:
description += "Workflows check failed. "
test_results.append(("Workflows check", "FAIL"))
status = "failure"
else:
test_results.append(("Workflows check", "OK"))
if not description: if not description:
description += "Style check success" description += "Style check success"
@ -73,20 +84,22 @@ def process_result(result_folder):
def write_results(results_file, status_file, results, status): def write_results(results_file, status_file, results, status):
with open(results_file, 'w') as f: with open(results_file, "w") as f:
out = csv.writer(f, delimiter='\t') out = csv.writer(f, delimiter="\t")
out.writerows(results) out.writerows(results)
with open(status_file, 'w') as f: with open(status_file, "w") as f:
out = csv.writer(f, delimiter='\t') out = csv.writer(f, delimiter="\t")
out.writerow(status) out.writerow(status)
if __name__ == "__main__": if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of style check") parser = argparse.ArgumentParser(
parser.add_argument("--in-results-dir", default='/test_output/') description="ClickHouse script for parsing results of style check"
parser.add_argument("--out-results-file", default='/test_output/test_results.tsv') )
parser.add_argument("--out-status-file", default='/test_output/check_status.tsv') parser.add_argument("--in-results-dir", default="/test_output/")
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
args = parser.parse_args() args = parser.parse_args()
state, description, test_results = process_result(args.in_results_dir) state, description, test_results = process_result(args.in_results_dir)
View File
@ -3,9 +3,10 @@
# yaml check is not the best one # yaml check is not the best one
cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv
./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt
./check-style -n |& tee /test_output/style_output.txt ./check-style -n |& tee /test_output/style_output.txt
./check-typos |& tee /test_output/typos_output.txt ./check-typos |& tee /test_output/typos_output.txt
./check-whitespaces -n |& tee /test_output/whitespaces_output.txt ./check-whitespaces -n |& tee /test_output/whitespaces_output.txt
./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt ./check-workflows |& tee /test_output/workflows_output.txt
./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt ./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt
/process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv /process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
View File
@ -1,5 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/unit-test . # docker build -t clickhouse/unit-test .
FROM clickhouse/stateless-test ARG FROM_TAG=latest
FROM clickhouse/stateless-test:$FROM_TAG
RUN apt-get install gdb RUN apt-get install gdb
View File
@ -1,3 +1,4 @@
# rebuild in #33610
# docker build -t clickhouse/test-util . # docker build -t clickhouse/test-util .
FROM ubuntu:20.04 FROM ubuntu:20.04
View File
@ -6,8 +6,8 @@ Minimal ClickHouse build example:
```bash ```bash
cmake .. \ cmake .. \
-DCMAKE_C_COMPILER=$(which clang-11) \ -DCMAKE_C_COMPILER=$(which clang-13) \
-DCMAKE_CXX_COMPILER=$(which clang++-11) \ -DCMAKE_CXX_COMPILER=$(which clang++-13) \
-DCMAKE_BUILD_TYPE=Debug \ -DCMAKE_BUILD_TYPE=Debug \
-DENABLE_CLICKHOUSE_ALL=OFF \ -DENABLE_CLICKHOUSE_ALL=OFF \
-DENABLE_CLICKHOUSE_SERVER=ON \ -DENABLE_CLICKHOUSE_SERVER=ON \
View File
@ -175,7 +175,7 @@ When we are going to read something from a part in `MergeTree`, we look at `prim
When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. Thats why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts. When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. Thats why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.
`MergeTree` is not an LSM tree because it does not contain “memtable” and “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently about once per second is ok, but a thousand times a second is not. We did it this way for simplicitys sake, and because we are already inserting data in batches in our applications. `MergeTree` is not an LSM tree because it does not contain MEMTABLE and LOG: inserted data is written directly to the filesystem. This behavior makes MergeTree much more suitable to insert data in batches. Therefore frequently inserting small amounts of rows is not ideal for MergeTree. For example, a couple of rows per second is OK, but doing it a thousand times a second is not optimal for MergeTree. However, there is an async insert mode for small inserts to overcome this limitation. We did it this way for simplicitys sake, and because we are already inserting data in batches in our applications
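As a hedged illustration of the insert patterns described above (the table `example_events` and its columns are hypothetical; `async_insert` is the asynchronous insert mode mentioned):
``` sql
-- Preferred: insert many rows per statement, so each INSERT forms one new part.
INSERT INTO example_events (event_time, user_id) VALUES
    (now(), 1), (now(), 2), (now(), 3);
-- For frequent small inserts, the asynchronous insert mode lets the server buffer
-- incoming rows and flush them in batches instead of creating a part per INSERT.
SET async_insert = 1;
INSERT INTO example_events (event_time, user_id) VALUES (now(), 42);
```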
There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form. There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.
View File
@ -5,7 +5,7 @@ toc_title: Build on Mac OS X
# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x} # How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
!!! info "You don't have to build ClickHouse yourself!" !!! info "You don't have to build ClickHouse yourself"
You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start). You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start).
Follow `macOS (Intel)` or `macOS (Apple silicon)` installation instructions. Follow `macOS (Intel)` or `macOS (Apple silicon)` installation instructions.

View File

@ -26,7 +26,7 @@ ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') [SE
## Example of Use {#example-of-use} ## Example of Use {#example-of-use}
``` sql ``` sql
CREATE DATABASE postgresql; CREATE DATABASE postgres_db
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password'); ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password');
SHOW TABLES FROM postgres_db; SHOW TABLES FROM postgres_db;

View File

@ -0,0 +1,408 @@
---
toc_priority: 4
toc_title: Hive
---
# Hive {#hive}
The Hive engine allows you to perform `SELECT` queries on HDFS Hive tables. Currently it supports the following input formats:
- Text: supports only simple scalar column types except `binary`
- ORC: supports simple scalar column types except `char`; among complex types, only `array` is supported
- Parquet: supports all simple scalar column types; among complex types, only `array` is supported
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [ALIAS expr1],
name2 [type2] [ALIAS expr2],
...
) ENGINE = Hive('thrift://host:port', 'database', 'table');
PARTITION BY expr
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
The table structure can differ from the original Hive table structure:
- Column names should be the same as in the original Hive table, but you can use just some of these columns, in any order; you can also use alias columns calculated from other columns (see the sketch after this list).
- Column types should be the same as those in the original Hive table.
- The partition by expression should be consistent with the original Hive table, and the columns used in it should be present in the table structure.
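For example, a sketch of a table that maps only a subset of the Hive columns and adds an `ALIAS` column (the table name `test.test_orc_subset` and the alias column are hypothetical; it assumes the `test.test_orc` Hive table used in the examples below):

``` sql
CREATE TABLE test.test_orc_subset
(
    `f_int` Int32,
    `f_string` String,
    `f_string_upper` String ALIAS upper(f_string),  -- derived from another column, not read from Hive
    `day` String
)
ENGINE = Hive('thrift://localhost:9083', 'test', 'test_orc')
PARTITION BY day
```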
**Engine Parameters**
- `thrift://host:port` — Hive Metastore address
- `database` — Remote database name.
- `table` — Remote table name.
## Usage Example {#usage-example}
### How to Use Local Cache for HDFS Filesystem
We strongly advise you to enable the local cache for remote filesystems. Benchmarks show that it is almost 2x faster with the cache.
Before using the cache, add it to `config.xml`:
``` xml
<local_cache_for_remote_fs>
<enable>true</enable>
<root_dir>local_cache</root_dir>
<limit_size>559096952</limit_size>
<bytes_read_before_flush>1048576</bytes_read_before_flush>
</local_cache_for_remote_fs>
```
- enable: if true, ClickHouse will maintain a local cache for the remote filesystem (HDFS) after startup.
- root_dir: Required. The root directory for storing local cache files for the remote filesystem.
- limit_size: Required. The maximum size (in bytes) of local cache files.
- bytes_read_before_flush: the number of bytes to read before flushing to the local filesystem when downloading a file from the remote filesystem. The default value is 1 MB.
When ClickHouse is started with the local cache for the remote filesystem enabled, users can still choose not to use the cache with `settings use_local_cache_for_remote_fs = 0` in their query. `use_local_cache_for_remote_fs` is `false` by default.
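For instance, a per-query opt-out could look like this (assuming a Hive-backed table such as the `test.test_orc` example defined below):

``` sql
SELECT count() FROM test.test_orc SETTINGS use_local_cache_for_remote_fs = 0
```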
### Query Hive Table with ORC Input Format
#### Create Table in Hive
``` text
hive > CREATE TABLE `test`.`test_orc`(
`f_tinyint` tinyint,
`f_smallint` smallint,
`f_int` int,
`f_integer` int,
`f_bigint` bigint,
`f_float` float,
`f_double` double,
`f_decimal` decimal(10,0),
`f_timestamp` timestamp,
`f_date` date,
`f_string` string,
`f_varchar` varchar(100),
`f_bool` boolean,
`f_binary` binary,
`f_array_int` array<int>,
`f_array_string` array<string>,
`f_array_float` array<float>,
`f_array_array_int` array<array<int>>,
`f_array_array_string` array<array<string>>,
`f_array_array_float` array<array<float>>)
PARTITIONED BY (
`day` string)
ROW FORMAT SERDE
'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
STORED AS INPUTFORMAT
'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
LOCATION
'hdfs://testcluster/data/hive/test.db/test_orc'
OK
Time taken: 0.51 seconds
hive > insert into test.test_orc partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44)));
OK
Time taken: 36.025 seconds
hive > select * from test.test_orc;
OK
1 2 3 4 5 6.11 7.22 8 2021-11-05 12:38:16.314 2021-11-05 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18
Time taken: 0.295 seconds, Fetched: 1 row(s)
```
#### Create Table in ClickHouse
Table in ClickHouse, retrieving data from the Hive table created above:
``` sql
CREATE TABLE test.test_orc
(
`f_tinyint` Int8,
`f_smallint` Int16,
`f_int` Int32,
`f_integer` Int32,
`f_bigint` Int64,
`f_float` Float32,
`f_double` Float64,
`f_decimal` Float64,
`f_timestamp` DateTime,
`f_date` Date,
`f_string` String,
`f_varchar` String,
`f_bool` Bool,
`f_binary` String,
`f_array_int` Array(Int32),
`f_array_string` Array(String),
`f_array_float` Array(Float32),
`f_array_array_int` Array(Array(Int32)),
`f_array_array_string` Array(Array(String)),
`f_array_array_float` Array(Array(Float32)),
`day` String
)
ENGINE = Hive('thrift://202.168.117.26:9083', 'test', 'test_orc')
PARTITION BY day
```
``` sql
SELECT * FROM test.test_orc settings input_format_orc_allow_missing_columns = 1\G
```
``` text
SELECT *
FROM test.test_orc
SETTINGS input_format_orc_allow_missing_columns = 1
Query id: c3eaffdc-78ab-43cd-96a4-4acc5b480658
Row 1:
──────
f_tinyint: 1
f_smallint: 2
f_int: 3
f_integer: 4
f_bigint: 5
f_float: 6.11
f_double: 7.22
f_decimal: 8
f_timestamp: 2021-12-04 04:00:44
f_date: 2021-12-03
f_string: hello world
f_varchar: hello world
f_bool: true
f_binary: hello world
f_array_int: [1,2,3]
f_array_string: ['hello world','hello world']
f_array_float: [1.1,1.2]
f_array_array_int: [[1,2],[3,4]]
f_array_array_string: [['a','b'],['c','d']]
f_array_array_float: [[1.11,2.22],[3.33,4.44]]
day: 2021-09-18
1 rows in set. Elapsed: 0.078 sec.
```
### Query Hive Table with Parquet Input Format
#### Create Table in Hive
``` text
hive >
CREATE TABLE `test`.`test_parquet`(
`f_tinyint` tinyint,
`f_smallint` smallint,
`f_int` int,
`f_integer` int,
`f_bigint` bigint,
`f_float` float,
`f_double` double,
`f_decimal` decimal(10,0),
`f_timestamp` timestamp,
`f_date` date,
`f_string` string,
`f_varchar` varchar(100),
`f_char` char(100),
`f_bool` boolean,
`f_binary` binary,
`f_array_int` array<int>,
`f_array_string` array<string>,
`f_array_float` array<float>,
`f_array_array_int` array<array<int>>,
`f_array_array_string` array<array<string>>,
`f_array_array_float` array<array<float>>)
PARTITIONED BY (
`day` string)
ROW FORMAT SERDE
'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
STORED AS INPUTFORMAT
'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
LOCATION
'hdfs://testcluster/data/hive/test.db/test_parquet'
OK
Time taken: 0.51 seconds
hive > insert into test.test_parquet partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44)));
OK
Time taken: 36.025 seconds
hive > select * from test.test_parquet;
OK
1 2 3 4 5 6.11 7.22 8 2021-12-14 17:54:56.743 2021-12-14 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18
Time taken: 0.766 seconds, Fetched: 1 row(s)
```
#### Create Table in ClickHouse
Table in ClickHouse, retrieving data from the Hive table created above:
``` sql
CREATE TABLE test.test_parquet
(
`f_tinyint` Int8,
`f_smallint` Int16,
`f_int` Int32,
`f_integer` Int32,
`f_bigint` Int64,
`f_float` Float32,
`f_double` Float64,
`f_decimal` Float64,
`f_timestamp` DateTime,
`f_date` Date,
`f_string` String,
`f_varchar` String,
`f_char` String,
`f_bool` Bool,
`f_binary` String,
`f_array_int` Array(Int32),
`f_array_string` Array(String),
`f_array_float` Array(Float32),
`f_array_array_int` Array(Array(Int32)),
`f_array_array_string` Array(Array(String)),
`f_array_array_float` Array(Array(Float32)),
`day` String
)
ENGINE = Hive('thrift://localhost:9083', 'test', 'test_parquet')
PARTITION BY day
```
``` sql
SELECT * FROM test.test_parquet settings input_format_parquet_allow_missing_columns = 1\G
```
``` text
SELECT *
FROM test_parquet
SETTINGS input_format_parquet_allow_missing_columns = 1
Query id: 4e35cf02-c7b2-430d-9b81-16f438e5fca9
Row 1:
──────
f_tinyint: 1
f_smallint: 2
f_int: 3
f_integer: 4
f_bigint: 5
f_float: 6.11
f_double: 7.22
f_decimal: 8
f_timestamp: 2021-12-14 17:54:56
f_date: 2021-12-14
f_string: hello world
f_varchar: hello world
f_char: hello world
f_bool: true
f_binary: hello world
f_array_int: [1,2,3]
f_array_string: ['hello world','hello world']
f_array_float: [1.1,1.2]
f_array_array_int: [[1,2],[3,4]]
f_array_array_string: [['a','b'],['c','d']]
f_array_array_float: [[1.11,2.22],[3.33,4.44]]
day: 2021-09-18
1 rows in set. Elapsed: 0.357 sec.
```
### Query Hive Table with Text Input Format
#### Create Table in Hive
``` text
hive >
CREATE TABLE `test`.`test_text`(
`f_tinyint` tinyint,
`f_smallint` smallint,
`f_int` int,
`f_integer` int,
`f_bigint` bigint,
`f_float` float,
`f_double` double,
`f_decimal` decimal(10,0),
`f_timestamp` timestamp,
`f_date` date,
`f_string` string,
`f_varchar` varchar(100),
`f_char` char(100),
`f_bool` boolean,
`f_binary` binary,
`f_array_int` array<int>,
`f_array_string` array<string>,
`f_array_float` array<float>,
`f_array_array_int` array<array<int>>,
`f_array_array_string` array<array<string>>,
`f_array_array_float` array<array<float>>)
PARTITIONED BY (
`day` string)
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
STORED AS INPUTFORMAT
'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
LOCATION
'hdfs://testcluster/data/hive/test.db/test_text'
Time taken: 0.1 seconds, Fetched: 34 row(s)
hive > insert into test.test_text partition(day='2021-09-18') select 1, 2, 3, 4, 5, 6.11, 7.22, 8.333, current_timestamp(), current_date(), 'hello world', 'hello world', 'hello world', true, 'hello world', array(1, 2, 3), array('hello world', 'hello world'), array(float(1.1), float(1.2)), array(array(1, 2), array(3, 4)), array(array('a', 'b'), array('c', 'd')), array(array(float(1.11), float(2.22)), array(float(3.33), float(4.44)));
OK
Time taken: 36.025 seconds
hive > select * from test.test_text;
OK
1 2 3 4 5 6.11 7.22 8 2021-12-14 18:11:17.239 2021-12-14 hello world hello world hello world true hello world [1,2,3] ["hello world","hello world"] [1.1,1.2] [[1,2],[3,4]] [["a","b"],["c","d"]] [[1.11,2.22],[3.33,4.44]] 2021-09-18
Time taken: 0.624 seconds, Fetched: 1 row(s)
```
#### Create Table in ClickHouse
Table in ClickHouse, retrieving data from the Hive table created above:
``` sql
CREATE TABLE test.test_text
(
`f_tinyint` Int8,
`f_smallint` Int16,
`f_int` Int32,
`f_integer` Int32,
`f_bigint` Int64,
`f_float` Float32,
`f_double` Float64,
`f_decimal` Float64,
`f_timestamp` DateTime,
`f_date` Date,
`f_string` String,
`f_varchar` String,
`f_char` String,
`f_bool` Bool,
`day` String
)
ENGINE = Hive('thrift://localhost:9083', 'test', 'test_text')
PARTITION BY day
```
``` sql
SELECT * FROM test.test_text settings input_format_skip_unknown_fields = 1, input_format_with_names_use_header = 1, date_time_input_format = 'best_effort'\G
```
``` text
SELECT *
FROM test.test_text
SETTINGS input_format_skip_unknown_fields = 1, input_format_with_names_use_header = 1, date_time_input_format = 'best_effort'
Query id: 55b79d35-56de-45b9-8be6-57282fbf1f44
Row 1:
──────
f_tinyint: 1
f_smallint: 2
f_int: 3
f_integer: 4
f_bigint: 5
f_float: 6.11
f_double: 7.22
f_decimal: 8
f_timestamp: 2021-12-14 18:11:17
f_date: 2021-12-14
f_string: hello world
f_varchar: hello world
f_char: hello world
f_bool: true
day: 2021-09-18
```

View File

@ -20,3 +20,4 @@ List of supported integrations:
- [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) - [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md)
- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) - [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md)
- [SQLite](../../../engines/table-engines/integrations/sqlite.md) - [SQLite](../../../engines/table-engines/integrations/sqlite.md)
- [Hive](../../../engines/table-engines/integrations/hive.md)

View File

@ -66,4 +66,14 @@ SELECT COUNT() FROM mongo_table;
└─────────┘ └─────────┘
``` ```
You can also adjust the connection timeout:
``` sql
CREATE TABLE mongo_table
(
key UInt64,
data String
) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table', 'testuser', 'clickhouse', 'connectTimeoutMS=100000');
```
[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mongodb/) <!--hide--> [Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mongodb/) <!--hide-->

View File

@ -153,6 +153,7 @@ toc_title: Adopters
| <a href="https://www.spotify.com" class="favicon">Spotify</a> | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | | <a href="https://www.spotify.com" class="favicon">Spotify</a> | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
| <a href="https://www.staffcop.ru/" class="favicon">Staffcop</a> | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) | | <a href="https://www.staffcop.ru/" class="favicon">Staffcop</a> | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) |
| <a href="https://www.suning.com/" class="favicon">Suning</a> | E-Commerce | User behaviour analytics | — | — | [Blog article](https://www.sohu.com/a/434152235_411876) | | <a href="https://www.suning.com/" class="favicon">Suning</a> | E-Commerce | User behaviour analytics | — | — | [Blog article](https://www.sohu.com/a/434152235_411876) |
| <a href="https://superwall.me/" class="favicon">Superwall</a> | Monetization Tooling | Main product | — | — | [Word of mouth, Jan 2022](https://github.com/ClickHouse/ClickHouse/pull/33573) |
| <a href="https://www.teralytics.net/" class="favicon">Teralytics</a> | Mobility | Analytics | — | — | [Tech blog](https://www.teralytics.net/knowledge-hub/visualizing-mobility-data-the-scalability-challenge) | | <a href="https://www.teralytics.net/" class="favicon">Teralytics</a> | Mobility | Analytics | — | — | [Tech blog](https://www.teralytics.net/knowledge-hub/visualizing-mobility-data-the-scalability-challenge) |
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | | <a href="https://www.tencent.com" class="favicon">Tencent</a> | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | | <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
@ -178,7 +179,7 @@ toc_title: Adopters
| <a href="https://cloud.yandex.ru/services/managed-clickhouse" class="favicon">Yandex Cloud</a> | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | | <a href="https://cloud.yandex.ru/services/managed-clickhouse" class="favicon">Yandex Cloud</a> | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
| <a href="https://cloud.yandex.ru/services/datalens" class="favicon">Yandex DataLens</a> | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.com/meetup38/datalens.pdf) | | <a href="https://cloud.yandex.ru/services/datalens" class="favicon">Yandex DataLens</a> | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.com/meetup38/datalens.pdf) |
| <a href="https://market.yandex.ru/" class="favicon">Yandex Market</a> | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) | | <a href="https://market.yandex.ru/" class="favicon">Yandex Market</a> | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) |
| <a href="https://metrica.yandex.com" class="favicon">Yandex Metrica</a> | Web analytics | Macin product | 630 servers in one cluster, 360 servers in another cluster, 1862 servers in one department | 133 PiB / 8.31 PiB / 120 trillion records | [Slides, February 2020](https://presentations.clickhouse.com/meetup40/introduction/#13) | | <a href="https://metrica.yandex.com" class="favicon">Yandex Metrica</a> | Web analytics | Main product | 630 servers in one cluster, 360 servers in another cluster, 1862 servers in one department | 133 PiB / 8.31 PiB / 120 trillion records | [Slides, February 2020](https://presentations.clickhouse.com/meetup40/introduction/#13) |
| <a href="https://www.yellowfinbi.com" class="favicon"><COMPANYNAME></a> | Analytics | Main product | - | - | [Integration](https://www.yellowfinbi.com/campaign/yellowfin-9-whats-new#el-30219e0e) | | <a href="https://www.yellowfinbi.com" class="favicon"><COMPANYNAME></a> | Analytics | Main product | - | - | [Integration](https://www.yellowfinbi.com/campaign/yellowfin-9-whats-new#el-30219e0e) |
| <a href="https://www.yotascale.com/" class="favicon">Yotascale</a> | Cloud | Data pipeline | — | 2 bn records/day | [LinkedIn (Accomplishments)](https://www.linkedin.com/in/adilsaleem/) | | <a href="https://www.yotascale.com/" class="favicon">Yotascale</a> | Cloud | Data pipeline | — | 2 bn records/day | [LinkedIn (Accomplishments)](https://www.linkedin.com/in/adilsaleem/) |
| <a href="https://www.your-analytics.org/" class="favicon">Your Analytics</a> | Product Analytics | Main Product | — | - | [Tweet, November 2021](https://twitter.com/mikenikles/status/1459737241165565953) | | <a href="https://www.your-analytics.org/" class="favicon">Your Analytics</a> | Product Analytics | Main Product | — | - | [Tweet, November 2021](https://twitter.com/mikenikles/status/1459737241165565953) |

View File

@ -36,7 +36,8 @@ Other common parameters are inherited from the ClickHouse server config (`listen
Internal coordination settings are located in `<keeper_server>.<coordination_settings>` section: Internal coordination settings are located in `<keeper_server>.<coordination_settings>` section:
- `operation_timeout_ms` — Timeout for a single client operation (ms) (default: 10000). - `operation_timeout_ms` — Timeout for a single client operation (ms) (default: 10000).
- `session_timeout_ms` — Timeout for client session (ms) (default: 30000). - `min_session_timeout_ms` — Min timeout for client session (ms) (default: 10000).
- `session_timeout_ms` — Max timeout for client session (ms) (default: 100000).
- `dead_session_check_period_ms` — How often ClickHouse Keeper checks for dead sessions and removes them (ms) (default: 500). - `dead_session_check_period_ms` — How often ClickHouse Keeper checks for dead sessions and removes them (ms) (default: 500).
- `heart_beat_interval_ms` — How often a ClickHouse Keeper leader will send heartbeats to followers (ms) (default: 500). - `heart_beat_interval_ms` — How often a ClickHouse Keeper leader will send heartbeats to followers (ms) (default: 500).
- `election_timeout_lower_bound_ms` — If the follower didn't receive heartbeats from the leader in this interval, then it can initiate leader election (default: 1000). - `election_timeout_lower_bound_ms` — If the follower didn't receive heartbeats from the leader in this interval, then it can initiate leader election (default: 1000).

View File

@ -51,9 +51,6 @@ With filtering by realm:
</clickhouse> </clickhouse>
``` ```
!!! warning "Note"
You can define only one `kerberos` section. The presence of multiple `kerberos` sections will force ClickHouse to disable Kerberos authentication.
!!! warning "Note" !!! warning "Note"
`principal` and `realm` sections cannot be specified at the same time. The presence of both `principal` and `realm` sections will force ClickHouse to disable Kerberos authentication. `principal` and `realm` sections cannot be specified at the same time. The presence of both `principal` and `realm` sections will force ClickHouse to disable Kerberos authentication.

View File

@ -740,74 +740,6 @@ Result:
└───────┘ └───────┘
``` ```
## h3DegsToRads {#h3degstorads}
Converts degrees to radians.
**Syntax**
``` sql
h3DegsToRads(degrees)
```
**Parameter**
- `degrees` — Input in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
**Returned values**
- Radians. Type: [Float64](../../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
SELECT h3DegsToRads(180.0) AS radians;
```
Result:
``` text
┌───────────radians─┐
│ 3.141592653589793 │
└───────────────────┘
```
## h3RadsToDegs {#h3radstodegs}
Converts radians to degrees.
**Syntax**
``` sql
h3RadsToDegs(radians)
```
**Parameter**
- `radians` — Input in radians. Type: [Float64](../../../sql-reference/data-types/float.md).
**Returned values**
- Degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
SELECT h3RadsToDegs(3.141592653589793) AS degrees;
```
Result:
``` text
┌─degrees─┐
│ 180 │
└─────────┘
```
## h3CellAreaM2 {#h3cellaream2} ## h3CellAreaM2 {#h3cellaream2}
Returns the exact area of a specific cell in square meters corresponding to the given input H3 index. Returns the exact area of a specific cell in square meters corresponding to the given input H3 index.

View File

@ -55,7 +55,11 @@ SELECT * FROM insert_select_testtable;
└───┴───┴───┘ └───┴───┴───┘
``` ```
In this example, we see that the second inserted row has `a` and `c` columns filled by the passed values, and `b` filled with a value by default. In this example, we see that the second inserted row has `a` and `c` columns filled by the passed values, and `b` filled with a value by default. It is also possible to use the `DEFAULT` keyword to insert default values:
``` sql
INSERT INTO insert_select_testtable VALUES (1, DEFAULT, 1) ;
```
If a list of columns does not include all existing columns, the rest of the columns are filled with: If a list of columns does not include all existing columns, the rest of the columns are filled with:

View File

@ -16,7 +16,7 @@ This query tries to initialize an unscheduled merge of data parts for tables.
OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]] OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]]
``` ```
The `OPTMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines arent supported. The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines arent supported.
When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`). When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`).
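A minimal example of how this is typically combined (the table name `db.hits` is hypothetical):

``` sql
SET replication_alter_partitions_sync = 2;  -- wait for the merge to finish on all replicas
OPTIMIZE TABLE db.hits FINAL DEDUPLICATE;
```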

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
--- ---
toc_folder_title: Changelog toc_folder_title: Changelog
toc_priority: 74 toc_priority: 74
toc_title: '2021' toc_title: '2022'
--- ---
{% include "content/changelog.md" %} {% include "content/changelog.md" %}

View File

@ -37,7 +37,7 @@ ClickHouse собирает:
You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [graphite](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) section of the ClickHouse configuration file. Before configuring the export of metrics, set up Graphite as described in the [official guide](https://graphite.readthedocs.io/en/latest/install.html). You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [graphite](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) section of the ClickHouse configuration file. Before configuring the export of metrics, set up Graphite as described in the [official guide](https://graphite.readthedocs.io/en/latest/install.html).
You can configure ClickHouse to export metrics to [Prometheus](https://prometheus.io). See [prometheus](server-configuration-parameters/settings.md#server_configuration_parameters-prometheus) in the ClickHouse configuration file. Before configuring the export of metrics, set up Prometheus as described in the [official guide](https://prometheus.io/docs/prometheus/latest/installation/). You can configure ClickHouse to export metrics to [Prometheus](https://prometheus.io). See the [prometheus](server-configuration-parameters/settings.md#server_configuration_parameters-prometheus) section of the ClickHouse configuration file. Before configuring the export of metrics, set up Prometheus as described in the [official guide](https://prometheus.io/docs/prometheus/latest/installation/).
You can also monitor server availability via the HTTP API. Send an `HTTP GET` request to the `/ping` resource. If the server is available, it responds with `200 OK`. You can also monitor server availability via the HTTP API. Send an `HTTP GET` request to the `/ping` resource. If the server is available, it responds with `200 OK`.

View File

@ -55,7 +55,12 @@ SELECT * FROM insert_select_testtable
└───┴───┴───┘ └───┴───┴───┘
``` ```
In this example, we see that the second row contains columns `a` and `c` filled with the passed values, and `b` filled with the default value. In this example, we see that the second row contains columns `a` and `c` filled with the passed values, and `b` filled with the default value. You can also use the `DEFAULT` keyword to insert default values:
``` sql
INSERT INTO insert_select_testtable VALUES (1, DEFAULT, 1) ;
```
If the list of columns does not include all existing columns, then the remaining columns are filled as follows: If the list of columns does not include all existing columns, then the remaining columns are filled as follows:
- Values calculated from the `DEFAULT` expressions specified in the table definition. - Values calculated from the `DEFAULT` expressions specified in the table definition.

View File

@ -1,23 +1,17 @@
#include <stdlib.h> #include <stdlib.h>
#include <fcntl.h> #include <fcntl.h>
#include <signal.h>
#include <map> #include <map>
#include <iostream> #include <iostream>
#include <fstream>
#include <iomanip> #include <iomanip>
#include <unordered_set>
#include <algorithm>
#include <optional> #include <optional>
#include <base/scope_guard_safe.h> #include <base/scope_guard_safe.h>
#include <boost/program_options.hpp> #include <boost/program_options.hpp>
#include <boost/algorithm/string/replace.hpp> #include <boost/algorithm/string/replace.hpp>
#include <Poco/String.h>
#include <filesystem> #include <filesystem>
#include <string> #include <string>
#include "Client.h" #include "Client.h"
#include "Core/Protocol.h" #include "Core/Protocol.h"
#include <base/argsToConfig.h>
#include <base/find_symbols.h> #include <base/find_symbols.h>
#include <Common/config_version.h> #include <Common/config_version.h>

View File

@ -38,7 +38,8 @@
<coordination_settings> <coordination_settings>
<operation_timeout_ms>10000</operation_timeout_ms> <operation_timeout_ms>10000</operation_timeout_ms>
<session_timeout_ms>30000</session_timeout_ms> <min_session_timeout_ms>10000</min_session_timeout_ms>
<session_timeout_ms>100000</session_timeout_ms>
<raft_logs_level>information</raft_logs_level> <raft_logs_level>information</raft_logs_level>
<!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h --> <!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
</coordination_settings> </coordination_settings>

View File

@ -58,6 +58,8 @@
#include <Storages/StorageReplicatedMergeTree.h> #include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/System/attachSystemTables.h> #include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h> #include <Storages/System/attachInformationSchemaTables.h>
#include <Storages/Cache/ExternalDataSourceCache.h>
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <AggregateFunctions/registerAggregateFunctions.h> #include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/registerFunctions.h> #include <Functions/registerFunctions.h>
#include <TableFunctions/registerTableFunctions.h> #include <TableFunctions/registerTableFunctions.h>
@ -525,6 +527,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
registerDictionaries(); registerDictionaries();
registerDisks(); registerDisks();
registerFormats(); registerFormats();
registerRemoteFileMetadatas();
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision()); CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision());
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger()); CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());
@ -559,6 +562,21 @@ if (ThreadFuzzer::instance().isEffective())
config().getUInt("thread_pool_queue_size", 10000) config().getUInt("thread_pool_queue_size", 10000)
); );
/// Initialize global local cache for remote filesystem.
if (config().has("local_cache_for_remote_fs"))
{
bool enable = config().getBool("local_cache_for_remote_fs.enable", false);
if (enable)
{
String root_dir = config().getString("local_cache_for_remote_fs.root_dir");
UInt64 limit_size = config().getUInt64("local_cache_for_remote_fs.limit_size");
UInt64 bytes_read_before_flush
= config().getUInt64("local_cache_for_remote_fs.bytes_read_before_flush", DBMS_DEFAULT_BUFFER_SIZE);
ExternalDataSourceCache::instance().initOnce(global_context, root_dir, limit_size, bytes_read_before_flush);
}
}
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024)); Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
std::mutex servers_lock; std::mutex servers_lock;
std::vector<ProtocolServerAdapter> servers; std::vector<ProtocolServerAdapter> servers;

View File

@ -75,6 +75,7 @@ public:
cache.remove(params); cache.remove(params);
} }
auto res = std::shared_ptr<ContextAccess>(new ContextAccess(access_control, params)); auto res = std::shared_ptr<ContextAccess>(new ContextAccess(access_control, params));
res->initialize();
cache.add(params, res); cache.add(params, res);
return res; return res;
} }

View File

@ -146,16 +146,22 @@ ContextAccess::ContextAccess(const AccessControl & access_control_, const Params
: access_control(&access_control_) : access_control(&access_control_)
, params(params_) , params(params_)
{ {
std::lock_guard lock{mutex}; }
subscription_for_user_change = access_control->subscribeForChanges(
*params.user_id, [this](const UUID &, const AccessEntityPtr & entity) void ContextAccess::initialize()
{ {
std::lock_guard lock{mutex};
subscription_for_user_change = access_control->subscribeForChanges(
*params.user_id, [weak_ptr = weak_from_this()](const UUID &, const AccessEntityPtr & entity)
{
auto ptr = weak_ptr.lock();
if (!ptr)
return;
UserPtr changed_user = entity ? typeid_cast<UserPtr>(entity) : nullptr; UserPtr changed_user = entity ? typeid_cast<UserPtr>(entity) : nullptr;
std::lock_guard lock2{mutex}; std::lock_guard lock2{ptr->mutex};
setUser(changed_user); ptr->setUser(changed_user);
}); });
setUser(access_control->read<User>(*params.user_id)); setUser(access_control->read<User>(*params.user_id));
} }

View File

@ -63,7 +63,7 @@ struct ContextAccessParams
}; };
class ContextAccess class ContextAccess : public std::enable_shared_from_this<ContextAccess>
{ {
public: public:
using Params = ContextAccessParams; using Params = ContextAccessParams;
@ -161,6 +161,7 @@ private:
ContextAccess() {} ContextAccess() {}
ContextAccess(const AccessControl & access_control_, const Params & params_); ContextAccess(const AccessControl & access_control_, const Params & params_);
void initialize();
void setUser(const UserPtr & user_) const; void setUser(const UserPtr & user_) const;
void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & roles_info_) const; void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & roles_info_) const;
void setSettingsAndConstraints() const; void setSettingsAndConstraints() const;

View File

@ -353,6 +353,9 @@ bool LDAPAccessStorage::areLDAPCredentialsValidNoLock(const User & user, const C
if (credentials.getUserName() != user.getName()) if (credentials.getUserName() != user.getName())
return false; return false;
if (typeid_cast<const AlwaysAllowCredentials *>(&credentials))
return true;
if (const auto * basic_credentials = dynamic_cast<const BasicCredentials *>(&credentials)) if (const auto * basic_credentials = dynamic_cast<const BasicCredentials *>(&credentials))
return external_authenticators.checkLDAPCredentials(ldap_server_name, *basic_credentials, &role_search_params, &role_search_results); return external_authenticators.checkLDAPCredentials(ldap_server_name, *basic_credentials, &role_search_params, &role_search_results);
@ -478,53 +481,53 @@ std::optional<UUID> LDAPAccessStorage::authenticateImpl(
const Credentials & credentials, const Credentials & credentials,
const Poco::Net::IPAddress & address, const Poco::Net::IPAddress & address,
const ExternalAuthenticators & external_authenticators, const ExternalAuthenticators & external_authenticators,
bool /* throw_if_user_not_exists */) const bool throw_if_user_not_exists) const
{ {
std::scoped_lock lock(mutex); std::scoped_lock lock(mutex);
LDAPClient::SearchResultsList external_roles;
auto id = memory_storage.find<User>(credentials.getUserName()); auto id = memory_storage.find<User>(credentials.getUserName());
if (id) UserPtr user = id ? memory_storage.read<User>(*id) : nullptr;
std::shared_ptr<User> new_user;
if (!user)
{ {
auto user = memory_storage.read<User>(*id); // User does not exist, so we create one, and will add it if authentication is successful.
new_user = std::make_shared<User>();
new_user->setName(credentials.getUserName());
new_user->auth_data = AuthenticationData(AuthenticationType::LDAP);
new_user->auth_data.setLDAPServerName(ldap_server_name);
user = new_user;
}
if (!isAddressAllowed(*user, address)) if (!isAddressAllowed(*user, address))
throwAddressNotAllowed(address); throwAddressNotAllowed(address);
if (typeid_cast<const AlwaysAllowCredentials *>(&credentials)) LDAPClient::SearchResultsList external_roles;
return id;
if (!areLDAPCredentialsValidNoLock(*user, credentials, external_authenticators, external_roles)) if (!areLDAPCredentialsValidNoLock(*user, credentials, external_authenticators, external_roles))
throwInvalidCredentials(); {
// We don't know why the authentication has just failed:
// either there is no such user in LDAP or the password is not correct.
// We treat this situation as if there is no such user because we don't want to block
// other storages following this LDAPAccessStorage from trying to authenticate on their own.
if (throw_if_user_not_exists)
throwNotFound(AccessEntityType::USER, credentials.getUserName());
else
return {};
}
// Just in case external_roles are changed. This will be no-op if they are not. if (new_user)
updateAssignedRolesNoLock(*id, user->getName(), external_roles); {
// TODO: if these were AlwaysAllowCredentials, then mapped external roles are not available here,
// since without a password we can't authenticate and retrieve roles from the LDAP server.
return id; assignRolesNoLock(*new_user, external_roles);
id = memory_storage.insert(new_user);
} }
else else
{ {
// User does not exist, so we create one, and will add it if authentication is successful. // Just in case external_roles are changed. This will be no-op if they are not.
auto user = std::make_shared<User>(); updateAssignedRolesNoLock(*id, user->getName(), external_roles);
user->setName(credentials.getUserName());
user->auth_data = AuthenticationData(AuthenticationType::LDAP);
user->auth_data.setLDAPServerName(ldap_server_name);
if (!isAddressAllowed(*user, address))
throwAddressNotAllowed(address);
if (typeid_cast<const AlwaysAllowCredentials *>(&credentials))
{
// TODO: mapped external roles are not available here. Without a password we can't authenticate and retrieve roles from LDAP server.
assignRolesNoLock(*user, external_roles);
return memory_storage.insert(user);
} }
if (!areLDAPCredentialsValidNoLock(*user, credentials, external_authenticators, external_roles)) return id;
throwInvalidCredentials();
assignRolesNoLock(*user, external_roles);
return memory_storage.insert(user);
}
} }
} }

View File

@ -69,7 +69,7 @@ namespace
std::recursive_mutex ldap_global_mutex; std::recursive_mutex ldap_global_mutex;
auto escapeForLDAP(const String & src) auto escapeForDN(const String & src)
{ {
String dest; String dest;
dest.reserve(src.size() * 2); dest.reserve(src.size() * 2);
@ -96,6 +96,39 @@ namespace
return dest; return dest;
} }
auto escapeForFilter(const String & src)
{
String dest;
dest.reserve(src.size() * 3);
for (auto ch : src)
{
switch (ch)
{
case '*':
dest += "\\2A";
break;
case '(':
dest += "\\28";
break;
case ')':
dest += "\\29";
break;
case '\\':
dest += "\\5C";
break;
case '\0':
dest += "\\00";
break;
default:
dest += ch;
break;
}
}
return dest;
}
auto replacePlaceholders(const String & src, const std::vector<std::pair<String, String>> & pairs) auto replacePlaceholders(const String & src, const std::vector<std::pair<String, String>> & pairs)
{ {
String dest = src; String dest = src;
@ -160,7 +193,7 @@ void LDAPClient::diag(const int rc, String text)
} }
} }
void LDAPClient::openConnection() bool LDAPClient::openConnection()
{ {
std::scoped_lock lock(ldap_global_mutex); std::scoped_lock lock(ldap_global_mutex);
@ -294,7 +327,7 @@ void LDAPClient::openConnection()
if (params.enable_tls == LDAPClient::Params::TLSEnable::YES_STARTTLS) if (params.enable_tls == LDAPClient::Params::TLSEnable::YES_STARTTLS)
diag(ldap_start_tls_s(handle, nullptr, nullptr)); diag(ldap_start_tls_s(handle, nullptr, nullptr));
final_user_name = escapeForLDAP(params.user); final_user_name = escapeForDN(params.user);
final_bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", final_user_name} }); final_bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", final_user_name} });
final_user_dn = final_bind_dn; // The default value... may be updated right after a successful bind. final_user_dn = final_bind_dn; // The default value... may be updated right after a successful bind.
@ -306,7 +339,15 @@ void LDAPClient::openConnection()
cred.bv_val = const_cast<char *>(params.password.c_str()); cred.bv_val = const_cast<char *>(params.password.c_str());
cred.bv_len = params.password.size(); cred.bv_len = params.password.size();
diag(ldap_sasl_bind_s(handle, final_bind_dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr)); {
const auto rc = ldap_sasl_bind_s(handle, final_bind_dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr);
// Handle invalid credentials gracefully.
if (rc == LDAP_INVALID_CREDENTIALS)
return false;
diag(rc);
}
// Once bound, run the user DN search query and update the default value, if asked. // Once bound, run the user DN search query and update the default value, if asked.
if (params.user_dn_detection) if (params.user_dn_detection)
@ -322,7 +363,7 @@ void LDAPClient::openConnection()
final_user_dn = *user_dn_search_results.begin(); final_user_dn = *user_dn_search_results.begin();
} }
break; return true;
} }
default: default:
@ -366,10 +407,10 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
}); });
const auto final_search_filter = replacePlaceholders(search_params.search_filter, { const auto final_search_filter = replacePlaceholders(search_params.search_filter, {
{"{user_name}", final_user_name}, {"{user_name}", escapeForFilter(final_user_name)},
{"{bind_dn}", final_bind_dn}, {"{bind_dn}", escapeForFilter(final_bind_dn)},
{"{user_dn}", final_user_dn}, {"{user_dn}", escapeForFilter(final_user_dn)},
{"{base_dn}", final_base_dn} {"{base_dn}", escapeForFilter(final_base_dn)}
}); });
char * attrs[] = { const_cast<char *>(search_params.attribute.c_str()), nullptr }; char * attrs[] = { const_cast<char *>(search_params.attribute.c_str()), nullptr };
@ -541,8 +582,9 @@ bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search
SCOPE_EXIT({ closeConnection(); }); SCOPE_EXIT({ closeConnection(); });
// Will throw on any error, including invalid credentials. // Will return false on invalid credentials, will throw on any other error.
openConnection(); if (!openConnection())
return false;
// While connected, run search queries and save the results, if asked. // While connected, run search queries and save the results, if asked.
if (role_search_params) if (role_search_params)
@ -574,7 +616,7 @@ void LDAPClient::diag(const int, String)
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
} }
void LDAPClient::openConnection() bool LDAPClient::openConnection()
{ {
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
} }

View File

@ -134,7 +134,7 @@ public:
protected: protected:
MAYBE_NORETURN void diag(const int rc, String text = ""); MAYBE_NORETURN void diag(const int rc, String text = "");
MAYBE_NORETURN void openConnection(); MAYBE_NORETURN bool openConnection();
void closeConnection() noexcept; void closeConnection() noexcept;
SearchResults search(const SearchParams & search_params); SearchResults search(const SearchParams & search_params);

View File

@ -452,9 +452,11 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock
std::optional<UUID> MultipleAccessStorage::authenticateImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool throw_if_user_not_exists) const std::optional<UUID> MultipleAccessStorage::authenticateImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool throw_if_user_not_exists) const
{ {
auto storages = getStoragesInternal(); auto storages = getStoragesInternal();
for (const auto & storage : *storages) for (size_t i = 0; i != storages->size(); ++i)
{ {
auto id = storage->authenticate(credentials, address, external_authenticators, /* throw_if_user_not_exists = */ false); const auto & storage = (*storages)[i];
bool is_last_storage = (i == storages->size() - 1);
auto id = storage->authenticate(credentials, address, external_authenticators, throw_if_user_not_exists && is_last_storage);
if (id) if (id)
{ {
std::lock_guard lock{mutex}; std::lock_guard lock{mutex};

View File

@ -115,6 +115,11 @@ if (USE_HDFS)
add_headers_and_sources(dbms Disks/HDFS) add_headers_and_sources(dbms Disks/HDFS)
endif() endif()
add_headers_and_sources(dbms Storages/Cache)
if (USE_HIVE)
add_headers_and_sources(dbms Storages/Hive)
endif()
if(USE_FILELOG) if(USE_FILELOG)
add_headers_and_sources(dbms Storages/FileLog) add_headers_and_sources(dbms Storages/FileLog)
endif() endif()
@ -448,6 +453,12 @@ if (USE_HDFS)
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR}) dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR})
endif() endif()
if (USE_HIVE)
dbms_target_link_libraries(PRIVATE hivemetastore)
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/hive-metastore)
endif()
if (USE_AWS_S3) if (USE_AWS_S3)
target_link_libraries (clickhouse_common_io PUBLIC ${AWS_S3_LIBRARY}) target_link_libraries (clickhouse_common_io PUBLIC ${AWS_S3_LIBRARY})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_CORE_INCLUDE_DIR}) target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_CORE_INCLUDE_DIR})
@ -469,6 +480,11 @@ if (USE_BROTLI)
target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR}) target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR})
endif() endif()
if (USE_SNAPPY)
target_link_libraries (clickhouse_common_io PUBLIC ${SNAPPY_LIBRARY})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${SNAPPY_INCLUDE_DIR})
endif()
if (USE_AMQPCPP) if (USE_AMQPCPP)
dbms_target_link_libraries(PUBLIC ${AMQPCPP_LIBRARY}) dbms_target_link_libraries(PUBLIC ${AMQPCPP_LIBRARY})
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${AMQPCPP_INCLUDE_DIR}) dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${AMQPCPP_INCLUDE_DIR})

View File

@ -463,12 +463,13 @@ void ClientBase::initBlockOutputStream(const Block & block, ASTPtr parsed_query)
/// The query can specify output format or output file. /// The query can specify output format or output file.
if (const auto * query_with_output = dynamic_cast<const ASTQueryWithOutput *>(parsed_query.get())) if (const auto * query_with_output = dynamic_cast<const ASTQueryWithOutput *>(parsed_query.get()))
{ {
String out_file;
if (query_with_output->out_file) if (query_with_output->out_file)
{ {
select_into_file = true; select_into_file = true;
const auto & out_file_node = query_with_output->out_file->as<ASTLiteral &>(); const auto & out_file_node = query_with_output->out_file->as<ASTLiteral &>();
const auto & out_file = out_file_node.value.safeGet<std::string>(); out_file = out_file_node.value.safeGet<std::string>();
std::string compression_method; std::string compression_method;
if (query_with_output->compression) if (query_with_output->compression)
@ -494,6 +495,12 @@ void ClientBase::initBlockOutputStream(const Block & block, ASTPtr parsed_query)
const auto & id = query_with_output->format->as<ASTIdentifier &>(); const auto & id = query_with_output->format->as<ASTIdentifier &>();
current_format = id.name(); current_format = id.name();
} }
else if (query_with_output->out_file)
{
const auto & format_name = FormatFactory::instance().getFormatFromFileName(out_file);
if (!format_name.empty())
current_format = format_name;
}
} }
if (has_vertical_output_suffix) if (has_vertical_output_suffix)
@ -1008,11 +1015,15 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
compression_method = compression_method_node.value.safeGet<std::string>(); compression_method = compression_method_node.value.safeGet<std::string>();
} }
String current_format = parsed_insert_query->format;
if (current_format.empty())
current_format = FormatFactory::instance().getFormatFromFileName(in_file, true);
/// Create temporary storage file, to support globs and parallel reading /// Create temporary storage file, to support globs and parallel reading
StorageFile::CommonArguments args{ StorageFile::CommonArguments args{
WithContext(global_context), WithContext(global_context),
parsed_insert_query->table_id, parsed_insert_query->table_id,
parsed_insert_query->format, current_format,
getFormatSettings(global_context), getFormatSettings(global_context),
compression_method, compression_method,
columns_description_for_query, columns_description_for_query,

View File

@ -214,6 +214,12 @@ bool LocalConnection::poll(size_t)
if (next_packet_type) if (next_packet_type)
return true; return true;
if (state->exception)
{
next_packet_type = Protocol::Server::Exception;
return true;
}
if (!state->is_finished) if (!state->is_finished)
{ {
if (send_progress && (state->after_send_progress.elapsedMicroseconds() >= query_context->getSettingsRef().interactive_delay)) if (send_progress && (state->after_send_progress.elapsedMicroseconds() >= query_context->getSettingsRef().interactive_delay))

View File

@ -3,9 +3,11 @@
#include <Columns/ColumnsCommon.h> #include <Columns/ColumnsCommon.h>
#include <Common/PODArray.h> #include <Common/PODArray.h>
#include <Common/ProfileEvents.h> #include <Common/ProfileEvents.h>
#include <Common/assert_cast.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <Functions/IFunction.h> #include <Functions/IFunction.h>
namespace ProfileEvents namespace ProfileEvents
{ {
extern const Event FunctionExecute; extern const Event FunctionExecute;
@ -59,6 +61,40 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const
return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled); return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled);
} }
void ColumnFunction::insertFrom(const IColumn & src, size_t n)
{
const ColumnFunction & src_func = assert_cast<const ColumnFunction &>(src);
size_t num_captured_columns = captured_columns.size();
assert(num_captured_columns == src_func.captured_columns.size());
for (size_t i = 0; i < num_captured_columns; ++i)
{
auto mut_column = IColumn::mutate(std::move(captured_columns[i].column));
mut_column->insertFrom(*src_func.captured_columns[i].column, n);
captured_columns[i].column = std::move(mut_column);
}
++size_;
}
void ColumnFunction::insertRangeFrom(const IColumn & src, size_t start, size_t length)
{
const ColumnFunction & src_func = assert_cast<const ColumnFunction &>(src);
size_t num_captured_columns = captured_columns.size();
assert(num_captured_columns == src_func.captured_columns.size());
for (size_t i = 0; i < num_captured_columns; ++i)
{
auto mut_column = IColumn::mutate(std::move(captured_columns[i].column));
mut_column->insertRangeFrom(*src_func.captured_columns[i].column, start, length);
captured_columns[i].column = std::move(mut_column);
}
size_ += length;
}
ColumnPtr ColumnFunction::filter(const Filter & filt, ssize_t result_size_hint) const ColumnPtr ColumnFunction::filter(const Filter & filt, ssize_t result_size_hint) const
{ {
if (size_ != filt.size()) if (size_ != filt.size())

View File

@ -88,10 +88,8 @@ public:
throw Exception("Cannot insert into " + getName(), ErrorCodes::NOT_IMPLEMENTED); throw Exception("Cannot insert into " + getName(), ErrorCodes::NOT_IMPLEMENTED);
} }
void insertRangeFrom(const IColumn &, size_t, size_t) override void insertFrom(const IColumn & src, size_t n) override;
{ void insertRangeFrom(const IColumn &, size_t start, size_t length) override;
throw Exception("Cannot insert into " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
void insertData(const char *, size_t) override void insertData(const char *, size_t) override
{ {

Some files were not shown because too many files have changed in this diff