mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 15:42:02 +00:00

commit 7392919909: Merge branch 'master' into pretty-type-names-default

.github/actions/common_setup/action.yml (vendored): 7 changed lines
@@ -18,9 +18,6 @@ runs:
        echo "Setup the common ENV variables"
        cat >> "$GITHUB_ENV" << 'EOF'
        TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
        REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
        IMAGES_PATH=${{runner.temp}}/images_path
        REPORTS_PATH=${{runner.temp}}/reports_dir
        EOF
        if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
          echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
@@ -30,6 +27,4 @@ runs:
    shell: bash
    run: |
        # to remove every leftovers
        sudo rm -fr "$TEMP_PATH"
        mkdir -p "$REPO_COPY"
        cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
        sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
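For context, the hunk above leans on GitHub Actions' file-based environment mechanism: every KEY=VALUE line appended to the file named by $GITHUB_ENV becomes an environment variable for all later steps of the job. A minimal sketch of the same pattern, with an illustrative variable name rather than the ones from this action:

    - name: Export a job-scoped path
      shell: bash
      run: |
        # Lines appended to "$GITHUB_ENV" become ENV variables for every
        # step that runs after this one (not for the current step).
        cat >> "$GITHUB_ENV" << 'EOF'
        TEMP_PATH=${{ runner.temp }}/demo_job
        EOF
    - name: Consume it
      shell: bash
      run: |
        mkdir -p "$TEMP_PATH"   # visible here with no extra plumbing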
.github/workflows/backport_branches.yml (vendored): 244 changed lines
@@ -10,27 +10,21 @@ on: # yamllint disable-line rule:truthy
    branches:
      - 'backport/**'
jobs:
  CheckLabels:
  RunConfig:
    runs-on: [self-hosted, style-checker]
    # Run the first check always, even if the CI is cancelled
    if: ${{ always() }}
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Labels check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 run_check.py
  PythonUnitTests:
    runs-on: [self-hosted, style-checker]
    needs: CheckLabels
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Python unit tests
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
@@ -40,273 +34,237 @@ jobs:
            echo "Testing $dir"
            python3 -m unittest discover -s "$dir" -p 'test_*.py'
          done
  DockerHubPushAarch64:
    runs-on: [self-hosted, style-checker-aarch64]
    needs: CheckLabels
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
      - name: PrepareRunConfig
        id: runconfig
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    runs-on: [self-hosted, style-checker]
    needs: CheckLabels
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
          echo "::group::configure CI run"
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"

          echo "::group::CI run configure results"
          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"

          {
            echo 'CI_DATA<<EOF'
            cat ${{ runner.temp }}/ci_run_data.json
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
      - name: Re-create GH statuses for skipped jobs if any
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
          filter: tree:0
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images
          path: ${{ runner.temp }}/changed_images.json
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_docker.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  CompatibilityCheckX86:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Compatibility check X86
      test_name: Compatibility check (amd64)
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
  CompatibilityCheckAarch64:
    needs: [BuilderDebAarch64]
    needs: [RunConfig, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Compatibility check X86
      test_name: Compatibility check (aarch64)
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
  BuilderDebRelease:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_release
      checkout_depth: 0
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebAarch64:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_aarch64
      checkout_depth: 0
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebAsan:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_asan
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebTsan:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_tsan
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebDebug:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_debug
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderBinDarwin:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: binary_darwin
      data: ${{ needs.RunConfig.outputs.data }}
      checkout_depth: 0
  BuilderBinDarwinAarch64:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: binary_darwin_aarch64
      data: ${{ needs.RunConfig.outputs.data }}
      checkout_depth: 0
############################################################################################
##################################### Docker images #######################################
############################################################################################
  DockerServerImages:
    needs:
      - BuilderDebRelease
      - BuilderDebAarch64
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
          filter: tree:0
      - name: Check docker clickhouse/clickhouse-server building
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-server --image-path docker/server
          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
    needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Docker server and keeper images
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
      run_command: |
        cd "$GITHUB_WORKSPACE/tests/ci"
        python3 docker_server.py --release-type head --no-push \
          --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
        python3 docker_server.py --release-type head --no-push \
          --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
  BuilderReport:
    if: ${{ success() || failure() }}
    # run report check for failed builds to indicate the CI error
    if: ${{ !cancelled() }}
    needs:
      - BuilderDebRelease
      - RunConfig
      - BuilderDebAarch64
      - BuilderDebAsan
      - BuilderDebTsan
      - BuilderDebDebug
      - BuilderDebRelease
      - BuilderDebTsan
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: ClickHouse build check
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      additional_envs: |
        NEEDS_DATA<<NDENV
        ${{ toJSON(needs) }}
        NDENV
      run_command: |
        cd "$GITHUB_WORKSPACE/tests/ci"
        python3 build_report_check.py "$CHECK_NAME"
  BuilderSpecialReport:
    if: ${{ success() || failure() }}
    # run report check for failed builds to indicate the CI error
    if: ${{ !cancelled() }}
    needs:
      - RunConfig
      - BuilderBinDarwin
      - BuilderBinDarwinAarch64
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: ClickHouse special build check
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      additional_envs: |
        NEEDS_DATA<<NDENV
        ${{ toJSON(needs) }}
        NDENV
      run_command: |
        cd "$GITHUB_WORKSPACE/tests/ci"
        python3 build_report_check.py "$CHECK_NAME"
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
  InstallPackagesTestRelease:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Install packages (amd64)
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 install_check.py "$CHECK_NAME"
  InstallPackagesTestAarch64:
    needs: [BuilderDebAarch64]
    needs: [RunConfig, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Install packages (arm64)
      runner_type: style-checker-aarch64
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAl STATELESS TESTS #######################################
##############################################################################################
  FunctionalStatelessTestAsan:
    needs: [BuilderDebAsan]
    needs: [RunConfig, BuilderDebAsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (asan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=10800
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################
  FunctionalStatefulTestDebug:
    needs: [BuilderDebDebug]
    needs: [RunConfig, BuilderDebDebug]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (debug)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
  StressTestTsan:
    needs: [BuilderDebTsan]
    needs: [RunConfig, BuilderDebTsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stress test (tsan)
      runner_type: stress-tester
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 stress_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
  IntegrationTestsRelease:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Integration tests (release)
      runner_type: stress-tester
      batches: 4
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 integration_test_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  FinishCheck:
    if: ${{ !failure() && !cancelled() }}
    needs:
      - DockerHubPush
      - DockerServerImages
      - BuilderReport
      - BuilderSpecialReport
      - FunctionalStatelessTestAsan
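The RunConfig job introduced above is the hub of the new pipeline: PrepareRunConfig writes ci_run_data.json, turns it into a multiline step output via the KEY<<DELIMITER heredoc syntax of $GITHUB_OUTPUT, and the job re-exports it so every downstream job can read needs.RunConfig.outputs.data. A minimal sketch of that producer/consumer wiring (job names and the JSON payload are illustrative, not taken from this commit):

    jobs:
      Producer:
        runs-on: ubuntu-latest
        outputs:
          data: ${{ steps.cfg.outputs.CI_DATA }}
        steps:
          - id: cfg
            run: |
              echo '{"jobs_data": {"jobs_to_do": ["package_release"]}}' > "$RUNNER_TEMP/ci_run_data.json"
              # Multiline step outputs use the same heredoc-style syntax as $GITHUB_ENV.
              {
                echo 'CI_DATA<<EOF'
                cat "$RUNNER_TEMP/ci_run_data.json"
                echo 'EOF'
              } >> "$GITHUB_OUTPUT"
      Consumer:
        needs: [Producer]
        runs-on: ubuntu-latest
        steps:
          - run: echo "first job todo: ${{ fromJson(needs.Producer.outputs.data).jobs_data.jobs_to_do[0] }}"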
.github/workflows/docs_check.yml (vendored): 138 changed lines
@@ -1,138 +0,0 @@
name: DocsCheck

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

on: # yamllint disable-line rule:truthy
  pull_request:
    types:
      - synchronize
      - reopened
      - opened
    branches:
      - master
    paths:
      - '**.md'
      - 'docker/docs/**'
      - 'docs/**'
      - 'utils/check-style/aspell-ignore/**'
      - 'tests/ci/docs_check.py'
      - '.github/workflows/docs_check.yml'
jobs:
  CheckLabels:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Labels check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 run_check.py
  DockerHubPushAarch64:
    needs: CheckLabels
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    needs: CheckLabels
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
          filter: tree:0
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images
          path: ${{ runner.temp }}/changed_images.json
  StyleCheck:
    needs: DockerHubPush
    # We need additional `&& ! cancelled()` to have the job being able to cancel
    if: ${{ success() || failure() || ( always() && ! cancelled() ) }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Style check
      runner_type: style-checker
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 style_check.py
    secrets:
      secret_envs: |
        ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
        ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
        RCSK
  DocsCheck:
    needs: DockerHubPush
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Docs check
      runner_type: func-tester-aarch64
      additional_envs: |
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 docs_check.py
  FinishCheck:
    needs:
      - StyleCheck
      - DockerHubPush
      - DocsCheck
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Finish label
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 finish_check.py
          python3 merge_pr.py --check-approved
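docs_check.yml is deleted wholesale (the @@ -1,138 +0,0 @@ hunk removes the entire file). When reading the deleted StyleCheck job, note the family of status-check guards this refactor standardizes on: a dependent job is skipped by the implicit success() guard unless its if expression overrides it. A hedged illustration of the three guards used across these workflows (job and step names are illustrative):

    jobs:
      GuardDemo:
        needs: [SomeEarlierJob]
        # always(): true even while the run is being cancelled
        # !cancelled(): true after success or failure, false once cancelled
        # !failure() && !cancelled(): true only if no dependency failed or was cancelled
        if: ${{ !failure() && !cancelled() }}
        runs-on: ubuntu-latest
        steps:
          - run: echo "upstream finished without failures (skipped dependencies are allowed)"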
.github/workflows/jepsen.yml (vendored): 6 changed lines
@@ -11,16 +11,14 @@ on: # yamllint disable-line rule:truthy
  workflow_call:
jobs:
  KeeperJepsenRelease:
    uses: ./.github/workflows/reusable_test.yml
    uses: ./.github/workflows/reusable_simple_job.yml
    with:
      test_name: Jepsen keeper check
      runner_type: style-checker
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 jepsen_check.py keeper
  # ServerJepsenRelease:
  #   runs-on: [self-hosted, style-checker]
  #   uses: ./.github/workflows/reusable_test.yml
  #   uses: ./.github/workflows/reusable_simple_job.yml
  #   with:
  #     test_name: Jepsen server check
  #     runner_type: style-checker
.github/workflows/libfuzzer.yml (vendored): 9 changed lines
@@ -8,19 +8,26 @@ on: # yamllint disable-line rule:truthy
  # schedule:
  #   - cron: '0 0 2 31 1' # never for now
  workflow_call:
    inputs:
      data:
        description: json ci data
        type: string
        required: true

jobs:
  BuilderFuzzers:
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: fuzzers
      data: ${{ inputs.data }}
  libFuzzerTest:
    needs: [BuilderFuzzers]
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: libFuzzer tests
      runner_type: func-tester
      data: ${{ inputs.data }}
      additional_envs: |
        KILL_TIMEOUT=10800
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
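libfuzzer.yml now declares the CI data as a required workflow_call input and threads it through to the reusable build and test workflows. Reusable-workflow inputs are not inherited automatically, so the payload must be re-declared and forwarded at every hop; the general shape, reduced to its essentials (names are illustrative):

    'on':
      workflow_call:
        inputs:
          data:
            description: json ci data
            type: string
            required: true
    jobs:
      Forward:
        uses: ./.github/workflows/reusable_build.yml
        with:
          data: ${{ inputs.data }}   # must be passed explicitly at each hop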
.github/workflows/master.yml (vendored): 639 changed lines (file diff suppressed because it is too large)
.github/workflows/nightly.yml (vendored): 79 changed lines
@@ -13,67 +13,38 @@ jobs:
  Debug:
    # The task for having a preserved ENV and event.json for later investigation
    uses: ./.github/workflows/debug.yml
  DockerHubPushAarch64:
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix aarch64 --all
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
  RunConfig:
    runs-on: [self-hosted, style-checker]
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix amd64 --all
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
      - name: PrepareRunConfig
        id: runconfig
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images
          path: ${{ runner.temp }}/changed_images.json
          echo "::group::configure CI run"
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --rebuild-all-docker --outfile ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"

          echo "::group::CI run configure results"
          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"
          {
            echo 'CI_DATA<<EOF'
            cat ${{ runner.temp }}/ci_run_data.json
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
  BuildDockers:
    needs: [RunConfig]
    uses: ./.github/workflows/reusable_docker.yml
    with:
      data: "${{ needs.RunConfig.outputs.data }}"
      set_latest: true
  SonarCloud:
    runs-on: [self-hosted, builder]
    env:
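The nightly run is the only caller shown here that sets set_latest: true, so only nightly manifests get the latest tag (see reusable_docker.yml below). One subtlety of boolean workflow_call inputs, sketched with an illustrative workflow: the value is a real boolean inside ${{ }} expressions, but once interpolated into a run script it is just the text true/false, which is why the docker workflow compares against the string "true" in bash:

    'on':
      workflow_call:
        inputs:
          set_latest:
            type: boolean
            default: false
    jobs:
      Demo:
        runs-on: ubuntu-latest
        steps:
          - run: |
              # In a run script the boolean is interpolated as literal true/false text,
              # hence the string comparison.
              if [ "${{ inputs.set_latest }}" == "true" ]; then
                echo "would add the latest tag to the multi-arch manifest"
              fi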
.github/workflows/pull_request.yml (vendored): 816 changed lines (file diff suppressed because it is too large)
.github/workflows/release_branches.yml (vendored): 422 changed lines
@@ -13,171 +13,169 @@ on: # yamllint disable-line rule:truthy
      - '2[1-9].[1-9]'

jobs:
  DockerHubPushAarch64:
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
  RunConfig:
    runs-on: [self-hosted, style-checker]
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Images check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v3
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
      - name: Labels check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: changed_images
          path: ${{ runner.temp }}/changed_images.json
          python3 run_check.py
      - name: Python unit tests
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          echo "Testing the main ci directory"
          python3 -m unittest discover -s . -p 'test_*.py'
          for dir in *_lambda/; do
            echo "Testing $dir"
            python3 -m unittest discover -s "$dir" -p 'test_*.py'
          done
      - name: PrepareRunConfig
        id: runconfig
        run: |
          echo "::group::configure CI run"
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --rebuild-all-binaries --outfile ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"
          echo "::group::CI run configure results"
          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"
          {
            echo 'CI_DATA<<EOF'
            cat ${{ runner.temp }}/ci_run_data.json
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
      - name: Re-create GH statuses for skipped jobs if any
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_docker.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  CompatibilityCheckX86:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Compatibility check X86
      test_name: Compatibility check (amd64)
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
  CompatibilityCheckAarch64:
    needs: [BuilderDebAarch64]
    needs: [RunConfig, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Compatibility check X86
      test_name: Compatibility check (aarch64)
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
  BuilderDebRelease:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_release
      checkout_depth: 0
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebAarch64:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_aarch64
      checkout_depth: 0
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebAsan:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_asan
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebUBsan:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_ubsan
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebTsan:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_tsan
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebMsan:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_msan
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderDebDebug:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_debug
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderBinDarwin:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: binary_darwin
      checkout_depth: 0
      data: ${{ needs.RunConfig.outputs.data }}
  BuilderBinDarwinAarch64:
    needs: [DockerHubPush]
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: binary_darwin_aarch64
      checkout_depth: 0
      data: ${{ needs.RunConfig.outputs.data }}
############################################################################################
##################################### Docker images #######################################
############################################################################################
  DockerServerImages:
    needs:
      - BuilderDebRelease
      - BuilderDebAarch64
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
          filter: tree:0
      - name: Check docker clickhouse/clickhouse-server building
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-server --image-path docker/server
          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
    needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Docker server and keeper images
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      checkout_depth: 0
      run_command: |
        cd "$GITHUB_WORKSPACE/tests/ci"
        python3 docker_server.py --release-type head --no-push \
          --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
        python3 docker_server.py --release-type head --no-push \
          --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
  BuilderReport:
    if: ${{ success() || failure() }}
    # run report check for failed builds to indicate the CI error
    if: ${{ !cancelled() }}
    needs:
      - RunConfig
      - BuilderDebRelease
      - BuilderDebAarch64
      - BuilderDebAsan
@@ -189,30 +187,38 @@ jobs:
    with:
      test_name: ClickHouse build check
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      additional_envs: |
        NEEDS_DATA<<NDENV
        ${{ toJSON(needs) }}
        NDENV
      run_command: |
        cd "$GITHUB_WORKSPACE/tests/ci"
        python3 build_report_check.py "$CHECK_NAME"
  BuilderSpecialReport:
    if: ${{ success() || failure() }}
    # run report check for failed builds to indicate the CI error
    if: ${{ !cancelled() }}
    needs:
      - BuilderBinDarwin
      - BuilderBinDarwinAarch64
      - RunConfig
      - BuilderDebRelease
      - BuilderDebAarch64
      - BuilderDebAsan
      - BuilderDebTsan
      - BuilderDebUBsan
      - BuilderDebMsan
      - BuilderDebDebug
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: ClickHouse special build check
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      additional_envs: |
        NEEDS_DATA<<NDENV
        ${{ toJSON(needs) }}
        NDENV
      run_command: |
        cd "$GITHUB_WORKSPACE/tests/ci"
        python3 build_report_check.py "$CHECK_NAME"
  MarkReleaseReady:
    if: ${{ !failure() && !cancelled() }}
    needs:
      - BuilderBinDarwin
      - BuilderBinDarwinAarch64
@@ -232,282 +238,224 @@ jobs:
#################################### INSTALL PACKAGES ######################################
############################################################################################
  InstallPackagesTestRelease:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Install packages (amd64)
      runner_type: style-checker
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 install_check.py "$CHECK_NAME"
  InstallPackagesTestAarch64:
    needs: [BuilderDebAarch64]
    needs: [RunConfig, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Install packages (arm64)
      runner_type: style-checker-aarch64
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAl STATELESS TESTS #######################################
##############################################################################################
  FunctionalStatelessTestRelease:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (release)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=10800
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatelessTestAarch64:
    needs: [BuilderDebAarch64]
    needs: [RunConfig, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (aarch64)
      runner_type: func-tester-aarch64
      additional_envs: |
        KILL_TIMEOUT=10800
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatelessTestAsan:
    needs: [BuilderDebAsan]
    needs: [RunConfig, BuilderDebAsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (asan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=10800
      batches: 4
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatelessTestTsan:
    needs: [BuilderDebTsan]
    needs: [RunConfig, BuilderDebTsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (tsan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=10800
      batches: 5
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
  FunctionalStatelessTestUBsan:
    needs: [BuilderDebUBsan]
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (ubsan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=10800
      batches: 2
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatelessTestMsan:
    needs: [BuilderDebMsan]
    needs: [RunConfig, BuilderDebMsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (msan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=10800
      batches: 6
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatelessTestUBsan:
    needs: [RunConfig, BuilderDebUBsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (ubsan)
      runner_type: func-tester
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatelessTestDebug:
    needs: [BuilderDebDebug]
    needs: [RunConfig, BuilderDebDebug]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateless tests (debug)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=10800
      batches: 5
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################
  FunctionalStatefulTestRelease:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (release)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatefulTestAarch64:
    needs: [BuilderDebAarch64]
    needs: [RunConfig, BuilderDebAarch64]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (aarch64)
      runner_type: func-tester-aarch64
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatefulTestAsan:
    needs: [BuilderDebAsan]
    needs: [RunConfig, BuilderDebAsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (asan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatefulTestTsan:
    needs: [BuilderDebTsan]
    needs: [RunConfig, BuilderDebTsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (tsan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatefulTestMsan:
    needs: [BuilderDebMsan]
    needs: [RunConfig, BuilderDebMsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (msan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatefulTestUBsan:
    needs: [BuilderDebUBsan]
    needs: [RunConfig, BuilderDebUBsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (ubsan)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
  FunctionalStatefulTestDebug:
    needs: [BuilderDebDebug]
    needs: [RunConfig, BuilderDebDebug]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stateful tests (debug)
      runner_type: func-tester
      additional_envs: |
        KILL_TIMEOUT=3600
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
  StressTestAsan:
    needs: [BuilderDebAsan]
    needs: [RunConfig, BuilderDebAsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stress test (asan)
      runner_type: stress-tester
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 stress_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  StressTestTsan:
    needs: [BuilderDebTsan]
    needs: [RunConfig, BuilderDebTsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stress test (tsan)
      runner_type: stress-tester
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 stress_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  StressTestMsan:
    needs: [BuilderDebMsan]
    needs: [RunConfig, BuilderDebMsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stress test (msan)
      runner_type: stress-tester
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 stress_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  StressTestUBsan:
    needs: [BuilderDebUBsan]
    needs: [RunConfig, BuilderDebUBsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stress test (ubsan)
      runner_type: stress-tester
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 stress_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  StressTestDebug:
    needs: [BuilderDebDebug]
    needs: [RunConfig, BuilderDebDebug]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Stress test (debug)
      runner_type: stress-tester
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 stress_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
  IntegrationTestsAsan:
    needs: [BuilderDebAsan]
    needs: [RunConfig, BuilderDebAsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Integration tests (asan)
      runner_type: stress-tester
      batches: 4
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 integration_test_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  IntegrationTestsAnalyzerAsan:
    needs: [BuilderDebAsan]
    needs: [RunConfig, BuilderDebAsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Integration tests (asan, analyzer)
      runner_type: stress-tester
      batches: 6
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 integration_test_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  IntegrationTestsTsan:
    needs: [BuilderDebTsan]
    needs: [RunConfig, BuilderDebTsan]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Integration tests (tsan)
      runner_type: stress-tester
      batches: 6
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 integration_test_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  IntegrationTestsRelease:
    needs: [BuilderDebRelease]
    needs: [RunConfig, BuilderDebRelease]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Integration tests (release)
      runner_type: stress-tester
      batches: 4
      run_command: |
        cd "$REPO_COPY/tests/ci"
        python3 integration_test_check.py "$CHECK_NAME"
      data: ${{ needs.RunConfig.outputs.data }}
  FinishCheck:
    if: ${{ !failure() && !cancelled() }}
    needs:
      - DockerHubPush
      - DockerServerImages
      - BuilderReport
      - BuilderSpecialReport
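Both report jobs hand the workflow's needs context to build_report_check.py through a multiline environment variable; toJSON(needs) serializes the result of every listed dependency, which is how the report can flag a failed build. Roughly what the script receives in NEEDS_DATA (the values shown are illustrative):

    additional_envs: |
      NEEDS_DATA<<NDENV
      ${{ toJSON(needs) }}
      NDENV
    # NEEDS_DATA then expands to JSON of roughly this shape:
    #   {
    #     "BuilderDebRelease": { "result": "success", "outputs": { ... } },
    #     "BuilderDebAsan":    { "result": "failure", "outputs": { ... } }
    #   }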
.github/workflows/reusable_build.yml (vendored): 33 changed lines
@@ -22,6 +22,10 @@ name: Build ClickHouse
        description: the label of runner to use
        default: builder
        type: string
      data:
        description: json ci data
        type: string
        required: true
      additional_envs:
        description: additional ENV variables to setup the job
        type: string
@@ -29,6 +33,7 @@ name: Build ClickHouse
jobs:
  Build:
    name: Build-${{inputs.build_name}}
    if: contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name)
    env:
      GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
    runs-on: [self-hosted, '${{inputs.runner_type}}']
@@ -37,6 +42,7 @@ jobs:
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          ref: ${{ fromJson(inputs.data).git_ref }}
          submodules: true
          fetch-depth: ${{inputs.checkout_depth}}
          filter: tree:0
@@ -44,6 +50,9 @@ jobs:
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ${{inputs.additional_envs}}
          DOCKER_TAG<<DOCKER_JSON
          ${{ toJson(fromJson(inputs.data).docker_data.images) }}
          DOCKER_JSON
          EOF
          python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
      - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
@@ -60,20 +69,20 @@ jobs:
        uses: ./.github/actions/common_setup
        with:
          job_type: build_check
      - name: Download changed images
        uses: actions/download-artifact@v3
        with:
          name: changed_images
          path: ${{ env.IMAGES_PATH }}
      - name: Pre
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
      - name: Build
        run: |
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
      - name: Upload build URLs to artifacts
        if: ${{ success() || failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ env.BUILD_URLS }}
          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
          python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME"
      - name: Post
        # it still be build report to upload for failed build job
        if: always()
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
      - name: Mark as done
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
      - name: Clean
        if: always()
        uses: ./.github/actions/clean
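Two changes above do most of the work in reusable_build.yml: the job now skips itself unless the configure step scheduled this build, and the build is bracketed by the --pre / --post / --mark-success hooks of tests/ci/ci.py. The skip gate is a plain expression over the forwarded JSON; with an illustrative payload:

    # If inputs.data contains:
    #   {"jobs_data": {"jobs_to_do": ["package_release", "package_asan"]}}
    # then a call with build_name: package_debug is skipped entirely,
    # while build_name: package_asan runs.
    if: contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name)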
.github/workflows/reusable_docker.yml (vendored, new file): 68 added lines
@ -0,0 +1,68 @@
name: Build docker images
'on':
  workflow_call:
    inputs:
      data:
        description: json with ci data from todo job
        required: true
        type: string
      set_latest:
        description: set latest tag for resulting multiarch manifest
        required: false
        type: boolean
        default: false
jobs:
  DockerBuildAarch64:
    runs-on: [self-hosted, style-checker-aarch64]
    if: |
      !failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]'
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          ref: ${{ fromJson(inputs.data).git_ref }}
      - name: Build images
        run: |
          python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
            --suffix aarch64 \
            --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
            --missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}'
  DockerBuildAmd64:
    runs-on: [self-hosted, style-checker]
    if: |
      !failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]'
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          ref: ${{ fromJson(inputs.data).git_ref }}
      - name: Build images
        run: |
          python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
            --suffix amd64 \
            --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
            --missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}'
  DockerMultiArchManifest:
    needs: [DockerBuildAmd64, DockerBuildAarch64]
    runs-on: [self-hosted, style-checker]
    if: |
      !failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]'
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          ref: ${{ fromJson(inputs.data).git_ref }}
      - name: Build images
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          if [ "${{ inputs.set_latest }}" == "true" ]; then
            echo "latest tag will be set for resulting manifests"
            python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
              --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
              --missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
              --set-latest
          else
            python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
              --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
              --missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}'
          fi
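Each docker job above is gated by comparing the JSON-serialized list of missing images against the literal string `'[]'`. A Python sketch of the equivalent check; the shape of `ci_data` is an assumption made for illustration, not a documented schema:

```python
import json

ci_data = {
    "docker_data": {
        "missing_aarch64": [],                   # nothing to build on aarch64
        "missing_amd64": ["clickhouse/fuzzer"],  # one image to rebuild on amd64
        "missing_multi": ["clickhouse/fuzzer"],
    }
}

def should_run(arch_key: str) -> bool:
    # Mirrors `toJson(...) != '[]'` from the workflow condition.
    return json.dumps(ci_data["docker_data"][arch_key]) != "[]"

print(should_run("missing_aarch64"))  # False -> DockerBuildAarch64 is skipped
print(should_run("missing_amd64"))    # True  -> DockerBuildAmd64 runs
```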
90  .github/workflows/reusable_simple_job.yml  vendored  Normal file
@ -0,0 +1,90 @@
### For the pure soul who wishes to move this to another place
# https://github.com/orgs/community/discussions/9050

name: Simple job
'on':
  workflow_call:
    inputs:
      test_name:
        description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
        required: true
        type: string
      runner_type:
        description: the label of runner to use
        required: true
        type: string
      run_command:
        description: the command to launch the check
        default: ""
        required: false
        type: string
      checkout_depth:
        description: the value of the git shallow checkout
        required: false
        type: number
        default: 1
      submodules:
        description: if the submodules should be checked out
        required: false
        type: boolean
        default: false
      additional_envs:
        description: additional ENV variables to setup the job
        type: string
      working-directory:
        description: sets custom working directory
        type: string
        default: ""
      git_ref:
        description: commit to use, merge commit for pr or head
        required: false
        type: string
        default: ${{ github.event.after }} # no merge commit
    secrets:
      secret_envs:
        description: if given, it's passed to the environments
        required: false

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  CHECK_NAME: ${{inputs.test_name}}

jobs:
  Test:
    runs-on: [self-hosted, '${{inputs.runner_type}}']
    name: ${{inputs.test_name}}
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          ref: ${{ inputs.git_ref }}
          submodules: ${{inputs.submodules}}
          fetch-depth: ${{inputs.checkout_depth}}
          filter: tree:0
      - name: Set build envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          CHECK_NAME=${{ inputs.test_name }}
          ${{inputs.additional_envs}}
          ${{secrets.secret_envs}}
          EOF
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: test
      - name: Run
        run: |
          if [ -n '${{ inputs.working-directory }}' ]; then
            cd "${{ inputs.working-directory }}"
          else
            cd "$GITHUB_WORKSPACE/tests/ci"
          fi
          ${{ inputs.run_command }}
      - name: Clean
        if: always()
        uses: ./.github/actions/clean
88  .github/workflows/reusable_test.yml  vendored
@ -14,13 +14,10 @@ name: Testing workflow
        required: true
        type: string
      run_command:
        description: the command to launch the check. Usually starts with `cd '$REPO_COPY/tests/ci'`
        required: true
        description: the command to launch the check
        default: ""
        required: false
        type: string
      batches:
        description: how many batches of the test will be launched
        default: 1
        type: number
      checkout_depth:
        description: the value of the git shallow checkout
        required: false
@ -34,80 +31,89 @@ name: Testing workflow
      additional_envs:
        description: additional ENV variables to setup the job
        type: string
      data:
        description: ci data
        type: string
        required: true
      working-directory:
        description: sets custom working directory
        type: string
        default: ""
    secrets:
      secret_envs:
        description: if given, it's passed to the environments
        required: false

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  CHECK_NAME: ${{inputs.test_name}}

jobs:
  PrepareStrategy:
    # batches < 1 is a misconfiguration,
    # and we need this step only for batches > 1
    if: ${{ inputs.batches > 1 }}
    runs-on: [self-hosted, style-checker-aarch64]
    outputs:
      batches: ${{steps.batches.outputs.batches}}
    steps:
      - name: Calculate batches
        id: batches
        run: |
          batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))')
          echo "batches=${batches_output}" >> "$GITHUB_OUTPUT"
  Test:
    # If PrepareStrategy is skipped for batches == 1,
    # we still need to launch the test.
    # `!failure()` is mandatory here to launch on a skipped Job
    # `&& !cancelled()` keeps the job cancellable
    if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }}
    # Do not add `-0` to the end if there's only one batch
    name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
    runs-on: [self-hosted, '${{inputs.runner_type}}']
    needs: [PrepareStrategy]
    if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }}
    name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
    strategy:
      fail-fast: false # we always wait for the entire matrix
      matrix:
        # if PrepareStrategy does not have batches, we use 0
        batch: ${{ needs.PrepareStrategy.outputs.batches
          && fromJson(needs.PrepareStrategy.outputs.batches)
          || fromJson('[0]')}}
        batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          ref: ${{ fromJson(inputs.data).git_ref }}
          submodules: ${{inputs.submodules}}
          fetch-depth: ${{inputs.checkout_depth}}
          filter: tree:0
      - name: Set build envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          CHECK_NAME=${{ inputs.test_name }}
          ${{inputs.additional_envs}}
          ${{secrets.secret_envs}}
          DOCKER_TAG<<DOCKER_JSON
          ${{ toJson(fromJson(inputs.data).docker_data.images) }}
          DOCKER_JSON
          EOF
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: test
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Setup batch
        if: ${{ inputs.batches > 1 }}
        if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          RUN_BY_HASH_NUM=${{matrix.batch}}
          RUN_BY_HASH_TOTAL=${{inputs.batches}}
          RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
          EOF
      - name: Run test
        run: ${{inputs.run_command}}
      - name: Pre run
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
      - name: Run
        run: |
          if [ -n "${{ inputs.working-directory }}" ]; then
            cd "${{ inputs.working-directory }}"
          else
            cd "$GITHUB_WORKSPACE/tests/ci"
          fi
          if [ -n "$(echo '${{ inputs.run_command }}' | tr -d '\n')" ]; then
            echo "Running command from workflow input"
            ${{ inputs.run_command }}
          else
            echo "Running command from job config"
            python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --run --job-name '${{inputs.test_name}}'
          fi
      - name: Post run
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
      - name: Mark as done
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
      - name: Clean
        if: always()
        uses: ./.github/actions/clean
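Both the removed `PrepareStrategy` job and the new `jobs_params[...].batches` field implement the same idea: split one check into N batches and address each shard through `RUN_BY_HASH_NUM`/`RUN_BY_HASH_TOTAL`. A sketch under those assumptions; the sharding rule shown is illustrative, not the exact CI implementation:

```python
import json
import zlib

def make_batches(num_batches: int) -> str:
    # Same expression the old PrepareStrategy step ran via `python3 -c`.
    return json.dumps(list(range(num_batches)))

def my_share(tests: list[str], num: int, total: int) -> list[str]:
    # Illustrative rule: a test lands in batch `num` when a stable
    # hash of its name modulo `total` equals `num`.
    return [t for t in tests if zlib.crc32(t.encode()) % total == num]

print(make_batches(3))  # "[0, 1, 2]" -> one matrix job per element
tests = ["00001_select", "00002_insert", "00003_join"]
for batch in range(3):
    print(batch, my_share(tests, batch, 3))
```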
10  .gitmessage  Normal file
@ -0,0 +1,10 @@


## To avoid merge commit in CI run (add a leading space to apply):
#no-merge-commit

## Running specified job (add a leading space to apply):
#job_<JOB NAME>
#job_stateless_tests_release
#job_package_debug
#job_integration_tests_asan
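Git strips commit-message lines starting with `#` as comments, so the tags above stay inert until a leading space is added; the CI can then read them from the surviving message. A hypothetical parser sketch of that convention (the actual logic lives in tests/ci and may differ):

```python
import re

COMMIT_MESSAGE = """Fix the thing

 #no-merge-commit
 #job_stateless_tests_release
"""

def extract_tags(message: str) -> list[str]:
    # Lines beginning with '#' are dropped by git as comments; a leading
    # space preserves the line so a tag like ' #job_...' reaches the CI.
    return re.findall(r"^\s+#([\w-]+.*)$", message, flags=re.MULTILINE)

print(extract_tags(COMMIT_MESSAGE))
# ['no-merge-commit', 'job_stateless_tests_release']
```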
3  .gitmodules  vendored
@ -354,6 +354,9 @@
[submodule "contrib/aklomp-base64"]
	path = contrib/aklomp-base64
	url = https://github.com/aklomp/base64.git
[submodule "contrib/pocketfft"]
	path = contrib/pocketfft
	url = https://github.com/mreineck/pocketfft.git
[submodule "contrib/sqids-cpp"]
	path = contrib/sqids-cpp
	url = https://github.com/sqids/sqids-cpp.git
142  CHANGELOG.md
@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v23.12, 2023-12-28](#2312)**<br/>
**[ClickHouse release v23.11, 2023-12-06](#2311)**<br/>
**[ClickHouse release v23.10, 2023-11-02](#2310)**<br/>
**[ClickHouse release v23.9, 2023-09-28](#239)**<br/>
@ -14,6 +15,146 @@

# 2023 Changelog

### <a id="2312"></a> ClickHouse release 23.12, 2023-12-28

#### Backward Incompatible Change
* Fix check for non-deterministic functions in TTL expressions. Previously, you could create a TTL expression with non-deterministic functions in some cases, which could lead to undefined behavior later. This fixes [#37250](https://github.com/ClickHouse/ClickHouse/issues/37250). Disallow TTL expressions that don't depend on any columns of a table by default. It can be allowed back by `SET allow_suspicious_ttl_expressions = 1` or `SET compatibility = '23.11'`. Closes [#37286](https://github.com/ClickHouse/ClickHouse/issues/37286). [#51858](https://github.com/ClickHouse/ClickHouse/pull/51858) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The MergeTree setting `clean_deleted_rows` is deprecated and has no effect anymore. The `CLEANUP` keyword for `OPTIMIZE` is not allowed by default (it can be unlocked with the `allow_experimental_replacing_merge_with_cleanup` setting). [#58267](https://github.com/ClickHouse/ClickHouse/pull/58267) ([Alexander Tokmakov](https://github.com/tavplubix)). This fixes [#57930](https://github.com/ClickHouse/ClickHouse/issues/57930). This closes [#54988](https://github.com/ClickHouse/ClickHouse/issues/54988). This closes [#54570](https://github.com/ClickHouse/ClickHouse/issues/54570). This closes [#50346](https://github.com/ClickHouse/ClickHouse/issues/50346). This closes [#47579](https://github.com/ClickHouse/ClickHouse/issues/47579). The feature has to be removed because it is not good. We have to remove it as quickly as possible, because there is no other option. [#57932](https://github.com/ClickHouse/ClickHouse/pull/57932) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### New Feature
* Implement Refreshable Materialized Views, requested in [#33919](https://github.com/ClickHouse/ClickHouse/issues/33919). [#56946](https://github.com/ClickHouse/ClickHouse/pull/56946) ([Michael Kolupaev](https://github.com/al13n321), [Michael Guzov](https://github.com/koloshmet)).
* Introduce `PASTE JOIN`, which allows users to join tables without an `ON` clause, simply by row numbers. Example: `SELECT * FROM (SELECT number AS a FROM numbers(2)) AS t1 PASTE JOIN (SELECT number AS a FROM numbers(2) ORDER BY a DESC) AS t2`. [#57995](https://github.com/ClickHouse/ClickHouse/pull/57995) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* The `ORDER BY` clause now supports specifying `ALL`, meaning that ClickHouse sorts by all columns in the `SELECT` clause. Example: `SELECT col1, col2 FROM tab WHERE [...] ORDER BY ALL`. [#57875](https://github.com/ClickHouse/ClickHouse/pull/57875) ([zhongyuankai](https://github.com/zhongyuankai)).
* Added a new mutation command `ALTER TABLE <table> APPLY DELETED MASK`, which allows enforcing the application of the mask written by lightweight delete and removing rows marked as deleted from disk. [#57433](https://github.com/ClickHouse/ClickHouse/pull/57433) ([Anton Popov](https://github.com/CurtizJ)).
* A handler `/binary` opens a visual viewer of symbols inside the ClickHouse binary. [#58211](https://github.com/ClickHouse/ClickHouse/pull/58211) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added a new SQL function `sqid` to generate Sqids (https://sqids.org/), example: `SELECT sqid(125, 126)`. [#57512](https://github.com/ClickHouse/ClickHouse/pull/57512) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a new function `seriesPeriodDetectFFT` to detect a series' period using FFT (a sketch of the idea follows this list). [#57574](https://github.com/ClickHouse/ClickHouse/pull/57574) ([Bhavna Jindal](https://github.com/bhavnajindal)).
* Add an HTTP endpoint for checking if Keeper is ready to accept traffic. [#55876](https://github.com/ClickHouse/ClickHouse/pull/55876) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Add 'union' mode for schema inference. In this mode the resulting table schema is the union of all file schemas (the schema is inferred from each file). The mode of schema inference is controlled by the setting `schema_inference_mode` with two possible values - `default` and `union`. Closes [#55428](https://github.com/ClickHouse/ClickHouse/issues/55428). [#55892](https://github.com/ClickHouse/ClickHouse/pull/55892) ([Kruglov Pavel](https://github.com/Avogar)).
* Add a new setting `input_format_csv_try_infer_numbers_from_strings` that allows inferring numbers from strings in CSV format. Closes [#56455](https://github.com/ClickHouse/ClickHouse/issues/56455). [#56859](https://github.com/ClickHouse/ClickHouse/pull/56859) ([Kruglov Pavel](https://github.com/Avogar)).
* When the number of databases or tables exceeds a configurable threshold, show a warning to the user. [#57375](https://github.com/ClickHouse/ClickHouse/pull/57375) ([凌涛](https://github.com/lingtaolf)).
* Dictionaries with the `HASHED_ARRAY` (and `COMPLEX_KEY_HASHED_ARRAY`) layout support `SHARDS`, similarly to `HASHED`. [#57544](https://github.com/ClickHouse/ClickHouse/pull/57544) ([vdimir](https://github.com/vdimir)).
* Add asynchronous metrics for total primary key bytes and total allocated primary key bytes in memory. [#57551](https://github.com/ClickHouse/ClickHouse/pull/57551) ([Bharat Nallan](https://github.com/bharatnc)).
* Add the `SHA512_256` function. [#57645](https://github.com/ClickHouse/ClickHouse/pull/57645) ([Bharat Nallan](https://github.com/bharatnc)).
* Add `FORMAT_BYTES` as an alias for `formatReadableSize`. [#57592](https://github.com/ClickHouse/ClickHouse/pull/57592) ([Bharat Nallan](https://github.com/bharatnc)).
* Allow passing an optional session token to the `s3` table function. [#57850](https://github.com/ClickHouse/ClickHouse/pull/57850) ([Shani Elharrar](https://github.com/shanielh)).
* Introduce a new setting `http_make_head_request`. If it is turned off, the URL table engine will not do a HEAD request to determine the file size. This is needed to support inefficient, misconfigured, or incapable HTTP servers. [#54602](https://github.com/ClickHouse/ClickHouse/pull/54602) ([Fionera](https://github.com/fionera)).
* It is now possible to refer to an ALIAS column in index (non-primary-key) definitions (issue [#55650](https://github.com/ClickHouse/ClickHouse/issues/55650)). Example: `CREATE TABLE tab(col UInt32, col_alias ALIAS col + 1, INDEX idx (col_alias) TYPE minmax) ENGINE = MergeTree ORDER BY col;`. [#57546](https://github.com/ClickHouse/ClickHouse/pull/57546) ([Robert Schulze](https://github.com/rschu1ze)).
* Added a new setting `readonly` which can be used to specify that an S3 disk is read-only. It can be useful to create a table on a disk of `s3_plain` type while having read-only access to the underlying S3 bucket. [#57977](https://github.com/ClickHouse/ClickHouse/pull/57977) ([Pengyuan Bian](https://github.com/bianpengyuan)).
* The primary key analysis in MergeTree tables will now be applied to predicates that include the virtual column `_part_offset` (optionally with `_part`). This feature can serve as a special kind of secondary index. [#58224](https://github.com/ClickHouse/ClickHouse/pull/58224) ([Amos Bird](https://github.com/amosbird)).
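On `seriesPeriodDetectFFT`: the usual FFT-based approach takes the dominant non-DC frequency bin of the series and reports `n / bin` as the period. An illustrative numpy sketch of the idea, not ClickHouse's implementation:

```python
import numpy as np

def detect_period_fft(series: np.ndarray) -> float:
    # Dominant non-DC frequency bin -> period = n / bin_index.
    spectrum = np.abs(np.fft.rfft(series - series.mean()))
    peak = int(np.argmax(spectrum[1:])) + 1  # skip the DC component
    return len(series) / peak

# A signal repeating every 25 samples:
t = np.arange(500)
signal = np.sin(2 * np.pi * t / 25)
print(round(detect_period_fft(signal)))  # 25
```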
#### Performance Improvement
* Extract non-intersecting part ranges from MergeTree tables during FINAL processing. That way we can avoid additional FINAL logic for these non-intersecting part ranges. When the number of duplicate values with the same primary key is low, performance will be almost the same as without FINAL. Improve reading performance for MergeTree FINAL when the `do_not_merge_across_partitions_select_final` setting is set. [#58120](https://github.com/ClickHouse/ClickHouse/pull/58120) ([Maksim Kita](https://github.com/kitaisreal)).
* Copying between S3 disks now uses S3 server-side copy instead of copying through a buffer. Improves `BACKUP/RESTORE` operations and the `clickhouse-disks copy` command. [#56744](https://github.com/ClickHouse/ClickHouse/pull/56744) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Hash JOIN respects the setting `max_joined_block_size_rows` and does not produce large blocks for `ALL JOIN`. [#56996](https://github.com/ClickHouse/ClickHouse/pull/56996) ([vdimir](https://github.com/vdimir)).
* Release memory for aggregation earlier. This may avoid unnecessary external aggregation. [#57691](https://github.com/ClickHouse/ClickHouse/pull/57691) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Improve performance of string serialization. [#57717](https://github.com/ClickHouse/ClickHouse/pull/57717) ([Maksim Kita](https://github.com/kitaisreal)).
* Support trivial count optimization for `Merge`-engine tables. [#57867](https://github.com/ClickHouse/ClickHouse/pull/57867) ([skyoct](https://github.com/skyoct)).
* Optimized aggregation in some cases. [#57872](https://github.com/ClickHouse/ClickHouse/pull/57872) ([Anton Popov](https://github.com/CurtizJ)).
* The `hasAny` function can now take advantage of the full-text skipping indices. [#57878](https://github.com/ClickHouse/ClickHouse/pull/57878) ([Jpnock](https://github.com/Jpnock)).
* The function `if(cond, then, else)` (and its alias `cond ? then : else`) was optimized to use branch-free evaluation (see the sketch after this list). [#57885](https://github.com/ClickHouse/ClickHouse/pull/57885) ([zhanglistar](https://github.com/zhanglistar)).
* MergeTree automatically derives the `do_not_merge_across_partitions_select_final` setting if the partition key expression contains only columns from the primary key expression. [#58218](https://github.com/ClickHouse/ClickHouse/pull/58218) ([Maksim Kita](https://github.com/kitaisreal)).
* Speed up `MIN` and `MAX` for native types. [#58231](https://github.com/ClickHouse/ClickHouse/pull/58231) ([Raúl Marín](https://github.com/Algunenano)).
* Implement the `SLRU` cache policy for the filesystem cache. [#57076](https://github.com/ClickHouse/ClickHouse/pull/57076) ([Kseniia Sumarokova](https://github.com/kssenii)).
* The limit for the number of connections per endpoint for background fetches was raised from `15` to the value of the `background_fetches_pool_size` setting. - The MergeTree-level setting `replicated_max_parallel_fetches_for_host` became obsolete. - The MergeTree-level settings `replicated_fetches_http_connection_timeout`, `replicated_fetches_http_send_timeout` and `replicated_fetches_http_receive_timeout` are moved to the server level. - The setting `keep_alive_timeout` is added to the list of server-level settings. [#57523](https://github.com/ClickHouse/ClickHouse/pull/57523) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Make querying `system.filesystem_cache` not memory intensive. [#57687](https://github.com/ClickHouse/ClickHouse/pull/57687) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Reduce memory usage during string deserialization. [#57787](https://github.com/ClickHouse/ClickHouse/pull/57787) ([Maksim Kita](https://github.com/kitaisreal)).
* More efficient constructor for Enum - it makes sense when an Enum has a boatload of values. [#57887](https://github.com/ClickHouse/ClickHouse/pull/57887) ([Duc Canh Le](https://github.com/canhld94)).
* An improvement for reading from the filesystem cache: always use the `pread` method. [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)).
* Add an optimization for AND notEquals chains in the logical expression optimizer. This optimization is only available with the experimental Analyzer enabled. [#58214](https://github.com/ClickHouse/ClickHouse/pull/58214) ([Kevin Mingtarja](https://github.com/kevinmingtarja)).
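On the branch-free `if(cond, then, else)` optimization: replacing a per-row branch with arithmetic selection avoids branch mispredictions and vectorizes well. An illustrative numpy sketch of the technique, not the actual ClickHouse kernel:

```python
import numpy as np

def branchy_if(cond, then_vals, else_vals):
    # One hard-to-predict branch per row.
    return np.array([t if c else e for c, t, e in zip(cond, then_vals, else_vals)])

def branch_free_if(cond, then_vals, else_vals):
    # Arithmetic selection: no branches, amenable to SIMD.
    mask = cond.astype(then_vals.dtype)
    return mask * then_vals + (1 - mask) * else_vals

cond = np.array([1, 0, 1, 0])
a = np.array([10, 20, 30, 40])
b = np.array([1, 2, 3, 4])
print(branch_free_if(cond, a, b))  # [10  2 30  4]
```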
#### Improvement
* Support for the soft memory limit in Keeper. It will refuse requests if the memory usage is close to the maximum. [#57271](https://github.com/ClickHouse/ClickHouse/pull/57271) ([Han Fei](https://github.com/hanfei1991)). [#57699](https://github.com/ClickHouse/ClickHouse/pull/57699) ([Han Fei](https://github.com/hanfei1991)).
* Make inserts into distributed tables handle updated cluster configuration properly. When the list of cluster nodes is dynamically updated, the Directory Monitor of the Distributed table will update it. [#42826](https://github.com/ClickHouse/ClickHouse/pull/42826) ([zhongyuankai](https://github.com/zhongyuankai)).
* Do not allow creating a replicated table with inconsistent merge parameters. [#56833](https://github.com/ClickHouse/ClickHouse/pull/56833) ([Duc Canh Le](https://github.com/canhld94)).
* Show uncompressed size in `system.tables`. [#56618](https://github.com/ClickHouse/ClickHouse/issues/56618). [#57186](https://github.com/ClickHouse/ClickHouse/pull/57186) ([Chen Lixiang](https://github.com/chenlx0)).
* Add `skip_unavailable_shards` as a setting for `Distributed` tables that is similar to the corresponding query-level setting. Closes [#43666](https://github.com/ClickHouse/ClickHouse/issues/43666). [#57218](https://github.com/ClickHouse/ClickHouse/pull/57218) ([Gagan Goel](https://github.com/tntnatbry)).
* The function `substring` (aliases: `substr`, `mid`) can now be used with `Enum` types. Previously, the first function argument had to be a value of type `String` or `FixedString`. This improves compatibility with 3rd-party tools such as Tableau via the MySQL interface. [#57277](https://github.com/ClickHouse/ClickHouse/pull/57277) ([Serge Klochkov](https://github.com/slvrtrn)).
* Function `format` now supports arbitrary argument types (instead of only `String` and `FixedString` arguments). This is important to calculate `SELECT format('The {0} to all questions is {1}', 'answer', 42)`. [#57549](https://github.com/ClickHouse/ClickHouse/pull/57549) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow using the `date_trunc` function with a case-insensitive first argument. Both cases are now supported: `SELECT date_trunc('day', now())` and `SELECT date_trunc('DAY', now())`. [#57624](https://github.com/ClickHouse/ClickHouse/pull/57624) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Better hints when a table doesn't exist. [#57342](https://github.com/ClickHouse/ClickHouse/pull/57342) ([Bharat Nallan](https://github.com/bharatnc)).
* Allow overwriting the `max_partition_size_to_drop` and `max_table_size_to_drop` server settings at query time. [#57452](https://github.com/ClickHouse/ClickHouse/pull/57452) ([Jordi Villar](https://github.com/jrdi)).
* Slightly better inference of unnamed tuples in JSON formats. [#57751](https://github.com/ClickHouse/ClickHouse/pull/57751) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for the read-only flag when connecting to Keeper (fixes [#53749](https://github.com/ClickHouse/ClickHouse/issues/53749)). [#57479](https://github.com/ClickHouse/ClickHouse/pull/57479) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Fix possible distributed sends getting stuck due to "No such file or directory" (during recovery of a batch from disk). Fix possible issues with `error_count` from `system.distribution_queue` (in case of `distributed_directory_monitor_max_sleep_time_ms` > 5min). Introduce a profile event to track async INSERT failures - `DistributedAsyncInsertionFailures`. [#57480](https://github.com/ClickHouse/ClickHouse/pull/57480) ([Azat Khuzhin](https://github.com/azat)).
* Support PostgreSQL generated columns and default column values in `MaterializedPostgreSQL` (experimental feature). Closes [#40449](https://github.com/ClickHouse/ClickHouse/issues/40449). [#57568](https://github.com/ClickHouse/ClickHouse/pull/57568) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow applying some filesystem cache config settings changes without a server restart. [#57578](https://github.com/ClickHouse/ClickHouse/pull/57578) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Properly handle a PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot](https://github.com/myrrc)).
* Expose the total number of errors that occurred since the last server restart as a `ClickHouseErrorMetric_ALL` metric. [#57627](https://github.com/ClickHouse/ClickHouse/pull/57627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow nodes in the configuration file with a `from_env`/`from_zk` reference and a non-empty element with replace=1. [#57628](https://github.com/ClickHouse/ClickHouse/pull/57628) ([Azat Khuzhin](https://github.com/azat)).
* A table function `fuzzJSON` which allows generating a lot of malformed JSON for fuzzing. [#57646](https://github.com/ClickHouse/ClickHouse/pull/57646) ([Julia Kartseva](https://github.com/jkartseva)).
* Allow IPv6 to UInt128 conversion and binary arithmetic (illustrated after this list). [#57707](https://github.com/ClickHouse/ClickHouse/pull/57707) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add a setting for the async inserts deduplication cache - how long we wait for a cache update. Deprecate the setting `async_block_ids_cache_min_update_interval_ms`. Now the cache is updated only in case of conflicts. [#57743](https://github.com/ClickHouse/ClickHouse/pull/57743) ([alesapin](https://github.com/alesapin)).
* The `sleep()` function can now be cancelled with `KILL QUERY`. [#57746](https://github.com/ClickHouse/ClickHouse/pull/57746) ([Vitaly Baranov](https://github.com/vitlibar)).
* Forbid `CREATE TABLE ... AS SELECT` queries for `Replicated` table engines in the experimental `Replicated` database because they are not supported. Reference [#35408](https://github.com/ClickHouse/ClickHouse/issues/35408). [#57796](https://github.com/ClickHouse/ClickHouse/pull/57796) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix and improve transforming queries for external databases, to recursively obtain all compatible predicates. [#57888](https://github.com/ClickHouse/ClickHouse/pull/57888) ([flynn](https://github.com/ucasfl)).
* Support dynamic reloading of the filesystem cache size. Closes [#57866](https://github.com/ClickHouse/ClickHouse/issues/57866). [#57897](https://github.com/ClickHouse/ClickHouse/pull/57897) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Correctly support `system.stack_trace` for threads with blocked SIGRTMIN (these threads can exist in low-quality external libraries such as Apache rdkafka). [#57907](https://github.com/ClickHouse/ClickHouse/pull/57907) ([Azat Khuzhin](https://github.com/azat)). Also, send the signal to threads only if it is not blocked, to avoid waiting `storage_system_stack_trace_pipe_read_timeout_ms` when it does not make any sense. [#58136](https://github.com/ClickHouse/ClickHouse/pull/58136) ([Azat Khuzhin](https://github.com/azat)).
* Tolerate Keeper failures in the quorum insert check. [#57986](https://github.com/ClickHouse/ClickHouse/pull/57986) ([Raúl Marín](https://github.com/Algunenano)).
* Add max/peak RSS (`MemoryResidentMax`) to system.asynchronous_metrics. [#58095](https://github.com/ClickHouse/ClickHouse/pull/58095) ([Azat Khuzhin](https://github.com/azat)).
* Allow users to use s3-style links (`https://` and `s3://`) without mentioning the region if it's not the default one. Also, find the correct region if the user specified the wrong one. [#58148](https://github.com/ClickHouse/ClickHouse/pull/58148) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* `clickhouse-format --obfuscate` will know about Settings, MergeTreeSettings, and time zones and keep their names unchanged. [#58179](https://github.com/ClickHouse/ClickHouse/pull/58179) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added an explicit `finalize()` function in `ZipArchiveWriter`. Simplify overly complicated code in `ZipArchiveWriter`. This fixes [#58074](https://github.com/ClickHouse/ClickHouse/issues/58074). [#58202](https://github.com/ClickHouse/ClickHouse/pull/58202) ([Vitaly Baranov](https://github.com/vitlibar)).
* Make caches with the same path use the same cache objects. This behaviour existed before but was broken in 23.4. If such caches with the same path have a different set of cache settings, an exception will be thrown stating that this is not allowed. [#58264](https://github.com/ClickHouse/ClickHouse/pull/58264) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Parallel replicas (experimental feature): friendly settings. [#57542](https://github.com/ClickHouse/ClickHouse/pull/57542) ([Igor Nikonov](https://github.com/devcrafter)).
* Parallel replicas (experimental feature): announcement response handling improvement. [#57749](https://github.com/ClickHouse/ClickHouse/pull/57749) ([Igor Nikonov](https://github.com/devcrafter)).
* Parallel replicas (experimental feature): give more respect to `min_number_of_marks` in `ParallelReplicasReadingCoordinator`. [#57763](https://github.com/ClickHouse/ClickHouse/pull/57763) ([Nikita Taranov](https://github.com/nickitat)).
* Parallel replicas (experimental feature): disable parallel replicas with IN (subquery). [#58133](https://github.com/ClickHouse/ClickHouse/pull/58133) ([Igor Nikonov](https://github.com/devcrafter)).
* Parallel replicas (experimental feature): add the profile event 'ParallelReplicasUsedCount'. [#58173](https://github.com/ClickHouse/ClickHouse/pull/58173) ([Igor Nikonov](https://github.com/devcrafter)).
* Non-POST requests such as HEAD will be readonly, similar to GET. [#58060](https://github.com/ClickHouse/ClickHouse/pull/58060) ([San](https://github.com/santrancisco)).
* Add a `bytes_uncompressed` column to `system.part_log`. [#58167](https://github.com/ClickHouse/ClickHouse/pull/58167) ([Jordi Villar](https://github.com/jrdi)).
* Add the base backup name to the `system.backups` and `system.backup_log` tables. [#58178](https://github.com/ClickHouse/ClickHouse/pull/58178) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Add support for specifying query parameters on the command line in clickhouse-local. [#58210](https://github.com/ClickHouse/ClickHouse/pull/58210) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
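On the IPv6/UInt128 conversion: an IPv6 address is a 128-bit integer, so conversion and arithmetic are well defined once a byte order is fixed. A short illustration using Python's standard library (not ClickHouse code):

```python
import ipaddress

addr = ipaddress.IPv6Address("2001:db8::1")
as_int = int(addr)  # the address as a 128-bit integer
print(as_int)
print(ipaddress.IPv6Address(as_int + 1))  # 2001:db8::2 -> arithmetic round-trips
```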
#### Build/Testing/Packaging Improvement
* Randomize more settings. [#39663](https://github.com/ClickHouse/ClickHouse/pull/39663) ([Anton Popov](https://github.com/CurtizJ)).
* Randomize disabled optimizations in CI. [#57315](https://github.com/ClickHouse/ClickHouse/pull/57315) ([Raúl Marín](https://github.com/Algunenano)).
* Allow usage of Azure-related table engines/functions on macOS. [#51866](https://github.com/ClickHouse/ClickHouse/pull/51866) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* ClickHouse Fast Test now uses Musl instead of GLibc. [#57711](https://github.com/ClickHouse/ClickHouse/pull/57711) ([Alexey Milovidov](https://github.com/alexey-milovidov)). The fully-static Musl build is available to download from the CI.
* Run ClickBench for every commit. This closes [#57708](https://github.com/ClickHouse/ClickHouse/issues/57708). [#57712](https://github.com/ClickHouse/ClickHouse/pull/57712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove the usage of the harmful C/POSIX `select` function from external libraries. [#57467](https://github.com/ClickHouse/ClickHouse/pull/57467) ([Igor Nikonov](https://github.com/devcrafter)).
* Settings only available in ClickHouse Cloud will also be present in the open-source ClickHouse build for convenience. [#57638](https://github.com/ClickHouse/ClickHouse/pull/57638) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fixed a possibility of sorting order breakage in TTL GROUP BY. [#49103](https://github.com/ClickHouse/ClickHouse/pull/49103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix: split the `lttb` bucket strategy; the first and last buckets should only contain a single point. [#57003](https://github.com/ClickHouse/ClickHouse/pull/57003) ([FFish](https://github.com/wxybear)).
* Fix possible deadlock in the `Template` format during sync after an error. [#57004](https://github.com/ClickHouse/ClickHouse/pull/57004) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix early stop while parsing a file with skipping lots of errors. [#57006](https://github.com/ClickHouse/ClickHouse/pull/57006) ([Kruglov Pavel](https://github.com/Avogar)).
* Prevent dictionary ACL bypass via the `dictionary` table function. [#57362](https://github.com/ClickHouse/ClickHouse/pull/57362) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix another case of a "non-ready set" error found by Fuzzer. [#57423](https://github.com/ClickHouse/ClickHouse/pull/57423) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix several issues regarding PostgreSQL `array_ndims` usage. [#57436](https://github.com/ClickHouse/ClickHouse/pull/57436) ([Ryan Jacobs](https://github.com/ryanmjacobs)).
* Fix RWLock inconsistency after write lock timeout. [#57454](https://github.com/ClickHouse/ClickHouse/pull/57454) ([Vitaly Baranov](https://github.com/vitlibar)). Fix RWLock inconsistency after write lock timeout (again). [#57733](https://github.com/ClickHouse/ClickHouse/pull/57733) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix: don't exclude the ephemeral column when building the pushing-to-view chain. [#57461](https://github.com/ClickHouse/ClickHouse/pull/57461) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* MaterializedPostgreSQL (experimental feature): fix issue [#41922](https://github.com/ClickHouse/ClickHouse/issues/41922), add a test for [#41923](https://github.com/ClickHouse/ClickHouse/issues/41923). [#57515](https://github.com/ClickHouse/ClickHouse/pull/57515) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Ignore the ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix a crash in clickhouse-local. [#57553](https://github.com/ClickHouse/ClickHouse/pull/57553) ([Nikolay Degterinsky](https://github.com/evillique)).
* A fix for Hash JOIN. [#57564](https://github.com/ClickHouse/ClickHouse/pull/57564) ([vdimir](https://github.com/vdimir)).
* Fix a possible error in the PostgreSQL source. [#57567](https://github.com/ClickHouse/ClickHouse/pull/57567) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix type correction in Hash JOIN for nested LowCardinality. [#57614](https://github.com/ClickHouse/ClickHouse/pull/57614) ([vdimir](https://github.com/vdimir)).
* Avoid hangs of `system.stack_trace` by correctly prohibiting parallel reads from it. [#57641](https://github.com/ClickHouse/ClickHouse/pull/57641) ([Azat Khuzhin](https://github.com/azat)).
* Fix an error for aggregation of sparse columns with `any(...) RESPECT NULLS`. [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)).
* Fix unary operator parsing. [#57713](https://github.com/ClickHouse/ClickHouse/pull/57713) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix dependency loading for the experimental table engine `MaterializedPostgreSQL`. [#57754](https://github.com/ClickHouse/ClickHouse/pull/57754) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix retries for disconnected nodes for BACKUP/RESTORE ON CLUSTER. [#57764](https://github.com/ClickHouse/ClickHouse/pull/57764) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix the result of external aggregation in case of a partially materialized projection. [#57790](https://github.com/ClickHouse/ClickHouse/pull/57790) ([Anton Popov](https://github.com/CurtizJ)).
* Fix merging in aggregation functions with the `*Map` combinator. [#57795](https://github.com/ClickHouse/ClickHouse/pull/57795) ([Anton Popov](https://github.com/CurtizJ)).
* Disable `system.kafka_consumers` because it has a bug. [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix LowCardinality keys support in Merge JOIN. [#57827](https://github.com/ClickHouse/ClickHouse/pull/57827) ([vdimir](https://github.com/vdimir)).
* A fix for `InterpreterCreateQuery` related to the sample block. [#57855](https://github.com/ClickHouse/ClickHouse/pull/57855) ([Maksim Kita](https://github.com/kitaisreal)).
* `addresses_expr` was ignored for named collections from PostgreSQL. [#57874](https://github.com/ClickHouse/ClickHouse/pull/57874) ([joelynch](https://github.com/joelynch)).
* Fix invalid memory access in BLAKE3 (Rust). [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)). Then it was rewritten from Rust to C++ for better [memory-safety](https://www.memorysafety.org/). [#57994](https://github.com/ClickHouse/ClickHouse/pull/57994) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in `CREATE INDEX`. [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix handling of unavailable replicas before the first request happened. [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)).
* Fix literal alias misclassification. [#57988](https://github.com/ClickHouse/ClickHouse/pull/57988) ([Chen768959](https://github.com/Chen768959)).
* Fix invalid preprocessing in Keeper. [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix an integer overflow in the `Poco` library, related to `UTF32Encoding`. [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Fix parallel replicas (experimental feature) in the presence of a scalar subquery with a big integer value. [#58118](https://github.com/ClickHouse/ClickHouse/pull/58118) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `accurateCastOrNull` for out-of-range `DateTime`. [#58139](https://github.com/ClickHouse/ClickHouse/pull/58139) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix a possible `PARAMETER_OUT_OF_BOUND` error during subcolumn reading from a wide part in MergeTree. [#58175](https://github.com/ClickHouse/ClickHouse/pull/58175) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a slow-down of CREATE VIEW with an enormous number of subqueries. [#58220](https://github.com/ClickHouse/ClickHouse/pull/58220) ([Tao Wang](https://github.com/wangtZJU)).
* Fix parallel parsing for JSONCompactEachRow. [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).
### <a id="2311"></a> ClickHouse release 23.11, 2023-12-06
|
||||
|
||||
#### Backward Incompatible Change
|
||||
@ -105,7 +246,6 @@
|
||||
* Rewrite equality with `is null` check in JOIN ON section. Experimental *Analyzer only*. [#56538](https://github.com/ClickHouse/ClickHouse/pull/56538) ([vdimir](https://github.com/vdimir)).
|
||||
* Function`concat` now supports arbitrary argument types (instead of only String and FixedString arguments). This makes it behave more similar to MySQL `concat` implementation. For example, `SELECT concat('ab', 42)` now returns `ab42`. [#56540](https://github.com/ClickHouse/ClickHouse/pull/56540) ([Serge Klochkov](https://github.com/slvrtrn)).
|
||||
* Allow getting cache configuration from 'named_collection' section in config or from SQL created named collections. [#56541](https://github.com/ClickHouse/ClickHouse/pull/56541) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Update `query_masking_rules` when reloading the config ([#56449](https://github.com/ClickHouse/ClickHouse/issues/56449)). [#56573](https://github.com/ClickHouse/ClickHouse/pull/56573) ([Mikhail Koviazin](https://github.com/mkmkme)).
|
||||
* PostgreSQL database engine: Make the removal of outdated tables less aggressive with unsuccessful postgres connection. [#56609](https://github.com/ClickHouse/ClickHouse/pull/56609) ([jsc0218](https://github.com/jsc0218)).
|
||||
* It took too much time to connnect to PG when URL is not right, so the relevant query stucks there and get cancelled. [#56648](https://github.com/ClickHouse/ClickHouse/pull/56648) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Keeper improvement: disable compressed logs by default in Keeper. [#56763](https://github.com/ClickHouse/ClickHouse/pull/56763) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
4  LICENSE
@ -1,4 +1,4 @@
Copyright 2016-2023 ClickHouse, Inc.
Copyright 2016-2024 ClickHouse, Inc.

                                 Apache License
                           Version 2.0, January 2004
@ -188,7 +188,7 @@ Copyright 2016-2023 ClickHouse, Inc.
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2016-2023 ClickHouse, Inc.
   Copyright 2016-2024 ClickHouse, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
@ -33,12 +33,7 @@ curl https://clickhouse.com/ | sh

## Upcoming Events

* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/296488501/) - Nov 30
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/296488779/) - Dec 11
* [**ClickHouse Meetup in Sydney**](https://www.meetup.com/clickhouse-sydney-user-group/events/297638812/) - Dec 12
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/296488840/) - Dec 12

Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
Keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s

| Version | Supported |
|:-|:-|
| 23.12 | ✔️ |
| 23.11 | ✔️ |
| 23.10 | ✔️ |
| 23.9 | ✔️ |
| 23.9 | ❌ |
| 23.8 | ✔️ |
| 23.7 | ❌ |
| 23.6 | ❌ |
@ -69,6 +69,9 @@
// init() is called in the MyIOS constructor.
// Therefore we replace each call to init() with
// the poco_ios_init macro defined below.
//
// Also this macro will adjust exceptions() flags, since by default std::ios
// will hide exceptions, while in ClickHouse it is better to pass them through.

#if !defined(POCO_IOS_INIT_HACK)
@ -79,7 +82,10 @@
#if defined(POCO_IOS_INIT_HACK)
#	define poco_ios_init(buf)
#else
#	define poco_ios_init(buf) init(buf)
#	define poco_ios_init(buf) do { \
		init(buf); \
		this->exceptions(std::ios::failbit | std::ios::badbit); \
	} while (0)
#endif
@ -70,6 +70,15 @@ public:
	int queryConvert(const unsigned char * bytes, int length) const;
	int sequenceLength(const unsigned char * bytes, int length) const;

protected:
	static int safeToInt(Poco::UInt32 value)
	{
		if (value <= 0x10FFFF)
			return static_cast<int>(value);
		else
			return -1;
	}

private:
	bool _flipBytes;
	static const char * _names[];
@ -30,22 +30,22 @@ const char* UTF32Encoding::_names[] =

const TextEncoding::CharacterMap UTF32Encoding::_charMap =
{
	/* 00 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 10 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 20 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 30 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 40 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 50 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 60 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 70 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 80 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 90 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* a0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* b0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* c0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* d0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* e0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* f0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	/* 00 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 10 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 20 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 30 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 40 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 50 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 60 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 70 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 80 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* 90 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* a0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* b0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* c0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* d0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* e0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
	/* f0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
};
@ -118,7 +118,7 @@ const TextEncoding::CharacterMap& UTF32Encoding::characterMap() const
int UTF32Encoding::convert(const unsigned char* bytes) const
{
	UInt32 uc;
	unsigned char* p = (unsigned char*) &uc;
	unsigned char* p = reinterpret_cast<unsigned char*>(&uc);
	*p++ = *bytes++;
	*p++ = *bytes++;
	*p++ = *bytes++;
@ -129,7 +129,7 @@ int UTF32Encoding::convert(const unsigned char* bytes) const
		ByteOrder::flipBytes(uc);
	}

	return uc;
	return safeToInt(uc);
}

@ -138,7 +138,7 @@ int UTF32Encoding::convert(int ch, unsigned char* bytes, int length) const
	if (bytes && length >= 4)
	{
		UInt32 ch1 = _flipBytes ? ByteOrder::flipBytes((UInt32) ch) : (UInt32) ch;
		unsigned char* p = (unsigned char*) &ch1;
		unsigned char* p = reinterpret_cast<unsigned char*>(&ch1);
		*bytes++ = *p++;
		*bytes++ = *p++;
		*bytes++ = *p++;
@ -155,14 +155,14 @@ int UTF32Encoding::queryConvert(const unsigned char* bytes, int length) const
	if (length >= 4)
	{
		UInt32 uc;
		unsigned char* p = (unsigned char*) &uc;
		unsigned char* p = reinterpret_cast<unsigned char*>(&uc);
		*p++ = *bytes++;
		*p++ = *bytes++;
		*p++ = *bytes++;
		*p++ = *bytes++;
		if (_flipBytes)
			ByteOrder::flipBytes(uc);
		return uc;
		ret = safeToInt(uc);
	}

	return ret;
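The `safeToInt` helper added above caps valid code points at U+10FFFF and reports anything larger as invalid (`-1`), which is what fixes the integer overflow. The same check as a tiny Python sketch, mirroring only what the patch does (surrogate handling aside):

```python
MAX_CODE_POINT = 0x10FFFF

def safe_to_int(value: int) -> int:
    # Mirrors UTF32Encoding::safeToInt: accept values up to U+10FFFF,
    # report anything larger as invalid.
    return value if value <= MAX_CODE_POINT else -1

print(safe_to_int(0x1F600))   # 128512 -> a valid emoji code point
print(safe_to_int(0x110000))  # -1 -> beyond the Unicode range, rejected
```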
@ -18,6 +18,7 @@
#ifndef POCO_UTIL_NO_XMLCONFIGURATION


#include "Poco/String.h"
#include "Poco/SAX/InputSource.h"
#include "Poco/DOM/DOMParser.h"
#include "Poco/DOM/Element.h"
@ -28,6 +29,8 @@
#include "Poco/NumberParser.h"
#include "Poco/NumberFormatter.h"
#include <unordered_map>
#include <algorithm>
#include <iterator>


namespace Poco {
@ -275,8 +278,9 @@ void XMLConfiguration::enumerate(const std::string& key, Keys& range) const
	{
		if (pChild->nodeType() == Poco::XML::Node::ELEMENT_NODE)
		{
			const std::string& nodeName = pChild->nodeName();
			std::string nodeName = pChild->nodeName();
			size_t& count = keys[nodeName];
			replaceInPlace(nodeName, ".", "\\.");
			if (count)
				range.push_back(nodeName + "[" + NumberFormatter::format(count) + "]");
			else
@ -379,7 +383,21 @@ Poco::XML::Node* XMLConfiguration::findNode(std::string::const_iterator& it, con
	{
		while (it != end && *it == _delim) ++it;
		std::string key;
		while (it != end && *it != _delim && *it != '[') key += *it++;
		while (it != end)
		{
			if (*it == '\\' && std::distance(it, end) > 1)
			{
				// Skip backslash, copy only the char after it
				std::advance(it, 1);
				key += *it++;
				continue;
			}
			if (*it == _delim)
				break;
			if (*it == '[')
				break;
			key += *it++;
		}
		return findNode(it, end, findElement(key, pNode, create), create);
	}
}
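The new `findNode` loop treats a backslash as an escape, so a literal `.` inside an element name no longer splits the key path (and `enumerate` writes keys back with the dot escaped accordingly). A Python sketch of the same tokenization, with the delimiter and escape rules inferred from the patch:

```python
def split_key(path: str, delim: str = ".") -> list[str]:
    # A backslash escapes the next character, so "file\.name" keeps its dot.
    keys, current, it = [], "", iter(path)
    for ch in it:
        if ch == "\\":
            current += next(it, "")  # copy the escaped character verbatim
        elif ch == delim:
            keys.append(current)
            current = ""
        else:
            current += ch
    keys.append(current)
    return keys

print(split_key(r"logger.file\.name"))  # ['logger', 'file.name']
```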
@ -2,11 +2,11 @@

# NOTE: has nothing in common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54481)
SET(VERSION_MAJOR 23)
SET(VERSION_MINOR 12)
SET(VERSION_REVISION 54482)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 1)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 05bc8ef1e02b9c7332f08091775b255d191341bf)
SET(VERSION_DESCRIBE v23.12.1.1-testing)
SET(VERSION_STRING 23.12.1.1)
SET(VERSION_GITHASH a2faa65b080a587026c86844f3a20c74d23a86f8)
SET(VERSION_DESCRIBE v24.1.1.1-testing)
SET(VERSION_STRING 24.1.1.1)
# end of autochange
@ -12,6 +12,8 @@ elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin")
    set (OS_DARWIN 1)
    add_definitions(-D OS_DARWIN)
    # For MAP_ANON/MAP_ANONYMOUS
    add_definitions(-D _DARWIN_C_SOURCE)
elseif (CMAKE_SYSTEM_NAME MATCHES "SunOS")
    set (OS_SUNOS 1)
    add_definitions(-D OS_SUNOS)
@ -73,8 +75,3 @@ if (CMAKE_CROSSCOMPILING)

    message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
endif ()

if (USE_MUSL)
    # Does not work for unknown reason
    set (ENABLE_RUST OFF CACHE INTERNAL "")
endif ()
1  contrib/CMakeLists.txt  vendored
@ -44,6 +44,7 @@ else ()
endif ()
add_contrib (miniselect-cmake miniselect)
add_contrib (pdqsort-cmake pdqsort)
add_contrib (pocketfft-cmake pocketfft)
add_contrib (crc32-vpmsum-cmake crc32-vpmsum)
add_contrib (sparsehash-c11-cmake sparsehash-c11)
add_contrib (abseil-cpp-cmake abseil-cpp)
2  contrib/azure  vendored
@ -1 +1 @@
Subproject commit 352ff0a61cb319ac1cc38c4058443ddf70147530
Subproject commit 060c54dfb0abe869c065143303a9d3e9c54c29e3
@ -8,37 +8,21 @@ endif()
set(AZURE_DIR "${ClickHouse_SOURCE_DIR}/contrib/azure")
set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk")

file(GLOB AZURE_SDK_CORE_SRC
file(GLOB AZURE_SDK_SRC
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.hpp"
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/winhttp/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/private/*.hpp"
)

file(GLOB AZURE_SDK_IDENTITY_SRC
    "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/private/*.hpp"
)

file(GLOB AZURE_SDK_STORAGE_COMMON_SRC
    "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/private/*.cpp"
)

file(GLOB AZURE_SDK_STORAGE_BLOBS_SRC
    "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.hpp"
    "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp"
    "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
)

file(GLOB AZURE_SDK_UNIFIED_SRC
    ${AZURE_SDK_CORE_SRC}
    ${AZURE_SDK_IDENTITY_SRC}
    ${AZURE_SDK_STORAGE_COMMON_SRC}
    ${AZURE_SDK_STORAGE_BLOBS_SRC}
    ${AZURE_SDK_SRC}
)

set(AZURE_SDK_INCLUDES
contrib/boringssl | 2 (vendored)
@@ -1 +1 @@
Subproject commit 8061ac62d67953e61b793042e33baf1352e67510
Subproject commit aa6d2f865a2eab01cf94f197e11e36b6de47b5b4

contrib/librdkafka | 2 (vendored)
@@ -1 +1 @@
Subproject commit 6f3b483426a8c8ec950e27e446bec175cf8b553f
Subproject commit 2d2aab6f5b79db1cfca15d7bf0dee75d00d82082
@@ -11,7 +11,9 @@ option (ENABLE_EMBEDDED_COMPILER "Enable support for JIT compilation during quer

option (ENABLE_DWARF_PARSER "Enable support for DWARF input format (uses LLVM library)" ${ENABLE_DWARF_PARSER_DEFAULT})

if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER)
option (ENABLE_BLAKE3 "Enable BLAKE3 function" ${ENABLE_LIBRARIES})

if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER AND NOT ENABLE_BLAKE3)
message(STATUS "Not using LLVM")
return()
endif()
@@ -26,61 +28,75 @@ set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
# and llvm cannot be compiled with bundled libcxx and 20 standard.
set (CMAKE_CXX_STANDARD 14)

# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
set (REQUIRED_LLVM_LIBRARIES
LLVMExecutionEngine
LLVMRuntimeDyld
LLVMAsmPrinter
LLVMDebugInfoDWARF
LLVMGlobalISel
LLVMSelectionDAG
LLVMMCDisassembler
LLVMPasses
LLVMCodeGen
LLVMipo
LLVMBitWriter
LLVMInstrumentation
LLVMScalarOpts
LLVMAggressiveInstCombine
LLVMInstCombine
LLVMVectorize
LLVMTransformUtils
LLVMTarget
LLVMAnalysis
LLVMProfileData
LLVMObject
LLVMBitReader
LLVMCore
LLVMRemarks
LLVMBitstreamReader
LLVMMCParser
LLVMMC
LLVMBinaryFormat
LLVMDebugInfoCodeView
LLVMSupport
LLVMDemangle
)
if (ARCH_AMD64)
set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
elseif (ARCH_AARCH64)
set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "")
elseif (ARCH_PPC64LE)
set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "")
elseif (ARCH_S390X)
set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "")
elseif (ARCH_RISCV64)
set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "")
endif ()


if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER)
# Only compiling blake3
set (REQUIRED_LLVM_LIBRARIES LLVMSupport)
else()
# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
set (REQUIRED_LLVM_LIBRARIES
LLVMExecutionEngine
LLVMRuntimeDyld
LLVMAsmPrinter
LLVMDebugInfoDWARF
LLVMGlobalISel
LLVMSelectionDAG
LLVMMCDisassembler
LLVMPasses
LLVMCodeGen
LLVMipo
LLVMBitWriter
LLVMInstrumentation
LLVMScalarOpts
LLVMAggressiveInstCombine
LLVMInstCombine
LLVMVectorize
LLVMTransformUtils
LLVMTarget
LLVMAnalysis
LLVMProfileData
LLVMObject
LLVMBitReader
LLVMCore
LLVMRemarks
LLVMBitstreamReader
LLVMMCParser
LLVMMC
LLVMBinaryFormat
LLVMDebugInfoCodeView
LLVMSupport
LLVMDemangle
)

if (ARCH_AMD64)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
elseif (ARCH_AARCH64)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
elseif (ARCH_PPC64LE)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen)
elseif (ARCH_S390X)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen)
elseif (ARCH_RISCV64)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen)
endif ()
endif()


# Skip useless "install" instructions from CMake:
set (LLVM_INSTALL_TOOLCHAIN_ONLY 1 CACHE INTERNAL "")

if (ARCH_AMD64)
set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
elseif (ARCH_AARCH64)
set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
elseif (ARCH_PPC64LE)
set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen)
elseif (ARCH_S390X)
set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen)
elseif (ARCH_RISCV64)
set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen)
endif ()

message (STATUS "LLVM TARGETS TO BUILD ${LLVM_TARGETS_TO_BUILD}")

set (CMAKE_INSTALL_RPATH "ON") # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
@@ -1,4 +1,4 @@
if(OS_LINUX AND TARGET OpenSSL::SSL)
if((OS_LINUX OR OS_DARWIN) AND TARGET OpenSSL::SSL)
option(ENABLE_MYSQL "Enable MySQL" ${ENABLE_LIBRARIES})
else ()
option(ENABLE_MYSQL "Enable MySQL" FALSE)
@@ -73,7 +73,7 @@ set(HAVE_SYS_TYPES_H 1)
set(HAVE_SYS_UN_H 1)
set(HAVE_UNISTD_H 1)
set(HAVE_UTIME_H 1)
set(HAVE_UCONTEXT_H 1)
set(HAVE_UCONTEXT_H 0)
set(HAVE_ALLOCA 1)
set(HAVE_DLERROR 0)
set(HAVE_DLOPEN 0)
@@ -116,9 +116,13 @@ CONFIGURE_FILE(${CC_SOURCE_DIR}/include/ma_config.h.in
CONFIGURE_FILE(${CC_SOURCE_DIR}/include/mariadb_version.h.in
${CC_BINARY_DIR}/include-public/mariadb_version.h)

if(WITH_SSL)
if (WITH_SSL)
set(SYSTEM_LIBS ${SYSTEM_LIBS} ${SSL_LIBRARIES})
endif()
endif ()

if (OS_DARWIN)
set(SYSTEM_LIBS ${SYSTEM_LIBS} iconv)
endif ()


function(REGISTER_PLUGIN)
@@ -227,15 +231,8 @@ ${CC_SOURCE_DIR}/libmariadb/secure/openssl_crypt.c
${CC_BINARY_DIR}/libmariadb/ma_client_plugin.c
)

if(ICONV_INCLUDE_DIR)
include_directories(BEFORE ${ICONV_INCLUDE_DIR})
endif()
add_definitions(-DLIBICONV_PLUG)

if(WITH_DYNCOL)
set(LIBMARIADB_SOURCES ${LIBMARIADB_SOURCES} ${CC_SOURCE_DIR}/libmariadb/mariadb_dyncol.c)
endif()

set(LIBMARIADB_SOURCES ${LIBMARIADB_SOURCES} ${CC_SOURCE_DIR}/libmariadb/mariadb_async.c ${CC_SOURCE_DIR}/libmariadb/ma_context.c)
contrib/pocketfft | 1 (vendored, new submodule)
@@ -0,0 +1 @@
Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546

contrib/pocketfft-cmake/CMakeLists.txt | 10 (new file)
@@ -0,0 +1,10 @@
option (ENABLE_POCKETFFT "Enable pocketfft" ${ENABLE_LIBRARIES})

if (NOT ENABLE_POCKETFFT)
message(STATUS "Not using pocketfft")
return()
endif()

add_library(_pocketfft INTERFACE)
target_include_directories(_pocketfft INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/pocketfft)
add_library(ch_contrib::pocketfft ALIAS _pocketfft)
@@ -125,6 +125,7 @@
"docker/test/server-jepsen",
"docker/test/sqllogic",
"docker/test/sqltest",
"docker/test/clickbench",
"docker/test/stateless"
]
},
@@ -145,6 +146,10 @@
"name": "clickhouse/server-jepsen-test",
"dependent": []
},
"docker/test/clickbench": {
"name": "clickhouse/clickbench",
"dependent": []
},
"docker/test/install/deb": {
"name": "clickhouse/install-deb-test",
"dependent": []
@@ -34,8 +34,9 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.1.2711"
ARG VERSION="23.12.1.1368"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@@ -47,15 +48,27 @@ ARG PACKAGES="clickhouse-keeper"

ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& for package in ${PACKAGES}; do \
( \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& cd /tmp && rm -f /tmp/*tgz && rm -f /tmp/*tgz.sha512 |: \
&& if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
echo "Get ${url}" \
&& wget -c -q "$url" \
; done \
else \
for package in ${PACKAGES}; do \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
&& sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
&& tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
) \
; done \
fi \
&& cat *.tgz.sha512 | sha512sum -c \
&& for file in *.tgz; do \
if [ -f "$file" ]; then \
echo "Unpacking $file"; \
tar xvzf "$file" --strip-components=1 -C /; \
fi \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \
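The tgz install path above rewrites the `/output/` prefix recorded in each `.sha512` file before running `sha512sum -c`. A minimal Python sketch of that verification step (a hypothetical helper, assuming the usual `<hash>  /output/<name>.tgz` checksum format):

```python
import hashlib
from pathlib import Path

def verify_tgz_checksum(tgz: Path, sha512_file: Path) -> bool:
    """Compare the hash recorded in the .sha512 file against the downloaded archive."""
    recorded_hash = sha512_file.read_text().split()[0]  # first field is the hex digest
    actual_hash = hashlib.sha512(tgz.read_bytes()).hexdigest()
    return recorded_hash == actual_hash
```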
@@ -3,10 +3,10 @@ compilers and build settings. Correctly configured Docker daemon is single depen

Usage:

Build deb package with `clang-14` in `debug` mode:
Build deb package with `clang-17` in `debug` mode:
```
$ mkdir deb/test_output
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-14 --debug-build
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-17 --debug-build
$ ls -l deb/test_output
-rw-r--r-- 1 root root 3730 clickhouse-client_22.2.2+debug_all.deb
-rw-r--r-- 1 root root 84221888 clickhouse-common-static_22.2.2+debug_amd64.deb
@@ -17,11 +17,11 @@ $ ls -l deb/test_output

```

Build ClickHouse binary with `clang-14` and `address` sanitizer in `relwithdebuginfo`
Build ClickHouse binary with `clang-17` and `address` sanitizer in `relwithdebuginfo`
mode:
```
$ mkdir $HOME/some_clickhouse
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-14 --sanitizer=address
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-17 --sanitizer=address
$ ls -l $HOME/some_clickhouse
-rwxr-xr-x 1 root root 787061952 clickhouse
lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse
@@ -49,6 +49,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
chmod 777 -R /rust && \
rustup toolchain install nightly-2023-07-04 && \
rustup default nightly-2023-07-04 && \
rustup toolchain remove stable && \
rustup component add rust-src && \
rustup target add x86_64-unknown-linux-gnu && \
rustup target add aarch64-unknown-linux-gnu && \

@@ -149,7 +149,7 @@ then
mkdir -p "$PERF_OUTPUT"
cp -r ../tests/performance "$PERF_OUTPUT"
cp -r ../tests/config/top_level_domains "$PERF_OUTPUT"
cp -r ../docker/test/performance-comparison/config "$PERF_OUTPUT" ||:
cp -r ../tests/performance/scripts/config "$PERF_OUTPUT" ||:
for SRC in /output/clickhouse*; do
# Copy all clickhouse* files except packages and bridges
[[ "$SRC" != *.* ]] && [[ "$SRC" != *-bridge ]] && \
@@ -160,7 +160,7 @@ then
ln -sf clickhouse "$PERF_OUTPUT"/clickhouse-keeper
fi

cp -r ../docker/test/performance-comparison "$PERF_OUTPUT"/scripts ||:
cp -r ../tests/performance/scripts "$PERF_OUTPUT"/scripts ||:
prepare_combined_output "$PERF_OUTPUT"

# We have to know the revision that corresponds to this binary build.
@@ -32,8 +32,9 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.1.2711"
ARG VERSION="23.12.1.1368"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@@ -43,15 +44,26 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# The same uid / gid (101) is used both for alpine and ubuntu.

RUN arch=${TARGETARCH:-amd64} \
&& for package in ${PACKAGES}; do \
( \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& cd /tmp \
&& if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
echo "Get ${url}" \
&& wget -c -q "$url" \
; done \
else \
for package in ${PACKAGES}; do \
echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
&& sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
&& tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
) \
; done \
fi \
&& cat *.tgz.sha512 | sed 's:/output/:/tmp/:' | sha512sum -c \
&& for file in *.tgz; do \
if [ -f "$file" ]; then \
echo "Unpacking $file"; \
tar xvzf "$file" --strip-components=1 -C /; \
fi \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \
@@ -30,13 +30,14 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.11.1.2711"
ARG VERSION="23.12.1.1368"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ...
ARG deb_location_url=""
ARG DIRECT_DOWNLOAD_URLS=""

# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
@@ -44,6 +45,18 @@ ARG single_binary_location_url=""

ARG TARGETARCH

# install from direct URL
RUN if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from custom predefined urls with deb packages: ${DIRECT_DOWNLOAD_URLS}" \
&& rm -rf /tmp/clickhouse_debs \
&& mkdir -p /tmp/clickhouse_debs \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
wget --progress=bar:force:noscroll "$url" -P /tmp/clickhouse_debs || exit 1 \
; done \
&& dpkg -i /tmp/clickhouse_debs/*.deb \
&& rm -rf /tmp/* ; \
fi

# install from a web location with deb packages
RUN arch="${TARGETARCH:-amd64}" \
&& if [ -n "${deb_location_url}" ]; then \
@@ -12,6 +12,7 @@ RUN apt-get update \
ripgrep \
zstd \
locales \
sudo \
--yes --no-install-recommends

# Sanitizer options for services (clickhouse-server)

@@ -21,7 +21,7 @@ EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "}

# trace_log needs more columns for symbolization
EXTRA_COLUMNS_TRACE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), lines Array(LowCardinality(String)), "
EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> toLowCardinality(demangle(addressToSymbol(x))), trace) AS symbols, arrayMap(x -> toLowCardinality(addressToLine(x)), trace) AS lines"
EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> demangle(addressToSymbol(x)), trace)::Array(LowCardinality(String)) AS symbols, arrayMap(x -> addressToLine(x), trace)::Array(LowCardinality(String)) AS lines"


function __set_connection_args
docker/test/clickbench/Dockerfile | 10 (new file)
@@ -0,0 +1,10 @@
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG

ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

COPY *.sh /
COPY *.sql /

CMD ["/bin/bash", "/run.sh"]
docker/test/clickbench/create.sql | 112 (new file)
@@ -0,0 +1,112 @@
ATTACH TABLE hits UUID 'c449dfbf-ba06-4d13-abec-8396559eb955'
(
WatchID BIGINT NOT NULL,
JavaEnable SMALLINT NOT NULL,
Title TEXT NOT NULL,
GoodEvent SMALLINT NOT NULL,
EventTime TIMESTAMP NOT NULL,
EventDate Date NOT NULL,
CounterID INTEGER NOT NULL,
ClientIP INTEGER NOT NULL,
RegionID INTEGER NOT NULL,
UserID BIGINT NOT NULL,
CounterClass SMALLINT NOT NULL,
OS SMALLINT NOT NULL,
UserAgent SMALLINT NOT NULL,
URL TEXT NOT NULL,
Referer TEXT NOT NULL,
IsRefresh SMALLINT NOT NULL,
RefererCategoryID SMALLINT NOT NULL,
RefererRegionID INTEGER NOT NULL,
URLCategoryID SMALLINT NOT NULL,
URLRegionID INTEGER NOT NULL,
ResolutionWidth SMALLINT NOT NULL,
ResolutionHeight SMALLINT NOT NULL,
ResolutionDepth SMALLINT NOT NULL,
FlashMajor SMALLINT NOT NULL,
FlashMinor SMALLINT NOT NULL,
FlashMinor2 TEXT NOT NULL,
NetMajor SMALLINT NOT NULL,
NetMinor SMALLINT NOT NULL,
UserAgentMajor SMALLINT NOT NULL,
UserAgentMinor VARCHAR(255) NOT NULL,
CookieEnable SMALLINT NOT NULL,
JavascriptEnable SMALLINT NOT NULL,
IsMobile SMALLINT NOT NULL,
MobilePhone SMALLINT NOT NULL,
MobilePhoneModel TEXT NOT NULL,
Params TEXT NOT NULL,
IPNetworkID INTEGER NOT NULL,
TraficSourceID SMALLINT NOT NULL,
SearchEngineID SMALLINT NOT NULL,
SearchPhrase TEXT NOT NULL,
AdvEngineID SMALLINT NOT NULL,
IsArtifical SMALLINT NOT NULL,
WindowClientWidth SMALLINT NOT NULL,
WindowClientHeight SMALLINT NOT NULL,
ClientTimeZone SMALLINT NOT NULL,
ClientEventTime TIMESTAMP NOT NULL,
SilverlightVersion1 SMALLINT NOT NULL,
SilverlightVersion2 SMALLINT NOT NULL,
SilverlightVersion3 INTEGER NOT NULL,
SilverlightVersion4 SMALLINT NOT NULL,
PageCharset TEXT NOT NULL,
CodeVersion INTEGER NOT NULL,
IsLink SMALLINT NOT NULL,
IsDownload SMALLINT NOT NULL,
IsNotBounce SMALLINT NOT NULL,
FUniqID BIGINT NOT NULL,
OriginalURL TEXT NOT NULL,
HID INTEGER NOT NULL,
IsOldCounter SMALLINT NOT NULL,
IsEvent SMALLINT NOT NULL,
IsParameter SMALLINT NOT NULL,
DontCountHits SMALLINT NOT NULL,
WithHash SMALLINT NOT NULL,
HitColor CHAR NOT NULL,
LocalEventTime TIMESTAMP NOT NULL,
Age SMALLINT NOT NULL,
Sex SMALLINT NOT NULL,
Income SMALLINT NOT NULL,
Interests SMALLINT NOT NULL,
Robotness SMALLINT NOT NULL,
RemoteIP INTEGER NOT NULL,
WindowName INTEGER NOT NULL,
OpenerName INTEGER NOT NULL,
HistoryLength SMALLINT NOT NULL,
BrowserLanguage TEXT NOT NULL,
BrowserCountry TEXT NOT NULL,
SocialNetwork TEXT NOT NULL,
SocialAction TEXT NOT NULL,
HTTPError SMALLINT NOT NULL,
SendTiming INTEGER NOT NULL,
DNSTiming INTEGER NOT NULL,
ConnectTiming INTEGER NOT NULL,
ResponseStartTiming INTEGER NOT NULL,
ResponseEndTiming INTEGER NOT NULL,
FetchTiming INTEGER NOT NULL,
SocialSourceNetworkID SMALLINT NOT NULL,
SocialSourcePage TEXT NOT NULL,
ParamPrice BIGINT NOT NULL,
ParamOrderID TEXT NOT NULL,
ParamCurrency TEXT NOT NULL,
ParamCurrencyID SMALLINT NOT NULL,
OpenstatServiceName TEXT NOT NULL,
OpenstatCampaignID TEXT NOT NULL,
OpenstatAdID TEXT NOT NULL,
OpenstatSourceID TEXT NOT NULL,
UTMSource TEXT NOT NULL,
UTMMedium TEXT NOT NULL,
UTMCampaign TEXT NOT NULL,
UTMContent TEXT NOT NULL,
UTMTerm TEXT NOT NULL,
FromTag TEXT NOT NULL,
HasGCLID SMALLINT NOT NULL,
RefererHash BIGINT NOT NULL,
URLHash BIGINT NOT NULL,
CLID INTEGER NOT NULL,
PRIMARY KEY (CounterID, EventDate, UserID, EventTime, WatchID)
)
ENGINE = MergeTree
SETTINGS disk = disk(type = cache, path = '/dev/shm/clickhouse/', max_size = '16G',
disk = disk(type = web, endpoint = 'https://clickhouse-datasets-web.s3.us-east-1.amazonaws.com/'));
docker/test/clickbench/queries.sql | 43 (new file)
@@ -0,0 +1,43 @@
SELECT COUNT(*) FROM hits;
SELECT COUNT(*) FROM hits WHERE AdvEngineID <> 0;
SELECT SUM(AdvEngineID), COUNT(*), AVG(ResolutionWidth) FROM hits;
SELECT AVG(UserID) FROM hits;
SELECT COUNT(DISTINCT UserID) FROM hits;
SELECT COUNT(DISTINCT SearchPhrase) FROM hits;
SELECT MIN(EventDate), MAX(EventDate) FROM hits;
SELECT AdvEngineID, COUNT(*) FROM hits WHERE AdvEngineID <> 0 GROUP BY AdvEngineID ORDER BY COUNT(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, SUM(AdvEngineID), COUNT(*) AS c, AVG(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE MobilePhoneModel <> '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE MobilePhoneModel <> '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, COUNT(*) AS c FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, COUNT(*) AS c FROM hits WHERE SearchPhrase <> '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, COUNT(*) FROM hits GROUP BY UserID ORDER BY COUNT(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY COUNT(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY COUNT(*) DESC LIMIT 10;
SELECT UserID FROM hits WHERE UserID = 435090932899640449;
SELECT COUNT(*) FROM hits WHERE URL LIKE '%google%';
SELECT SearchPhrase, MIN(URL), COUNT(*) AS c FROM hits WHERE URL LIKE '%google%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, MIN(URL), MIN(Title), COUNT(*) AS c, COUNT(DISTINCT UserID) FROM hits WHERE Title LIKE '%Google%' AND URL NOT LIKE '%.google.%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM hits WHERE URL LIKE '%google%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, AVG(length(URL)) AS l, COUNT(*) AS c FROM hits WHERE URL <> '' GROUP BY CounterID HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS k, AVG(length(Referer)) AS l, COUNT(*) AS c, MIN(Referer) FROM hits WHERE Referer <> '' GROUP BY k HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT SUM(ResolutionWidth), SUM(ResolutionWidth + 1), SUM(ResolutionWidth + 2), SUM(ResolutionWidth + 3), SUM(ResolutionWidth + 4), SUM(ResolutionWidth + 5), SUM(ResolutionWidth + 6), SUM(ResolutionWidth + 7), SUM(ResolutionWidth + 8), SUM(ResolutionWidth + 9), SUM(ResolutionWidth + 10), SUM(ResolutionWidth + 11), SUM(ResolutionWidth + 12), SUM(ResolutionWidth + 13), SUM(ResolutionWidth + 14), SUM(ResolutionWidth + 15), SUM(ResolutionWidth + 16), SUM(ResolutionWidth + 17), SUM(ResolutionWidth + 18), SUM(ResolutionWidth + 19), SUM(ResolutionWidth + 20), SUM(ResolutionWidth + 21), SUM(ResolutionWidth + 22), SUM(ResolutionWidth + 23), SUM(ResolutionWidth + 24), SUM(ResolutionWidth + 25), SUM(ResolutionWidth + 26), SUM(ResolutionWidth + 27), SUM(ResolutionWidth + 28), SUM(ResolutionWidth + 29), SUM(ResolutionWidth + 30), SUM(ResolutionWidth + 31), SUM(ResolutionWidth + 32), SUM(ResolutionWidth + 33), SUM(ResolutionWidth + 34), SUM(ResolutionWidth + 35), SUM(ResolutionWidth + 36), SUM(ResolutionWidth + 37), SUM(ResolutionWidth + 38), SUM(ResolutionWidth + 39), SUM(ResolutionWidth + 40), SUM(ResolutionWidth + 41), SUM(ResolutionWidth + 42), SUM(ResolutionWidth + 43), SUM(ResolutionWidth + 44), SUM(ResolutionWidth + 45), SUM(ResolutionWidth + 46), SUM(ResolutionWidth + 47), SUM(ResolutionWidth + 48), SUM(ResolutionWidth + 49), SUM(ResolutionWidth + 50), SUM(ResolutionWidth + 51), SUM(ResolutionWidth + 52), SUM(ResolutionWidth + 53), SUM(ResolutionWidth + 54), SUM(ResolutionWidth + 55), SUM(ResolutionWidth + 56), SUM(ResolutionWidth + 57), SUM(ResolutionWidth + 58), SUM(ResolutionWidth + 59), SUM(ResolutionWidth + 60), SUM(ResolutionWidth + 61), SUM(ResolutionWidth + 62), SUM(ResolutionWidth + 63), SUM(ResolutionWidth + 64), SUM(ResolutionWidth + 65), SUM(ResolutionWidth + 66), SUM(ResolutionWidth + 67), SUM(ResolutionWidth + 68), SUM(ResolutionWidth + 69), SUM(ResolutionWidth + 70), SUM(ResolutionWidth + 71), SUM(ResolutionWidth + 72), SUM(ResolutionWidth + 73), SUM(ResolutionWidth + 74), SUM(ResolutionWidth + 75), SUM(ResolutionWidth + 76), SUM(ResolutionWidth + 77), SUM(ResolutionWidth + 78), SUM(ResolutionWidth + 79), SUM(ResolutionWidth + 80), SUM(ResolutionWidth + 81), SUM(ResolutionWidth + 82), SUM(ResolutionWidth + 83), SUM(ResolutionWidth + 84), SUM(ResolutionWidth + 85), SUM(ResolutionWidth + 86), SUM(ResolutionWidth + 87), SUM(ResolutionWidth + 88), SUM(ResolutionWidth + 89) FROM hits;
SELECT SearchEngineID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, COUNT(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, COUNT(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, COUNT(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND IsRefresh = 0 AND URL <> '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND IsRefresh = 0 AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND IsLink <> 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 10 OFFSET 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 10 OFFSET 1000;
SELECT URLHash, EventDate, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 3594120000172545465 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 10 OFFSET 100;
SELECT WindowClientWidth, WindowClientHeight, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND DontCountHits = 0 AND URLHash = 2868770270353813622 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10 OFFSET 10000;
SELECT DATE_TRUNC('minute', EventTime) AS M, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-14' AND EventDate <= '2013-07-15' AND IsRefresh = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime) LIMIT 10 OFFSET 1000;
docker/test/clickbench/run.sh | 79 (new executable file)
@@ -0,0 +1,79 @@
#!/bin/bash

SCRIPT_PID=$!
(sleep 1200 && kill -9 $SCRIPT_PID) &

# shellcheck disable=SC1091
source /setup_export_logs.sh

# fail on errors, verbose and export all env variables
set -e -x -a

dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb

# A directory for cache
mkdir /dev/shm/clickhouse
chown clickhouse:clickhouse /dev/shm/clickhouse

# Allow introspection functions, needed for sending the logs
echo "
profiles:
    default:
        allow_introspection_functions: 1
" > /etc/clickhouse-server/users.d/allow_introspection_functions.yaml

# Enable text_log
echo "
text_log:
" > /etc/clickhouse-server/config.d/text_log.yaml

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

clickhouse start

# Wait for the server to start, but not for too long.
for _ in {1..100}
do
    clickhouse-client --query "SELECT 1" && break
    sleep 1
done

setup_logs_replication

# Load the data

clickhouse-client --time < /create.sql

# Run the queries

set +x

TRIES=3
QUERY_NUM=1
while read -r query; do
    echo -n "["
    for i in $(seq 1 $TRIES); do
        RES=$(clickhouse-client --query_id "q${QUERY_NUM}-${i}" --time --format Null --query "$query" --progress 0 2>&1 ||:)
        echo -n "${RES}"
        [[ "$i" != "$TRIES" ]] && echo -n ", "

        echo "${QUERY_NUM},${i},${RES}" >> /test_output/test_results.tsv
    done
    echo "],"

    QUERY_NUM=$((QUERY_NUM + 1))
done < /queries.sql

set -x

clickhouse-client --query "SELECT total_bytes FROM system.tables WHERE name = 'hits' AND database = 'default'"

clickhouse-client -q "system flush logs" ||:
stop_logs_replication
clickhouse stop

mv /var/log/clickhouse-server/* /test_output/

echo -e "success\tClickBench finished" > /test_output/check_status.tsv
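For reference, the timing loop above can be read as the following Python sketch (an approximation, assuming `clickhouse-client` is on PATH and that `--time` prints the elapsed seconds on stderr):

```python
import subprocess

TRIES = 3

with open("queries.sql") as f:
    queries = [line.strip() for line in f if line.strip()]

for num, query in enumerate(queries, start=1):
    timings = []
    for i in range(1, TRIES + 1):
        # --format Null discards the result set; --time reports elapsed seconds
        res = subprocess.run(
            ["clickhouse-client", "--query_id", f"q{num}-{i}",
             "--time", "--format", "Null", "--query", query],
            capture_output=True, text=True,
        )
        timings.append(res.stderr.strip())
    print("[" + ", ".join(timings) + "],")
```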
@@ -74,7 +74,7 @@ RUN python3 -m pip install --no-cache-dir \
delta-spark==2.3.0 \
dict2xml \
dicttoxml \
docker \
docker==6.1.3 \
docker-compose==1.29.2 \
grpcio \
grpcio-tools \

@@ -34,7 +34,7 @@ services:

# Empty container to run proxy resolver.
resolver:
image: clickhouse/python-bottle
image: clickhouse/python-bottle:${DOCKER_PYTHON_BOTTLE_TAG:-latest}
expose:
- "8080"
tty: true
@@ -39,18 +39,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

COPY * /
COPY run.sh /

# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"]
CMD ["bash", "/run.sh"]

# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison
docker/test/performance-comparison/run.sh | 18 (new file)
@@ -0,0 +1,18 @@
#!/bin/bash

entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
[ ! -e "$entry" ] && echo "ERROR: test scripts are not found" && exit 1

# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
numactl --cpunodebind=$node --membind=$node $entry
@@ -24,6 +24,28 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

cache_policy=""
if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then
    cache_policy="SLRU"
else
    cache_policy="LRU"
fi

echo "Using cache policy: $cache_policy"

if [ "$cache_policy" = "SLRU" ]; then
    sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<cache_policy>LRU</cache_policy>|<cache_policy>SLRU</cache_policy>|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
    mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
fi

if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
    # It is not needed, we will explicitly create tables on s3.
    # We do not have statefull tests with s3 storage run in public repository, but this is needed for another repository.
    rm /etc/clickhouse-server/config.d/s3_storage_policy_for_merge_tree_by_default.xml
fi

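The cache policy here is not random in the usual sense: it flips with the parity of the day of the month, so both policies get regular CI coverage. The same selection expressed in Python (a sketch):

```python
from datetime import date

# Odd days exercise SLRU, even days exercise LRU (mirrors `$(date +%-d) % 2` above)
cache_policy = "SLRU" if date.today().day % 2 == 1 else "LRU"
print(f"Using cache policy: {cache_policy}")
```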
function start()
{
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@@ -107,8 +129,76 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
else
clickhouse-client --query "CREATE DATABASE test"
clickhouse-client --query "SHOW TABLES FROM test"
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16,
EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32,
UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String,
RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16),
URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8,
FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16,
UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8,
MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16,
SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16,
ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32,
SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8,
FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8,
HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8,
GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32,
HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String,
HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32,
FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32,
LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32,
RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String,
ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String,
OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String,
UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64,
URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String,
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8,
VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32,
Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String,
EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String,
AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32),
RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32,
SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32,
ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32,
SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16,
UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16,
FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8,
FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8,
Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8,
BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16),
Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32),
WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64,
ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32,
ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32,
ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32,
ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16,
ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32,
OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String,
UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime,
PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8,
PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16),
CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64,
StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64,
OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64,
UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32,
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32,
DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16))
ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"

clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
else
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
fi
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
fi
@@ -128,6 +218,10 @@ function run_tests()
ADDITIONAL_OPTIONS+=('--replicated-database')
fi

if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--s3-storage')
fi

if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--db-engine=Ordinary')
fi
@@ -135,7 +229,7 @@ function run_tests()
set +e

if [[ -n "$USE_PARALLEL_REPLICAS" ]] && [[ "$USE_PARALLEL_REPLICAS" -eq 1 ]]; then
clickhouse-test --client="clickhouse-client --use_hedged_requests=0 --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \
clickhouse-test --client="clickhouse-client --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \
--max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" \
-j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --no-parallel-replicas --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
@@ -30,7 +30,7 @@ def build_url(base_url, dataset):
    return os.path.join(base_url, dataset, "partitions", AVAILABLE_DATASETS[dataset])


def dowload_with_progress(url, path):
def download_with_progress(url, path):
    logging.info("Downloading from %s to temp path %s", url, path)
    for i in range(RETRIES_COUNT):
        try:
@@ -110,7 +110,7 @@ if __name__ == "__main__":
        temp_archive_path = _get_temp_file_name()
        try:
            download_url_for_dataset = build_url(args.url_prefix, dataset)
            dowload_with_progress(download_url_for_dataset, temp_archive_path)
            download_with_progress(download_url_for_dataset, temp_archive_path)
            unpack_to_clickhouse_directory(temp_archive_path, args.clickhouse_data_path)
        except Exception as ex:
            logging.info("Some exception occured %s", str(ex))
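The renamed helper retries a failing download a fixed number of times; roughly, in the spirit of the script (a simplified sketch, not the script's exact body):

```python
import logging
import time
import urllib.request

RETRIES_COUNT = 5

def download_with_progress(url: str, path: str) -> None:
    logging.info("Downloading from %s to temp path %s", url, path)
    for attempt in range(1, RETRIES_COUNT + 1):
        try:
            urllib.request.urlretrieve(url, path)
            return
        except Exception:
            if attempt == RETRIES_COUNT:
                raise
            time.sleep(attempt)  # simple linear backoff between retries
```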
@@ -58,6 +58,7 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th

# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
fi

# For flaky check we also enable thread fuzzer
@@ -216,11 +217,11 @@ export -f run_tests
if [ "$NUM_TRIES" -gt "1" ]; then
# We don't run tests with Ordinary database in PRs, only in master.
# So run new/changed tests with Ordinary at least once in flaky check.
timeout "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
timeout_with_logging "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
| sed 's/All tests have finished//' | sed 's/No tests were run//' ||:
fi

timeout "$MAX_RUN_TIME" bash -c run_tests ||:
timeout_with_logging "$MAX_RUN_TIME" bash -c run_tests ||:

echo "Files in current directory"
ls -la ./
@@ -300,9 +301,6 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.zst ||:
# FIXME: remove once only github actions will be left
rm /var/log/clickhouse-server/clickhouse-server1.log
rm /var/log/clickhouse-server/clickhouse-server2.log
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
@@ -35,4 +35,17 @@ function fn_exists() {
    declare -F "$1" > /dev/null;
}

function timeout_with_logging() {
    local exit_code=0

    timeout "${@}" || exit_code="${?}"

    if [[ "${exit_code}" -eq "124" ]]
    then
        echo "The command 'timeout ${*}' has been killed by timeout"
    fi

    return $exit_code
}

# vi: ft=bash
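`timeout_with_logging` only adds a log line when `timeout` reports exit code 124 (the command was killed), then passes the exit code through. A comparable Python sketch (hypothetical, not in the patch):

```python
import subprocess
import sys

def timeout_with_logging(seconds: float, *cmd: str) -> int:
    """Run cmd with a time limit; log if it was killed by the timeout."""
    try:
        return subprocess.run(list(cmd), timeout=seconds).returncode
    except subprocess.TimeoutExpired:
        print(f"The command '{' '.join(cmd)}' has been killed by timeout", file=sys.stderr)
        return 124  # same code GNU timeout uses for a timed-out command
```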
@@ -65,9 +65,27 @@ chmod 777 -R /var/lib/clickhouse
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"


stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log

# Randomize cache policies.
cache_policy=""
if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then
    cache_policy="SLRU"
else
    cache_policy="LRU"
fi

echo "Using cache policy: $cache_policy"

if [ "$cache_policy" = "SLRU" ]; then
    sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<cache_policy>LRU</cache_policy>|<cache_policy>SLRU</cache_policy>|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
    mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
fi

start

clickhouse-client --query "SHOW TABLES FROM datasets"
@@ -191,6 +209,13 @@ sudo cat /etc/clickhouse-server/config.d/logger_trace.xml \
> /etc/clickhouse-server/config.d/logger_trace.xml.tmp
mv /etc/clickhouse-server/config.d/logger_trace.xml.tmp /etc/clickhouse-server/config.d/logger_trace.xml

if [ "$cache_policy" = "SLRU" ]; then
    sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<cache_policy>LRU</cache_policy>|<cache_policy>SLRU</cache_policy>|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
    mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
fi

# Randomize async_load_databases
if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then
    sudo echo "<clickhouse><async_load_databases>true</async_load_databases></clickhouse>" \
@ -23,6 +23,7 @@ echo "Check submodules" | ts
 ./check-submodules |& tee /test_output/submodules_output.txt
 echo "Check shell scripts with shellcheck" | ts
 ./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt

 /process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
 echo "Check help for changelog generator works" | ts
 cd ../changelog || exit 1
@ -77,6 +77,7 @@ remove_keeper_config "create_if_not_exists" "[01]"
 # it contains some new settings, but we can safely remove it
 rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
+rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
 rm /etc/clickhouse-server/users.d/s3_cache_new.xml
 rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
@ -115,6 +116,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau
 # it contains some new settings, but we can safely remove it
 rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
+rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
 rm /etc/clickhouse-server/users.d/s3_cache_new.xml
 rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
22 docs/changelogs/v23.11.2.11-stable.md Normal file
@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.11.2.11-stable (6e5411358c8) FIXME as compared to v23.11.1.2711-stable (05bc8ef1e02)

#### Improvement
* Backported in [#57661](https://github.com/ClickHouse/ClickHouse/issues/57661): Handle the SIGABRT case when getting the PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix SIGSEGV for aggregation of sparse columns with any() RESPECT NULLS [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)).
* Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631) [#57766](https://github.com/ClickHouse/ClickHouse/pull/57766) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
26 docs/changelogs/v23.11.3.23-stable.md Normal file
@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.11.3.23-stable (a14ab450b0e) FIXME as compared to v23.11.2.11-stable (6e5411358c8)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix handling of unavailable replicas before the first request happened [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)).
* Revert "Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631)" [#58031](https://github.com/ClickHouse/ClickHouse/pull/58031) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

#### NO CL CATEGORY

* Backported in [#57918](https://github.com/ClickHouse/ClickHouse/issues/57918):. [#57909](https://github.com/ClickHouse/ClickHouse/pull/57909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).
* Always use `pread` for reading cache segments [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)).
327 docs/changelogs/v23.12.1.1368-stable.md Normal file
@ -0,0 +1,327 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.12.1.1368-stable (a2faa65b080) FIXME as compared to v23.11.1.2711-stable (05bc8ef1e02)

#### Backward Incompatible Change
* Fix check for non-deterministic functions in TTL expressions. Previously, you could create a TTL expression with non-deterministic functions in some cases, which could lead to undefined behavior later. This fixes [#37250](https://github.com/ClickHouse/ClickHouse/issues/37250). Disallow TTL expressions that don't depend on any columns of a table by default. It can be allowed back by `SET allow_suspicious_ttl_expressions = 1` or `SET compatibility = '23.11'`. Closes [#37286](https://github.com/ClickHouse/ClickHouse/issues/37286). [#51858](https://github.com/ClickHouse/ClickHouse/pull/51858) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove function `arrayFold` because it has a bug. This closes [#57816](https://github.com/ClickHouse/ClickHouse/issues/57816). This closes [#57458](https://github.com/ClickHouse/ClickHouse/issues/57458). [#57836](https://github.com/ClickHouse/ClickHouse/pull/57836) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove the feature of `is_deleted` row in ReplacingMergeTree and the `CLEANUP` modifier for the OPTIMIZE query. This fixes [#57930](https://github.com/ClickHouse/ClickHouse/issues/57930). This closes [#54988](https://github.com/ClickHouse/ClickHouse/issues/54988). This closes [#54570](https://github.com/ClickHouse/ClickHouse/issues/54570). This closes [#50346](https://github.com/ClickHouse/ClickHouse/issues/50346). This closes [#47579](https://github.com/ClickHouse/ClickHouse/issues/47579). The feature has to be removed because it is not good. We have to remove it as quickly as possible, because there is no other option. [#57932](https://github.com/ClickHouse/ClickHouse/pull/57932) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The MergeTree setting `clean_deleted_rows` is deprecated, it has no effect anymore. The `CLEANUP` keyword for `OPTIMIZE` is not allowed by default (unless `allow_experimental_replacing_merge_with_cleanup` is enabled). [#58267](https://github.com/ClickHouse/ClickHouse/pull/58267) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### New Feature
* Allow disabling of HEAD request before GET request. [#54602](https://github.com/ClickHouse/ClickHouse/pull/54602) ([Fionera](https://github.com/fionera)).
* Add an HTTP endpoint for checking if Keeper is ready to accept traffic. [#55876](https://github.com/ClickHouse/ClickHouse/pull/55876) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Add 'union' mode for schema inference. In this mode the resulting table schema is the union of all files' schemas (so the schema is inferred from each file). The mode of schema inference is controlled by the setting `schema_inference_mode` with two possible values - `default` and `union`. Closes [#55428](https://github.com/ClickHouse/ClickHouse/issues/55428). [#55892](https://github.com/ClickHouse/ClickHouse/pull/55892) ([Kruglov Pavel](https://github.com/Avogar)).
* Add new setting `input_format_csv_try_infer_numbers_from_strings` that allows inferring numbers from strings in CSV format. Closes [#56455](https://github.com/ClickHouse/ClickHouse/issues/56455). [#56859](https://github.com/ClickHouse/ClickHouse/pull/56859) ([Kruglov Pavel](https://github.com/Avogar)).
* Refreshable materialized views. [#56946](https://github.com/ClickHouse/ClickHouse/pull/56946) ([Michael Kolupaev](https://github.com/al13n321)).
* Add more warnings on the number of databases and tables. [#57375](https://github.com/ClickHouse/ClickHouse/pull/57375) ([凌涛](https://github.com/lingtaolf)).
* Added a new mutation command `ALTER TABLE <table> APPLY DELETED MASK`, which allows enforcing the application of the mask written by lightweight DELETE and removing rows marked as deleted from disk. [#57433](https://github.com/ClickHouse/ClickHouse/pull/57433) ([Anton Popov](https://github.com/CurtizJ)).
* Added a new SQL function `sqid` to generate Sqids (https://sqids.org/), example: `SELECT sqid(125, 126)`. [#57512](https://github.com/ClickHouse/ClickHouse/pull/57512) ([Robert Schulze](https://github.com/rschu1ze)).
* Dictionaries with the `HASHED_ARRAY` (and `COMPLEX_KEY_HASHED_ARRAY`) layout support `SHARDS` similarly to `HASHED`. [#57544](https://github.com/ClickHouse/ClickHouse/pull/57544) ([vdimir](https://github.com/vdimir)).
* Add asynchronous metrics for total primary key bytes and total allocated primary key bytes in memory. [#57551](https://github.com/ClickHouse/ClickHouse/pull/57551) ([Bharat Nallan](https://github.com/bharatnc)).
* Table `system.dropped_tables_parts` contains parts of tables listed in `system.dropped_tables` (tables that are dropped but not yet removed). [#57555](https://github.com/ClickHouse/ClickHouse/pull/57555) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add `FORMAT_BYTES` as an alias for `formatReadableSize`. [#57592](https://github.com/ClickHouse/ClickHouse/pull/57592) ([Bharat Nallan](https://github.com/bharatnc)).
* Add the `SHA512_256` function. [#57645](https://github.com/ClickHouse/ClickHouse/pull/57645) ([Bharat Nallan](https://github.com/bharatnc)).
* Allow passing an optional SESSION_TOKEN to the `s3` table function. [#57850](https://github.com/ClickHouse/ClickHouse/pull/57850) ([Shani Elharrar](https://github.com/shanielh)).
* Clause `ORDER BY` now supports specifying `ALL`, meaning that ClickHouse sorts by all columns in the `SELECT` clause. Example: `SELECT col1, col2 FROM tab WHERE [...] ORDER BY ALL` (see the combined sketch after this list). [#57875](https://github.com/ClickHouse/ClickHouse/pull/57875) ([zhongyuankai](https://github.com/zhongyuankai)).
* Added functions for punycode encoding/decoding: `punycodeEncode()` and `punycodeDecode()`. [#57969](https://github.com/ClickHouse/ClickHouse/pull/57969) ([Robert Schulze](https://github.com/rschu1ze)).
* Implements `PASTE JOIN`, which allows users to join tables without an `ON` clause. Example: `SELECT * FROM (SELECT number AS a FROM numbers(2)) AS t1 PASTE JOIN (SELECT number AS a FROM numbers(2) ORDER BY a DESC) AS t2`. [#57995](https://github.com/ClickHouse/ClickHouse/pull/57995) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* A handler `/binary` opens a visual viewer of symbols inside the ClickHouse binary. [#58211](https://github.com/ClickHouse/ClickHouse/pull/58211) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

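Not part of the generated changelog: a short illustrative session combining a few of the additions above (a sketch against an assumed 23.12 server; `tab`, `col1`, `col2` are placeholders, and output comments reflect the documented behavior):

```bash
# Sketch: exercising several 23.12 additions via clickhouse-client.
clickhouse-client --query "SELECT sqid(125, 126)"        # a Sqids-encoded id string
clickhouse-client --query "SELECT FORMAT_BYTES(1024)"    # '1.00 KiB' (alias of formatReadableSize)
clickhouse-client --query "SELECT col1, col2 FROM tab ORDER BY ALL"  # sorts by all selected columns
```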
#### Performance Improvement
* Made copy between s3 disks use a server-side s3 copy instead of copying through the buffer. Improves `BACKUP/RESTORE` operations and the `clickhouse-disks copy` command. [#56744](https://github.com/ClickHouse/ClickHouse/pull/56744) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* HashJoin respects the setting `max_joined_block_size_rows` and does not produce large blocks for `ALL JOIN`. [#56996](https://github.com/ClickHouse/ClickHouse/pull/56996) ([vdimir](https://github.com/vdimir)).
* Release memory for aggregation earlier. This may avoid unnecessary external aggregation. [#57691](https://github.com/ClickHouse/ClickHouse/pull/57691) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Improve performance of string serialization. [#57717](https://github.com/ClickHouse/ClickHouse/pull/57717) ([Maksim Kita](https://github.com/kitaisreal)).
* Support trivial count optimization for `Merge`-engine tables. [#57867](https://github.com/ClickHouse/ClickHouse/pull/57867) ([skyoct](https://github.com/skyoct)).
* Optimized aggregation in some cases. [#57872](https://github.com/ClickHouse/ClickHouse/pull/57872) ([Anton Popov](https://github.com/CurtizJ)).
* The `hasAny()` function can now take advantage of the full-text skipping indices. [#57878](https://github.com/ClickHouse/ClickHouse/pull/57878) ([Jpnock](https://github.com/Jpnock)).
* Function `if(cond, then, else)` (and its alias `cond ? then : else`) was optimized to use branch-free evaluation. [#57885](https://github.com/ClickHouse/ClickHouse/pull/57885) ([zhanglistar](https://github.com/zhanglistar)).
* Extract non-intersecting part ranges from MergeTree tables during FINAL processing. That way we can avoid the additional FINAL logic for these non-intersecting part ranges. In cases where the number of duplicate values with the same primary key is low, performance will be almost the same as without FINAL. Improves reading performance for MergeTree FINAL when the `do_not_merge_across_partitions_select_final` setting is set (see the sketch after this list). [#58120](https://github.com/ClickHouse/ClickHouse/pull/58120) ([Maksim Kita](https://github.com/kitaisreal)).
* MergeTree automatically derives the `do_not_merge_across_partitions_select_final` setting if the partition key expression contains only columns from the primary key expression. [#58218](https://github.com/ClickHouse/ClickHouse/pull/58218) ([Maksim Kita](https://github.com/kitaisreal)).
* Speedup MIN and MAX for native types. [#58231](https://github.com/ClickHouse/ClickHouse/pull/58231) ([Raúl Marín](https://github.com/Algunenano)).

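To make the two MergeTree FINAL entries above concrete, a hedged sketch of the setting they reference (`tab` is a placeholder table; with non-intersecting part ranges, FINAL reads skip the extra merging logic):

```bash
# Sketch: the setting referenced by the FINAL-related entries above.
clickhouse-client --query "
    SELECT * FROM tab FINAL
    SETTINGS do_not_merge_across_partitions_select_final = 1"
```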
#### Improvement
* Make inserts into distributed tables handle updated cluster configurations properly. Previously, when the list of cluster nodes was dynamically updated, the directory monitor of the Distributed table could not see a new node until it was recreated. [#42826](https://github.com/ClickHouse/ClickHouse/pull/42826) ([zhongyuankai](https://github.com/zhongyuankai)).
* Replace `--no-system-tables` with loading virtual tables of the system database lazily. [#55271](https://github.com/ClickHouse/ClickHouse/pull/55271) ([Azat Khuzhin](https://github.com/azat)).
* clickhouse-test now prints the case serial number, the current time, and the case name for each test case. [#55710](https://github.com/ClickHouse/ClickHouse/pull/55710) ([guoxiaolong](https://github.com/guoxiaolongzte)).
* Do not allow creating a replicated table with inconsistent merge parameters. [#56833](https://github.com/ClickHouse/ClickHouse/pull/56833) ([Duc Canh Le](https://github.com/canhld94)).
* Implement SLRU cache policy for filesystem cache. [#57076](https://github.com/ClickHouse/ClickHouse/pull/57076) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Show uncompressed size in `system.tables`, obtained from data parts' checksums [#56618](https://github.com/ClickHouse/ClickHouse/issues/56618). [#57186](https://github.com/ClickHouse/ClickHouse/pull/57186) ([Chen Lixiang](https://github.com/chenlx0)).
* Add `skip_unavailable_shards` as a setting for `Distributed` tables that is similar to the corresponding query-level setting. Closes [#43666](https://github.com/ClickHouse/ClickHouse/issues/43666). [#57218](https://github.com/ClickHouse/ClickHouse/pull/57218) ([Gagan Goel](https://github.com/tntnatbry)).
* Function `substring()` (aliases: `substr`, `mid`) can now be used with `Enum` types. Previously, the first function argument had to be a value of type `String` or `FixedString`. This improves compatibility with 3rd party tools such as Tableau via the MySQL interface (sketched after this list). [#57277](https://github.com/ClickHouse/ClickHouse/pull/57277) ([Serge Klochkov](https://github.com/slvrtrn)).
* Better hints when a table doesn't exist. [#57342](https://github.com/ClickHouse/ClickHouse/pull/57342) ([Bharat Nallan](https://github.com/bharatnc)).
* Allow overriding the `max_partition_size_to_drop` and `max_table_size_to_drop` server settings at query time (sketched after this list). [#57452](https://github.com/ClickHouse/ClickHouse/pull/57452) ([Jordi Villar](https://github.com/jrdi)).
* Add support for the read-only flag when connecting to the ZooKeeper server (fixes [#53749](https://github.com/ClickHouse/ClickHouse/issues/53749)). [#57479](https://github.com/ClickHouse/ClickHouse/pull/57479) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Fix possible distributed sends stuck due to "No such file or directory" (during recovering a batch from disk). Fix possible issues with `error_count` from `system.distribution_queue` (in case of `distributed_directory_monitor_max_sleep_time_ms` >5min). Introduce a profile event to track async INSERT failures - `DistributedAsyncInsertionFailures`. [#57480](https://github.com/ClickHouse/ClickHouse/pull/57480) ([Azat Khuzhin](https://github.com/azat)).
* The limit for the number of connections per endpoint for background fetches was raised from `15` to the value of the `background_fetches_pool_size` setting. - The MergeTree-level setting `replicated_max_parallel_fetches_for_host` became obsolete. - The MergeTree-level settings `replicated_fetches_http_connection_timeout`, `replicated_fetches_http_send_timeout` and `replicated_fetches_http_receive_timeout` are moved to the server level. - The setting `keep_alive_timeout` is added to the list of server-level settings. [#57523](https://github.com/ClickHouse/ClickHouse/pull/57523) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* It is now possible to refer to an ALIAS column in index (non-primary-key) definitions (issue [#55650](https://github.com/ClickHouse/ClickHouse/issues/55650)). Example: `CREATE TABLE tab(col UInt32, col_alias ALIAS col + 1, INDEX idx (col_alias) TYPE minmax) ENGINE = MergeTree ORDER BY col;`. [#57546](https://github.com/ClickHouse/ClickHouse/pull/57546) ([Robert Schulze](https://github.com/rschu1ze)).
* Function `format()` now supports arbitrary argument types (instead of only `String` and `FixedString` arguments). This makes it possible to compute `SELECT format('The {0} to all questions is {1}', 'answer', 42)`. [#57549](https://github.com/ClickHouse/ClickHouse/pull/57549) ([Robert Schulze](https://github.com/rschu1ze)).
* Support PostgreSQL generated columns and default column values in `MaterializedPostgreSQL`. Closes [#40449](https://github.com/ClickHouse/ClickHouse/issues/40449). [#57568](https://github.com/ClickHouse/ClickHouse/pull/57568) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow applying some filesystem cache config setting changes without a server restart. [#57578](https://github.com/ClickHouse/ClickHouse/pull/57578) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Handle the SIGABRT case when getting the PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).
* The first argument of the `date_trunc()` function is now case-insensitive. Both of these are now supported: `SELECT date_trunc('day', now())` and `SELECT date_trunc('DAY', now())`. [#57624](https://github.com/ClickHouse/ClickHouse/pull/57624) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Expose the total number of errors that occurred since the last server restart as the `ClickHouseErrorMetric_ALL` metric. [#57627](https://github.com/ClickHouse/ClickHouse/pull/57627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow nodes in the config with `from_env`/`from_zk` attributes and a non-empty element with `replace=1`. [#57628](https://github.com/ClickHouse/ClickHouse/pull/57628) ([Azat Khuzhin](https://github.com/azat)).
* Generate malformed output that cannot be parsed as JSON. [#57646](https://github.com/ClickHouse/ClickHouse/pull/57646) ([Julia Kartseva](https://github.com/jkartseva)).
* Consider lightweight-deleted rows when selecting parts to merge, if enabled. [#57648](https://github.com/ClickHouse/ClickHouse/pull/57648) ([Zhuo Qiu](https://github.com/jewelzqiu)).
* Make querying `system.filesystem_cache` not memory-intensive. [#57687](https://github.com/ClickHouse/ClickHouse/pull/57687) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow IPv6-to-UInt128 conversion and binary arithmetic. [#57707](https://github.com/ClickHouse/ClickHouse/pull/57707) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support negative positional arguments. Closes [#57736](https://github.com/ClickHouse/ClickHouse/issues/57736). [#57741](https://github.com/ClickHouse/ClickHouse/pull/57741) ([flynn](https://github.com/ucasfl)).
* Add a setting for the async inserts deduplication cache: how long we wait for a cache update. Deprecates the setting `async_block_ids_cache_min_update_interval_ms`; now the cache is updated only in case of conflicts. [#57743](https://github.com/ClickHouse/ClickHouse/pull/57743) ([alesapin](https://github.com/alesapin)).
* The `sleep()` function can now be cancelled with `KILL QUERY`. [#57746](https://github.com/ClickHouse/ClickHouse/pull/57746) ([Vitaly Baranov](https://github.com/vitlibar)).
* Slightly better inference of unnamed tuples in JSON formats. [#57751](https://github.com/ClickHouse/ClickHouse/pull/57751) ([Kruglov Pavel](https://github.com/Avogar)).
* Refactor UserDefinedSQL* classes to make it possible to add SQL UDF storages which are different from ZooKeeper and Disk. [#57752](https://github.com/ClickHouse/ClickHouse/pull/57752) ([Natasha Chizhonkova](https://github.com/chizhonkova)).
* Forbid `CREATE TABLE ... AS SELECT` queries for Replicated table engines in Replicated databases because they are broken. Reference [#35408](https://github.com/ClickHouse/ClickHouse/issues/35408). [#57796](https://github.com/ClickHouse/ClickHouse/pull/57796) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix and improve query transformation for external databases: recursively obtain all compatible predicates. [#57888](https://github.com/ClickHouse/ClickHouse/pull/57888) ([flynn](https://github.com/ucasfl)).
* Support dynamic reloading of the filesystem cache size. Closes [#57866](https://github.com/ClickHouse/ClickHouse/issues/57866). [#57897](https://github.com/ClickHouse/ClickHouse/pull/57897) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `system.stack_trace` for threads with blocked SIGRTMIN. [#57907](https://github.com/ClickHouse/ClickHouse/pull/57907) ([Azat Khuzhin](https://github.com/azat)).
* Added a new setting `readonly` which can be used to specify that an s3 disk is read-only. It can be useful to create a table on a read-only `s3_plain` type disk. [#57977](https://github.com/ClickHouse/ClickHouse/pull/57977) ([Pengyuan Bian](https://github.com/bianpengyuan)).
* Support Keeper failures in the quorum check. [#57986](https://github.com/ClickHouse/ClickHouse/pull/57986) ([Raúl Marín](https://github.com/Algunenano)).
* Add max/peak RSS (`MemoryResidentMax`) to `system.asynchronous_metrics`. [#58095](https://github.com/ClickHouse/ClickHouse/pull/58095) ([Azat Khuzhin](https://github.com/azat)).
* Fix `system.stack_trace` for threads with blocked SIGRTMIN (and also send the signal to threads only if it is not blocked, to avoid waiting `storage_system_stack_trace_pipe_read_timeout_ms` when it does not make any sense). [#58136](https://github.com/ClickHouse/ClickHouse/pull/58136) ([Azat Khuzhin](https://github.com/azat)).
* Allow users to use s3 links (`https://` and `s3://`) without specifying a region if it's not the default, and find the correct region if the user specified the wrong one. [#58148](https://github.com/ClickHouse/ClickHouse/pull/58148) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* `clickhouse-format --obfuscate` will know about Settings, MergeTreeSettings, and time zones and keep their names unchanged. [#58179](https://github.com/ClickHouse/ClickHouse/pull/58179) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added an explicit `finalize()` function in `ZipArchiveWriter`. Simplify overly complicated code in `ZipArchiveWriter`. This fixes [#58074](https://github.com/ClickHouse/ClickHouse/issues/58074). [#58202](https://github.com/ClickHouse/ClickHouse/pull/58202) ([Vitaly Baranov](https://github.com/vitlibar)).
* The primary key analysis in MergeTree tables will now be applied to predicates that include the virtual column `_part_offset` (optionally with `_part`). This feature can serve as a poor man's secondary index. [#58224](https://github.com/ClickHouse/ClickHouse/pull/58224) ([Amos Bird](https://github.com/amosbird)).
* Make caches with the same path use the same cache objects. This behaviour existed before, but was broken in https://github.com/ClickHouse/ClickHouse/pull/48805 (in 23.4). If such caches with the same path have a different set of cache settings, an exception is thrown, as this is not allowed. [#58264](https://github.com/ClickHouse/ClickHouse/pull/58264) ([Kseniia Sumarokova](https://github.com/kssenii)).

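Two of the improvements above lend themselves to one-liners. These are sketches with placeholder names, not text from the PRs; the exact override mechanism for the drop guards is the one introduced by [#57452]:

```bash
# Sketch: per-query override of the drop guard (23.12+); `big_table` is a
# placeholder, and 0 means no size restriction for this one query.
clickhouse-client --query "
    DROP TABLE big_table
    SETTINGS max_table_size_to_drop = 0"

# Sketch: substring() accepting an Enum first argument per PR #57277.
clickhouse-client --query "
    SELECT substring(CAST('hello' AS Enum('hello' = 1, 'world' = 2)), 1, 4)"
```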
#### Build/Testing/Packaging Improvement
* Allow usage of Azure-related table engines/functions on macOS. [#51866](https://github.com/ClickHouse/ClickHouse/pull/51866) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* ClickHouse Fast Test now uses Musl instead of GLibc. [#57711](https://github.com/ClickHouse/ClickHouse/pull/57711) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Run ClickBench for every commit. This closes [#57708](https://github.com/ClickHouse/ClickHouse/issues/57708). [#57712](https://github.com/ClickHouse/ClickHouse/pull/57712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fixed a sorting order breakage in TTL GROUP BY [#49103](https://github.com/ClickHouse/ClickHouse/pull/49103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix: split lttb bucket strategy; the first and last buckets should only contain a single point [#57003](https://github.com/ClickHouse/ClickHouse/pull/57003) ([FFish](https://github.com/wxybear)).
* Fix possible deadlock in Template format during sync after error [#57004](https://github.com/ClickHouse/ClickHouse/pull/57004) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix early stop while parsing a file with skipping lots of errors [#57006](https://github.com/ClickHouse/ClickHouse/pull/57006) ([Kruglov Pavel](https://github.com/Avogar)).
* Prevent dictionary's ACL bypass via the dictionary() table function [#57362](https://github.com/ClickHouse/ClickHouse/pull/57362) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix another case of a non-ready set. [#57423](https://github.com/ClickHouse/ClickHouse/pull/57423) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix several issues regarding PostgreSQL `array_ndims` usage. [#57436](https://github.com/ClickHouse/ClickHouse/pull/57436) ([Ryan Jacobs](https://github.com/ryanmjacobs)).
* Fix RWLock inconsistency after write lock timeout [#57454](https://github.com/ClickHouse/ClickHouse/pull/57454) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix: don't exclude the ephemeral column when building the pushing-to-view chain [#57461](https://github.com/ClickHouse/ClickHouse/pull/57461) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* MaterializedPostgreSQL: fix issue [#41922](https://github.com/ClickHouse/ClickHouse/issues/41922), add test for [#41923](https://github.com/ClickHouse/ClickHouse/issues/41923) [#57515](https://github.com/ClickHouse/ClickHouse/pull/57515) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix crash in clickhouse-local [#57553](https://github.com/ClickHouse/ClickHouse/pull/57553) ([Nikolay Degterinsky](https://github.com/evillique)).
* Materialize block in HashJoin for Type::EMPTY [#57564](https://github.com/ClickHouse/ClickHouse/pull/57564) ([vdimir](https://github.com/vdimir)).
* Fix possible segfault in PostgreSQLSource [#57567](https://github.com/ClickHouse/ClickHouse/pull/57567) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix type correction in HashJoin for nested low cardinality [#57614](https://github.com/ClickHouse/ClickHouse/pull/57614) ([vdimir](https://github.com/vdimir)).
* Avoid hangs of system.stack_trace by correctly prohibiting parallel reads from it [#57641](https://github.com/ClickHouse/ClickHouse/pull/57641) ([Azat Khuzhin](https://github.com/azat)).
* Fix SIGSEGV for aggregation of sparse columns with any() RESPECT NULLS [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)).
* Fix unary operators parsing [#57713](https://github.com/ClickHouse/ClickHouse/pull/57713) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix RWLock inconsistency after write lock timeout (again) [#57733](https://github.com/ClickHouse/ClickHouse/pull/57733) ([Vitaly Baranov](https://github.com/vitlibar)).
* Table engine MaterializedPostgreSQL: fix dependency loading [#57754](https://github.com/ClickHouse/ClickHouse/pull/57754) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix retries for disconnected nodes for BACKUP/RESTORE ON CLUSTER [#57764](https://github.com/ClickHouse/ClickHouse/pull/57764) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631) [#57766](https://github.com/ClickHouse/ClickHouse/pull/57766) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix result of external aggregation in case of a partially materialized projection [#57790](https://github.com/ClickHouse/ClickHouse/pull/57790) ([Anton Popov](https://github.com/CurtizJ)).
* Fix merge in aggregation functions with `*Map` combinator [#57795](https://github.com/ClickHouse/ClickHouse/pull/57795) ([Anton Popov](https://github.com/CurtizJ)).
* Disable system.kafka_consumers by default (due to a possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix low-cardinality keys support in MergeJoin [#57827](https://github.com/ClickHouse/ClickHouse/pull/57827) ([vdimir](https://github.com/vdimir)).
* Create consumers for Kafka tables on fly (but keep them for some period since last used) [#57829](https://github.com/ClickHouse/ClickHouse/pull/57829) ([Azat Khuzhin](https://github.com/azat)).
* InterpreterCreateQuery sample block fix [#57855](https://github.com/ClickHouse/ClickHouse/pull/57855) ([Maksim Kita](https://github.com/kitaisreal)).
* Bugfix: `addresses_expr` was ignored for psql named collections [#57874](https://github.com/ClickHouse/ClickHouse/pull/57874) ([joelynch](https://github.com/joelynch)).
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Resurrect `arrayFold()` [#57879](https://github.com/ClickHouse/ClickHouse/pull/57879) ([Robert Schulze](https://github.com/rschu1ze)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix handling of unavailable replicas before the first request happened [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)).
* Fix literal alias misclassification [#57988](https://github.com/ClickHouse/ClickHouse/pull/57988) ([Chen768959](https://github.com/Chen768959)).
* Revert "Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631)" [#58031](https://github.com/ClickHouse/ClickHouse/pull/58031) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Fix parallel replicas in presence of a scalar subquery with a big integer value [#58118](https://github.com/ClickHouse/ClickHouse/pull/58118) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `accurateCastOrNull` for out-of-range DateTime (sketched after this list) [#58139](https://github.com/ClickHouse/ClickHouse/pull/58139) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix possible PARAMETER_OUT_OF_BOUND error during subcolumns reading from a wide part in MergeTree [#58175](https://github.com/ClickHouse/ClickHouse/pull/58175) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix CREATE VIEW hang [#58220](https://github.com/ClickHouse/ClickHouse/pull/58220) ([Tao Wang](https://github.com/wangtZJU)).
* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).

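A hedged illustration of the `accurateCastOrNull` fix referenced above (a sketch of the documented contract, not text from the PR; `DateTime` stores a 32-bit epoch, so values past its range should yield NULL rather than a wrong result):

```bash
# Sketch: an out-of-range epoch value cannot be represented as DateTime,
# so accurateCastOrNull is expected to return NULL instead of garbage.
clickhouse-client --query "SELECT accurateCastOrNull(20000000000, 'DateTime')"
```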
#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Revert "Update Sentry""'. [#57694](https://github.com/ClickHouse/ClickHouse/pull/57694) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Fix RWLock inconsistency after write lock timeout"'. [#57730](https://github.com/ClickHouse/ClickHouse/pull/57730) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "improve CI with digest for docker, build and test jobs"'. [#57903](https://github.com/ClickHouse/ClickHouse/pull/57903) ([Max K.](https://github.com/mkaynov)).
* NO CL ENTRY: 'Reapply "improve CI with digest for docker, build and test jobs"'. [#57904](https://github.com/ClickHouse/ClickHouse/pull/57904) ([Max K.](https://github.com/mkaynov)).
* NO CL ENTRY: 'Revert "Merge pull request [#56573](https://github.com/ClickHouse/ClickHouse/issues/56573) from mkmkme/mkmkme/reload-config"'. [#57909](https://github.com/ClickHouse/ClickHouse/pull/57909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add system.dropped_tables_parts table"'. [#58022](https://github.com/ClickHouse/ClickHouse/pull/58022) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "Consider lightweight deleted rows when selecting parts to merge"'. [#58097](https://github.com/ClickHouse/ClickHouse/pull/58097) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Fix leftover processes/hangs in tests"'. [#58207](https://github.com/ClickHouse/ClickHouse/pull/58207) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Create consumers for Kafka tables on fly (but keep them for some period since last used)"'. [#58272](https://github.com/ClickHouse/ClickHouse/pull/58272) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Implement punycode encoding/decoding"'. [#58277](https://github.com/ClickHouse/ClickHouse/pull/58277) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Randomize more settings [#39663](https://github.com/ClickHouse/ClickHouse/pull/39663) ([Anton Popov](https://github.com/CurtizJ)).
* Add more tests for `compile_expressions` [#51113](https://github.com/ClickHouse/ClickHouse/pull/51113) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* [RFC] Correctly wait for background threads [#52717](https://github.com/ClickHouse/ClickHouse/pull/52717) ([Azat Khuzhin](https://github.com/azat)).
* improve CI with digest for docker, build and test jobs [#56317](https://github.com/ClickHouse/ClickHouse/pull/56317) ([Max K.](https://github.com/mkaynov)).
* Prepare the introduction of more Keeper faults [#56917](https://github.com/ClickHouse/ClickHouse/pull/56917) ([Raúl Marín](https://github.com/Algunenano)).
* Analyzer: Fix assert in tryReplaceAndEqualsChainsWithConstant [#57139](https://github.com/ClickHouse/ClickHouse/pull/57139) ([vdimir](https://github.com/vdimir)).
* Check what will happen if we build ClickHouse with Musl [#57180](https://github.com/ClickHouse/ClickHouse/pull/57180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support memory soft limit for Keeper [#57271](https://github.com/ClickHouse/ClickHouse/pull/57271) ([Han Fei](https://github.com/hanfei1991)).
* Randomize disabled optimizations in CI [#57315](https://github.com/ClickHouse/ClickHouse/pull/57315) ([Raúl Marín](https://github.com/Algunenano)).
* Don't throw if noop when dropping a database replica in batch [#57337](https://github.com/ClickHouse/ClickHouse/pull/57337) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Better JSON -> JSONEachRow fallback without catching exceptions [#57364](https://github.com/ClickHouse/ClickHouse/pull/57364) ([Kruglov Pavel](https://github.com/Avogar)).
* Add tests for [#48496](https://github.com/ClickHouse/ClickHouse/issues/48496) [#57414](https://github.com/ClickHouse/ClickHouse/pull/57414) ([Raúl Marín](https://github.com/Algunenano)).
* Add a profile event for cache lookups in `ThreadPoolRemoteFSReader` [#57437](https://github.com/ClickHouse/ClickHouse/pull/57437) ([Nikita Taranov](https://github.com/nickitat)).
* Remove `select()` usage [#57467](https://github.com/ClickHouse/ClickHouse/pull/57467) ([Igor Nikonov](https://github.com/devcrafter)).
* Parallel replicas: friendly settings [#57542](https://github.com/ClickHouse/ClickHouse/pull/57542) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix formatting string prompt error [#57569](https://github.com/ClickHouse/ClickHouse/pull/57569) ([skyoct](https://github.com/skyoct)).
* Tune CI scale up/down multipliers [#57572](https://github.com/ClickHouse/ClickHouse/pull/57572) ([Max K.](https://github.com/mkaynov)).
* Revert "Revert "Implemented series period detect method using pocketfft lib"" [#57574](https://github.com/ClickHouse/ClickHouse/pull/57574) ([Bhavna Jindal](https://github.com/bhavnajindal)).
* Correctly handle errors when opening a query in the editor in the client [#57587](https://github.com/ClickHouse/ClickHouse/pull/57587) ([Azat Khuzhin](https://github.com/azat)).
* Add a test for [#55251](https://github.com/ClickHouse/ClickHouse/issues/55251) [#57588](https://github.com/ClickHouse/ClickHouse/pull/57588) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a test for [#48039](https://github.com/ClickHouse/ClickHouse/issues/48039) [#57593](https://github.com/ClickHouse/ClickHouse/pull/57593) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update CHANGELOG.md [#57594](https://github.com/ClickHouse/ClickHouse/pull/57594) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version after release [#57595](https://github.com/ClickHouse/ClickHouse/pull/57595) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.11.1.2711-stable [#57597](https://github.com/ClickHouse/ClickHouse/pull/57597) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Identify failed jobs in lambda and mark as steps=0 [#57600](https://github.com/ClickHouse/ClickHouse/pull/57600) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix flaky test: distinct in order with analyzer [#57606](https://github.com/ClickHouse/ClickHouse/pull/57606) ([Igor Nikonov](https://github.com/devcrafter)).
* CHJIT: add assembly printer [#57610](https://github.com/ClickHouse/ClickHouse/pull/57610) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix parsing virtual hosted S3 URI in clickhouse_backupview script [#57612](https://github.com/ClickHouse/ClickHouse/pull/57612) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Fix docs for `fileCluster` [#57613](https://github.com/ClickHouse/ClickHouse/pull/57613) ([Andrey Zvonov](https://github.com/zvonand)).
* Analyzer: Fix logical error in MultiIfToIfPass [#57622](https://github.com/ClickHouse/ClickHouse/pull/57622) ([vdimir](https://github.com/vdimir)).
* Throw a clearer exception [#57626](https://github.com/ClickHouse/ClickHouse/pull/57626) ([alesapin](https://github.com/alesapin)).
* Fix "logs and exception messages formatting", part 1 [#57630](https://github.com/ClickHouse/ClickHouse/pull/57630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix "logs and exception messages formatting", part 2 [#57632](https://github.com/ClickHouse/ClickHouse/pull/57632) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix "logs and exception messages formatting", part 3 [#57633](https://github.com/ClickHouse/ClickHouse/pull/57633) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix "logs and exception messages formatting", part 4 [#57634](https://github.com/ClickHouse/ClickHouse/pull/57634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove bad test (1) [#57636](https://github.com/ClickHouse/ClickHouse/pull/57636) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove bad test (2) [#57637](https://github.com/ClickHouse/ClickHouse/pull/57637) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* ClickHouse Cloud promotion [#57638](https://github.com/ClickHouse/ClickHouse/pull/57638) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Remove bad test (3) [#57639](https://github.com/ClickHouse/ClickHouse/pull/57639) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove bad test (4) [#57640](https://github.com/ClickHouse/ClickHouse/pull/57640) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Random changes in random files [#57642](https://github.com/ClickHouse/ClickHouse/pull/57642) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Merge half of [#51113](https://github.com/ClickHouse/ClickHouse/issues/51113) [#57643](https://github.com/ClickHouse/ClickHouse/pull/57643) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Analyzer: Fix JOIN ON true with join_use_nulls [#57662](https://github.com/ClickHouse/ClickHouse/pull/57662) ([vdimir](https://github.com/vdimir)).
* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add support for system.stack_trace filtering optimizations for analyzer [#57682](https://github.com/ClickHouse/ClickHouse/pull/57682) ([Azat Khuzhin](https://github.com/azat)).
* Test for [#33308](https://github.com/ClickHouse/ClickHouse/issues/33308) [#57693](https://github.com/ClickHouse/ClickHouse/pull/57693) ([Denny Crane](https://github.com/den-crane)).
* Support Keeper memory soft limit ratio [#57699](https://github.com/ClickHouse/ClickHouse/pull/57699) ([Han Fei](https://github.com/hanfei1991)).
* Fix test_dictionaries_update_and_reload/test.py::test_reload_while_loading flakiness [#57714](https://github.com/ClickHouse/ClickHouse/pull/57714) ([Azat Khuzhin](https://github.com/azat)).
* Tune autoscale to scale for a single job in the queue [#57742](https://github.com/ClickHouse/ClickHouse/pull/57742) ([Max K.](https://github.com/mkaynov)).
* Tune network memory for dockerhub proxy hosts [#57744](https://github.com/ClickHouse/ClickHouse/pull/57744) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Parallel replicas: announcement response handling improvement [#57749](https://github.com/ClickHouse/ClickHouse/pull/57749) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix building Rust with Musl [#57756](https://github.com/ClickHouse/ClickHouse/pull/57756) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky test_parallel_replicas_distributed_read_from_all [#57757](https://github.com/ClickHouse/ClickHouse/pull/57757) ([Igor Nikonov](https://github.com/devcrafter)).
* Minor refactoring of `toStartOfInterval()` [#57761](https://github.com/ClickHouse/ClickHouse/pull/57761) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't run test 02919_skip_lots_of_parsing_errors on aarch64 [#57762](https://github.com/ClickHouse/ClickHouse/pull/57762) ([Kruglov Pavel](https://github.com/Avogar)).
* More respect for `min_number_of_marks` in `ParallelReplicasReadingCoordinator` [#57763](https://github.com/ClickHouse/ClickHouse/pull/57763) ([Nikita Taranov](https://github.com/nickitat)).
* SerializationString: reduce memory usage [#57787](https://github.com/ClickHouse/ClickHouse/pull/57787) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix ThreadSanitizer data race in librdkafka [#57791](https://github.com/ClickHouse/ClickHouse/pull/57791) ([Ilya Golshtein](https://github.com/ilejn)).
* Rename `system.async_loader` into `system.asynchronous_loader` [#57793](https://github.com/ClickHouse/ClickHouse/pull/57793) ([Sergei Trifonov](https://github.com/serxa)).
* Set replica number to its position in the cluster definition [#57800](https://github.com/ClickHouse/ClickHouse/pull/57800) ([Nikita Taranov](https://github.com/nickitat)).
* Fix clickhouse-client invocation in 02327_capnproto_protobuf_empty_messages [#57804](https://github.com/ClickHouse/ClickHouse/pull/57804) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Fix flaky test_parallel_replicas_over_distributed [#57809](https://github.com/ClickHouse/ClickHouse/pull/57809) ([Igor Nikonov](https://github.com/devcrafter)).
* Revert [#57741](https://github.com/ClickHouse/ClickHouse/issues/57741) [#57811](https://github.com/ClickHouse/ClickHouse/pull/57811) ([Raúl Marín](https://github.com/Algunenano)).
* Dumb down `substring()` tests [#57821](https://github.com/ClickHouse/ClickHouse/pull/57821) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v23.11.2.11-stable [#57824](https://github.com/ClickHouse/ClickHouse/pull/57824) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix 02906_force_optimize_projection_name [#57826](https://github.com/ClickHouse/ClickHouse/pull/57826) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* ClickBench: slightly better [#57831](https://github.com/ClickHouse/ClickHouse/pull/57831) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix 02932_kill_query_sleep flakiness [#57849](https://github.com/ClickHouse/ClickHouse/pull/57849) ([Azat Khuzhin](https://github.com/azat)).
* Revert "Replace --no-system-tables with loading virtual tables of system database lazily" [#57851](https://github.com/ClickHouse/ClickHouse/pull/57851) ([Azat Khuzhin](https://github.com/azat)).
* Fix memory leak in StorageHDFS [#57860](https://github.com/ClickHouse/ClickHouse/pull/57860) ([Andrey Zvonov](https://github.com/zvonand)).
* Remove hardcoded clickhouse-client invocations from tests [#57861](https://github.com/ClickHouse/ClickHouse/pull/57861) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Follow up to [#57568](https://github.com/ClickHouse/ClickHouse/issues/57568) [#57863](https://github.com/ClickHouse/ClickHouse/pull/57863) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix assertion in HashJoin [#57873](https://github.com/ClickHouse/ClickHouse/pull/57873) ([vdimir](https://github.com/vdimir)).
* More efficient constructor for SerializationEnum [#57887](https://github.com/ClickHouse/ClickHouse/pull/57887) ([Duc Canh Le](https://github.com/canhld94)).
* Fix test_unset_skip_unavailable_shards [#57895](https://github.com/ClickHouse/ClickHouse/pull/57895) ([Raúl Marín](https://github.com/Algunenano)).
* Add argument to fill the gap in cherry-pick [#57896](https://github.com/ClickHouse/ClickHouse/pull/57896) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Delete debug logging in OutputFormatWithUTF8ValidationAdaptor [#57899](https://github.com/ClickHouse/ClickHouse/pull/57899) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Improvements for 00002_log_and_exception_messages_formatting [#57910](https://github.com/ClickHouse/ClickHouse/pull/57910) ([Raúl Marín](https://github.com/Algunenano)).
* Update CHANGELOG.md [#57911](https://github.com/ClickHouse/ClickHouse/pull/57911) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove cruft from TablesLoader [#57938](https://github.com/ClickHouse/ClickHouse/pull/57938) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix `/dashboard` work with passwords [#57948](https://github.com/ClickHouse/ClickHouse/pull/57948) ([Sergei Trifonov](https://github.com/serxa)).
* Remove wrong test [#57950](https://github.com/ClickHouse/ClickHouse/pull/57950) ([Sergei Trifonov](https://github.com/serxa)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).
* Remove C++ templates (normalizeQuery) [#57963](https://github.com/ClickHouse/ClickHouse/pull/57963) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* A small fix for dashboard [#57964](https://github.com/ClickHouse/ClickHouse/pull/57964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Always use `pread` for reading cache segments [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)).
* Improve some tests [#57973](https://github.com/ClickHouse/ClickHouse/pull/57973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Revert "Merge pull request [#57907](https://github.com/ClickHouse/ClickHouse/issues/57907) from azat/system.stack_trace-rt_tgsigqueueinfo" [#57974](https://github.com/ClickHouse/ClickHouse/pull/57974) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#49708](https://github.com/ClickHouse/ClickHouse/issues/49708) [#57979](https://github.com/ClickHouse/ClickHouse/pull/57979) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix style-check checkout head-ref [#57989](https://github.com/ClickHouse/ClickHouse/pull/57989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Refine error message [#57991](https://github.com/ClickHouse/ClickHouse/pull/57991) ([Han Fei](https://github.com/hanfei1991)).
* CI for docs only: fix [#57992](https://github.com/ClickHouse/ClickHouse/pull/57992) ([Max K.](https://github.com/mkaynov)).
* Replace Rust's BLAKE3 with LLVM's implementation [#57994](https://github.com/ClickHouse/ClickHouse/pull/57994) ([Raúl Marín](https://github.com/Algunenano)).
* Better trivial count optimization for storage `Merge` [#57996](https://github.com/ClickHouse/ClickHouse/pull/57996) ([Anton Popov](https://github.com/CurtizJ)).
* Enhanced docs for `date_trunc()` [#58000](https://github.com/ClickHouse/ClickHouse/pull/58000) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* CI: add needs_changed_files flag for pr_info [#58003](https://github.com/ClickHouse/ClickHouse/pull/58003) ([Max K.](https://github.com/mkaynov)).
* More messages in CI [#58007](https://github.com/ClickHouse/ClickHouse/pull/58007) ([Sema Checherinda](https://github.com/CheSema)).
* Test parallel replicas with the force_primary_key setting [#58010](https://github.com/ClickHouse/ClickHouse/pull/58010) ([Igor Nikonov](https://github.com/devcrafter)).
* Update 00002_log_and_exception_messages_formatting.sql [#58012](https://github.com/ClickHouse/ClickHouse/pull/58012) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix rare race in external sort/aggregation with temporary data in cache [#58013](https://github.com/ClickHouse/ClickHouse/pull/58013) ([Anton Popov](https://github.com/CurtizJ)).
* Fix segfault in FuzzJSON engine [#58015](https://github.com/ClickHouse/ClickHouse/pull/58015) ([Julia Kartseva](https://github.com/jkartseva)).
* Fix FreeBSD build [#58019](https://github.com/ClickHouse/ClickHouse/pull/58019) ([Julia Kartseva](https://github.com/jkartseva)).
* Rename canUseParallelReplicas to canUseTaskBasedParallelReplicas [#58025](https://github.com/ClickHouse/ClickHouse/pull/58025) ([Raúl Marín](https://github.com/Algunenano)).
* Remove fixed tests from analyzer_tech_debt.txt [#58028](https://github.com/ClickHouse/ClickHouse/pull/58028) ([Raúl Marín](https://github.com/Algunenano)).
* More verbose errors on 00002_log_and_exception_messages_formatting [#58037](https://github.com/ClickHouse/ClickHouse/pull/58037) ([Raúl Marín](https://github.com/Algunenano)).
* Make window insert result into constant [#58045](https://github.com/ClickHouse/ClickHouse/pull/58045) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* CI: Happy new year [#58046](https://github.com/ClickHouse/ClickHouse/pull/58046) ([Raúl Marín](https://github.com/Algunenano)).
* Follow up for [#57691](https://github.com/ClickHouse/ClickHouse/issues/57691) [#58048](https://github.com/ClickHouse/ClickHouse/pull/58048) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Always run ast_fuzz and sqllancer [#58049](https://github.com/ClickHouse/ClickHouse/pull/58049) ([Max K.](https://github.com/mkaynov)).
* Add GH status for PR formatting [#58050](https://github.com/ClickHouse/ClickHouse/pull/58050) ([Max K.](https://github.com/mkaynov)).
* Small improvement for SystemLogBase [#58051](https://github.com/ClickHouse/ClickHouse/pull/58051) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Bump Azure to v1.6.0 [#58052](https://github.com/ClickHouse/ClickHouse/pull/58052) ([Robert Schulze](https://github.com/rschu1ze)).
* Correct values for randomization [#58058](https://github.com/ClickHouse/ClickHouse/pull/58058) ([Anton Popov](https://github.com/CurtizJ)).
* Non-POST requests should be read-only [#58060](https://github.com/ClickHouse/ClickHouse/pull/58060) ([San](https://github.com/santrancisco)).
* Revert "Merge pull request [#55710](https://github.com/ClickHouse/ClickHouse/issues/55710) from guoxiaolongzte/clickhouse-test… [#58066](https://github.com/ClickHouse/ClickHouse/pull/58066) ([Raúl Marín](https://github.com/Algunenano)).
* Fix typo in the test 02479 [#58072](https://github.com/ClickHouse/ClickHouse/pull/58072) ([Sema Checherinda](https://github.com/CheSema)).
* Bump Azure to 1.7.2 [#58075](https://github.com/ClickHouse/ClickHouse/pull/58075) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix flaky test `02567_and_consistency` [#58076](https://github.com/ClickHouse/ClickHouse/pull/58076) ([Anton Popov](https://github.com/CurtizJ)).
* Fix Tests Bugfix Validate Check [#58078](https://github.com/ClickHouse/ClickHouse/pull/58078) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix for nightly job for digest-ci [#58079](https://github.com/ClickHouse/ClickHouse/pull/58079) ([Max K.](https://github.com/mkaynov)).
* Test for parallel replicas with remote() [#58081](https://github.com/ClickHouse/ClickHouse/pull/58081) ([Igor Nikonov](https://github.com/devcrafter)).
* Minor cosmetic changes [#58092](https://github.com/ClickHouse/ClickHouse/pull/58092) ([Raúl Marín](https://github.com/Algunenano)).
* Reintroduce OPTIMIZE CLEANUP as no-op [#58100](https://github.com/ClickHouse/ClickHouse/pull/58100) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add compatibility in the replication protocol for a removed feature [#58104](https://github.com/ClickHouse/ClickHouse/pull/58104) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Flaky 02922_analyzer_aggregate_nothing_type [#58105](https://github.com/ClickHouse/ClickHouse/pull/58105) ([Raúl Marín](https://github.com/Algunenano)).
* Update version_date.tsv and changelogs after v23.11.3.23-stable [#58106](https://github.com/ClickHouse/ClickHouse/pull/58106) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Limited CI on master for docs-only changes [#58121](https://github.com/ClickHouse/ClickHouse/pull/58121) ([Max K.](https://github.com/mkaynov)).
* Style fix [#58125](https://github.com/ClickHouse/ClickHouse/pull/58125) ([Max K.](https://github.com/mkaynov)).
* Support "do not test" label with ci.py [#58128](https://github.com/ClickHouse/ClickHouse/pull/58128) ([Max K.](https://github.com/mkaynov)).
* Use the single images list for integration tests everywhere [#58130](https://github.com/ClickHouse/ClickHouse/pull/58130) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Disable parallel replicas with IN (subquery) [#58133](https://github.com/ClickHouse/ClickHouse/pull/58133) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix clang-tidy [#58134](https://github.com/ClickHouse/ClickHouse/pull/58134) ([Raúl Marín](https://github.com/Algunenano)).
* Run build report check job on build failures, fix [#58135](https://github.com/ClickHouse/ClickHouse/pull/58135) ([Max K.](https://github.com/mkaynov)).
* Fix dashboard legend sorting and rows number [#58151](https://github.com/ClickHouse/ClickHouse/pull/58151) ([Sergei Trifonov](https://github.com/serxa)).
* Remove retryStrategy assignments overwritten in ClientFactory::create() [#58163](https://github.com/ClickHouse/ClickHouse/pull/58163) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Helper improvements [#58164](https://github.com/ClickHouse/ClickHouse/pull/58164) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Pass through exceptions for reading from S3 [#58165](https://github.com/ClickHouse/ClickHouse/pull/58165) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* [RFC] Adjust all std::ios implementations in poco to set failbit/badbit by default [#58166](https://github.com/ClickHouse/ClickHouse/pull/58166) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Add bytes_uncompressed to system.part_log [#58167](https://github.com/ClickHouse/ClickHouse/pull/58167) ([Jordi Villar](https://github.com/jrdi)).
|
||||
* Update docker/test/stateful/run.sh [#58168](https://github.com/ClickHouse/ClickHouse/pull/58168) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Update 00165_jit_aggregate_functions.sql [#58169](https://github.com/ClickHouse/ClickHouse/pull/58169) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Update clickhouse-test [#58170](https://github.com/ClickHouse/ClickHouse/pull/58170) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Profile event 'ParallelReplicasUsedCount' [#58173](https://github.com/ClickHouse/ClickHouse/pull/58173) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Fix flaky test `02719_aggregate_with_empty_string_key` [#58176](https://github.com/ClickHouse/ClickHouse/pull/58176) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix [#58171](https://github.com/ClickHouse/ClickHouse/issues/58171) [#58177](https://github.com/ClickHouse/ClickHouse/pull/58177) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add base backup name to system.backups and system.backup_log tables [#58178](https://github.com/ClickHouse/ClickHouse/pull/58178) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
|
||||
* Fix use-after-move [#58182](https://github.com/ClickHouse/ClickHouse/pull/58182) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Looking at strange code [#58196](https://github.com/ClickHouse/ClickHouse/pull/58196) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix all Exception with missing arguments [#58198](https://github.com/ClickHouse/ClickHouse/pull/58198) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix leftover processes/hangs in tests [#58200](https://github.com/ClickHouse/ClickHouse/pull/58200) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix DWARFBlockInputFormat failing on DWARF 5 unit address ranges [#58204](https://github.com/ClickHouse/ClickHouse/pull/58204) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Fix error in archive reader [#58206](https://github.com/ClickHouse/ClickHouse/pull/58206) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix DWARFBlockInputFormat using wrong base address sometimes [#58208](https://github.com/ClickHouse/ClickHouse/pull/58208) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Add support for specifying query parameters in the command line in clickhouse-local [#58210](https://github.com/ClickHouse/ClickHouse/pull/58210) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
|
||||
* Fix leftover processes/hangs in tests (resubmit) [#58213](https://github.com/ClickHouse/ClickHouse/pull/58213) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Add optimization for AND notEquals chain in logical expression optimizer [#58214](https://github.com/ClickHouse/ClickHouse/pull/58214) ([Kevin Mingtarja](https://github.com/kevinmingtarja)).
|
||||
* Fix syntax and doc [#58221](https://github.com/ClickHouse/ClickHouse/pull/58221) ([San](https://github.com/santrancisco)).
|
||||
* Cleanup some known short messages [#58226](https://github.com/ClickHouse/ClickHouse/pull/58226) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Some code refactoring (was an attempt to improve build time, but failed) [#58237](https://github.com/ClickHouse/ClickHouse/pull/58237) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix perf test README [#58245](https://github.com/ClickHouse/ClickHouse/pull/58245) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* [Analyzer] Add test for [#57086](https://github.com/ClickHouse/ClickHouse/issues/57086) [#58249](https://github.com/ClickHouse/ClickHouse/pull/58249) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Reintroduce compatibility with `is_deleted` on a syntax level [#58251](https://github.com/ClickHouse/ClickHouse/pull/58251) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Avoid throwing ABORTED on normal situations [#58252](https://github.com/ClickHouse/ClickHouse/pull/58252) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Remove mayBenefitFromIndexForIn [#58265](https://github.com/ClickHouse/ClickHouse/pull/58265) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Allow a few retries when committing a part during shutdown [#58269](https://github.com/ClickHouse/ClickHouse/pull/58269) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Revert [#58267](https://github.com/ClickHouse/ClickHouse/issues/58267) [#58274](https://github.com/ClickHouse/ClickHouse/pull/58274) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

@ -1,206 +1,206 @@

---
slug: /en/development/build-cross-s390x
sidebar_position: 69
title: How to Build, Run and Debug ClickHouse on Linux for s390x (zLinux)
sidebar_label: Build on Linux for s390x (zLinux)
---

As of this writing (2023/3/10), building for s390x is considered experimental: not all features can be enabled, some features are broken, and the port is under active development.

## Building

As boringssl does not support s390x, ClickHouse uses OpenSSL on this platform, with two related build options:
- By default, the s390x build will dynamically link to OpenSSL libraries. It will build the OpenSSL shared objects itself, so it's not necessary to install OpenSSL beforehand. (This option is recommended in all cases.)
- Another option is to build OpenSSL in-tree. In this case, two build flags need to be supplied to cmake:

```bash
-DENABLE_OPENSSL_DYNAMIC=0 -DENABLE_OPENSSL=1
```
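
For example, a complete in-tree OpenSSL configure step might look like this; a sketch that simply combines the flags above with the toolchain file used below, and assumes an out-of-source build directory inside the checkout:

```bash
# Configure an s390x build with in-tree OpenSSL (sketch; run from a build directory):
cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake \
      -DENABLE_OPENSSL_DYNAMIC=0 -DENABLE_OPENSSL=1 ..
```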

These instructions assume that the host machine is x86_64 and has all the tooling required to build natively based on the [build instructions](../development/build.md). It also assumes that the host is Ubuntu 22.04, but the following instructions should also work on Ubuntu 20.04.

In addition to the tooling used to build natively, the following additional packages need to be installed:

```bash
apt-get install binutils-s390x-linux-gnu libc6-dev-s390x-cross gcc-s390x-linux-gnu binfmt-support qemu-user-static
```

If you wish to cross-compile Rust code, install the Rust cross-compilation target for s390x:

```bash
rustup target add s390x-unknown-linux-gnu
```
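
To confirm the target is available, you can list the installed targets; this is standard `rustup` usage rather than anything ClickHouse-specific:

```bash
rustup target list --installed | grep s390x   # expected output: s390x-unknown-linux-gnu
```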

To build for s390x:

```bash
cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake ..
ninja
```
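
After the build finishes, it can be worth checking that the produced binary really targets s390x; the exact `file` output wording varies between versions:

```bash
file programs/clickhouse
# expected output similar to: ELF 64-bit MSB executable, IBM S/390, ...
```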

## Running

Once built, the binary can be run with, e.g.:

```bash
qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse
```
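
As a quick smoke test, the same invocation can run `clickhouse local` with a trivial query (a sketch; any query will do):

```bash
qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse local --query "SELECT version()"
```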

## Debugging

Install LLDB:

```bash
apt-get install lldb-15
```

To debug an s390x executable, run clickhouse under QEMU in debug mode:

```bash
qemu-s390x-static -g 31338 -L /usr/s390x-linux-gnu ./clickhouse
```

In another shell, run LLDB and attach. Replace `<Clickhouse Parent Directory>` and `<build directory>` with the values corresponding to your environment.

```bash
lldb-15
(lldb) target create ./clickhouse
Current executable set to '/<Clickhouse Parent Directory>/ClickHouse/<build directory>/programs/clickhouse' (s390x).
(lldb) settings set target.source-map <build directory> /<Clickhouse Parent Directory>/ClickHouse
(lldb) gdb-remote 31338
Process 1 stopped
* thread #1, stop reason = signal SIGTRAP
    frame #0: 0x0000004020e74cd0
->  0x4020e74cd0: lgr    %r2, %r15
    0x4020e74cd4: aghi   %r15, -160
    0x4020e74cd8: xc     0(8,%r15), 0(%r15)
    0x4020e74cde: brasl  %r14, 275429939040
(lldb) b main
Breakpoint 1: 9 locations.
(lldb) c
Process 1 resuming
Process 1 stopped
* thread #1, stop reason = breakpoint 1.1
    frame #0: 0x0000004005cd9fc0 clickhouse`main(argc_=1, argv_=0x0000004020e594a8) at main.cpp:450:17
   447  #if !defined(FUZZING_MODE)
   448  int main(int argc_, char ** argv_)
   449  {
-> 450      inside_main = true;
   451      SCOPE_EXIT({ inside_main = false; });
   452
   453      /// PHDR cache is required for query profiler to work reliably
```

## Visual Studio Code integration

- The [CodeLLDB](https://github.com/vadimcn/vscode-lldb) extension is required for visual debugging.
- The [Command Variable](https://github.com/rioj7/command-variable) extension can help with dynamic launches when using [CMake Variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md).
- Make sure to set the backend to your LLVM installation, e.g. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`.
- Make sure to run the clickhouse executable in debug mode prior to launch. (It is also possible to create a `preLaunchTask` that automates this.)

### Example configurations

#### cmake-variants.yaml
```yaml
buildType:
  default: relwithdebinfo
  choices:
    debug:
      short: Debug
      long: Emit debug information
      buildType: Debug
    release:
      short: Release
      long: Optimize generated code
      buildType: Release
    relwithdebinfo:
      short: RelWithDebInfo
      long: Release with Debug Info
      buildType: RelWithDebInfo
    minsizerel:
      short: MinSizeRel
      long: Minimum Size Release
      buildType: MinSizeRel

toolchain:
  default: default
  description: Select toolchain
  choices:
    default:
      short: x86_64
      long: x86_64
    s390x:
      short: s390x
      long: s390x
      settings:
        CMAKE_TOOLCHAIN_FILE: cmake/linux/toolchain-s390x.cmake
```

#### launch.json
```json
{
    "version": "0.2.0",
    "configurations": [
        {
            "type": "lldb",
            "request": "custom",
            "name": "(lldb) Launch s390x with qemu",
            "targetCreateCommands": ["target create ${command:cmake.launchTargetPath}"],
            "processCreateCommands": ["gdb-remote 2159"],
            "preLaunchTask": "Run ClickHouse"
        }
    ]
}
```

#### settings.json
This would also put different builds under different subfolders of the `build` folder.
```json
{
    "cmake.buildDirectory": "${workspaceFolder}/build/${buildKitVendor}-${buildKitVersion}-${variant:toolchain}-${variant:buildType}",
    "lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"
}
```

#### run-debug.sh
```sh
#! /bin/sh
echo 'Starting debugger session'
cd "$1"
qemu-s390x-static -g 2159 -L /usr/s390x-linux-gnu "$2" "$3" "$4"
```

#### tasks.json
Defines a task to run the compiled executable in `server` mode under a `tmp` folder next to the binaries, with the configuration from `programs/server/config.xml`.
```json
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Run ClickHouse",
            "type": "shell",
            "isBackground": true,
            "command": "${workspaceFolder}/.vscode/run-debug.sh",
            "args": [
                "${command:cmake.launchTargetDirectory}/tmp",
                "${command:cmake.launchTargetPath}",
                "server",
                "--config-file=${workspaceFolder}/programs/server/config.xml"
            ],
            "problemMatcher": [
                {
                    "pattern": [
                        {
                            "regexp": ".",
                            "file": 1,
                            "location": 2,
                            "message": 3
                        }
                    ],
                    "background": {
                        "activeOnStart": true,
                        "beginsPattern": "^Starting debugger session",
                        "endsPattern": ".*"
                    }
                }
            ]
        }
    ]
}
```
@ -3,7 +3,7 @@ slug: /en/development/build-osx
sidebar_position: 65
sidebar_label: Build on macOS
title: How to Build ClickHouse on macOS
description: How to build ClickHouse on macOS
description: How to build ClickHouse on macOS for macOS
---

:::info You don't have to build ClickHouse yourself!
@ -7,42 +7,39 @@ description: Prerequisites and an overview of how to build ClickHouse

# Getting Started Guide for Building ClickHouse

The building of ClickHouse is supported on Linux, FreeBSD and macOS.
ClickHouse can be built on Linux, FreeBSD and macOS. If you use Windows, you can still build ClickHouse in a virtual machine running Linux, e.g. [VirtualBox](https://www.virtualbox.org/) with Ubuntu.

If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T.

ClickHouse cannot work or build on a 32-bit system. You should acquire access to a 64-bit system and you can continue reading.
ClickHouse requires a 64-bit system to compile and run; 32-bit systems do not work.

## Creating a Repository on GitHub {#creating-a-repository-on-github}

To start working with ClickHouse repository you will need a GitHub account.
To start developing for ClickHouse you will need a [GitHub](https://github.com/) account. Please also generate an SSH key locally (if you don't have one already) and upload the public key to GitHub, as this is a prerequisite for contributing patches.

You probably already have one, but if you do not, please register at https://github.com. In case you do not have SSH keys, you should generate them and then upload them on GitHub. It is required for sending over your patches. It is also possible to use the same SSH keys that you use with any other SSH servers - probably you already have those.
Next, create a fork of the [ClickHouse repository](https://github.com/ClickHouse/ClickHouse/) in your personal account by clicking the "fork" button in the upper right corner.

Create a fork of ClickHouse repository. To do that please click on the “fork” button in the upper right corner at https://github.com/ClickHouse/ClickHouse. It will fork your own copy of ClickHouse/ClickHouse to your account.
To contribute, e.g. a fix for an issue or a feature, please commit your changes to a branch in your fork, then create a "pull request" with the changes to the main repository.

The development process consists of first committing the intended changes into your fork of ClickHouse and then creating a “pull request” for these changes to be accepted into the main repository (ClickHouse/ClickHouse).
For working with Git repositories, please install `git`. In Ubuntu run these commands in a terminal:

To work with Git repositories, please install `git`. To do that in Ubuntu you would run in the command line terminal:
```sh
sudo apt update
sudo apt install git
```

sudo apt update
sudo apt install git

A brief manual on using Git can be found [here](https://education.github.com/git-cheat-sheet-education.pdf).
For a detailed manual on Git see [here](https://git-scm.com/book/en/v2).
A cheatsheet for using Git can be found [here](https://education.github.com/git-cheat-sheet-education.pdf). The detailed manual for Git is [here](https://git-scm.com/book/en/v2).

## Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine}

Next, you need to download the source files onto your working machine. This is called “to clone a repository” because it creates a local copy of the repository on your working machine.
First, download the source files to your working machine, i.e. clone the repository:

Run in your terminal:
```sh
git clone git@github.com:your_github_username/ClickHouse.git # replace placeholder with your GitHub user name
cd ClickHouse
```

git clone git@github.com:your_github_username/ClickHouse.git # replace placeholder with your GitHub user name
cd ClickHouse

This command creates a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory after the URL, it is important that this path does not contain whitespaces as it may lead to problems with the build later on.

This command will create a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory (after the URL), it is important that this path does not contain whitespaces as it may lead to problems with the build system.

To make library dependencies available for the build, the ClickHouse repository uses Git submodules, i.e. references to external repositories. These are not checked out by default. To do so, you can either
The ClickHouse repository uses Git submodules, i.e. references to external repositories (usually 3rd party libraries used by ClickHouse). These are not checked out by default. To do so, you can either

- run `git clone` with option `--recurse-submodules`,

@ -52,7 +49,7 @@ To make library dependencies available for the build, the ClickHouse repository

You can check the Git status with the command: `git submodule status`.
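
For example, a fresh clone that checks out all submodules in one step, followed by the status check, might look like this (reusing the placeholder user name from above):

```sh
git clone --recurse-submodules git@github.com:your_github_username/ClickHouse.git
cd ClickHouse
git submodule status | head -n 3   # each line shows a commit hash and a submodule path
```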

If you get the following error message:
If you get the following error message

Permission denied (publickey).
fatal: Could not read from remote repository.
@ -60,7 +57,7 @@ If you get the following error message:
Please make sure you have the correct access rights
and the repository exists.

It generally means that the SSH keys for connecting to GitHub are missing. These keys are normally located in `~/.ssh`. For SSH keys to be accepted you need to upload them in the settings section of GitHub UI.
it generally means that the SSH keys for connecting to GitHub are missing. These keys are normally located in `~/.ssh`. For SSH keys to be accepted you need to upload them in GitHub's settings.

You can also clone the repository via https protocol:

@ -74,12 +71,17 @@ You can also add original ClickHouse repo address to your local repository to pu

After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`.

:::note
Instructions below assume you are building on Linux. If you are cross-compiling or building on macOS, please also check for operating system and architecture specific guides, such as building [on macOS for macOS](build-osx.md), [on Linux for macOS](build-cross-osx.md), [on Linux for Linux/RISC-V](build-cross-riscv.md) and so on.
:::

## Build System {#build-system}

ClickHouse uses CMake and Ninja for building.

CMake - a meta-build system that can generate Ninja files (build tasks).
Ninja - a smaller build system with a focus on the speed used to execute those cmake generated tasks.
- CMake - a meta-build system that can generate Ninja files (build tasks).

- Ninja - a smaller build system with a focus on speed, used to execute those cmake-generated tasks.

To install on Ubuntu, Debian or Mint run `sudo apt install cmake ninja-build`.
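
As a minimal sketch of how the two tools interact (generic CMake/Ninja usage; the ClickHouse-specific options are covered in the build instructions):

```sh
mkdir build && cd build
cmake -G Ninja ..   # CMake generates the Ninja build files
ninja clickhouse    # Ninja executes the generated tasks; the `clickhouse` target is assumed, plain `ninja` builds everything
```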

@ -489,7 +489,7 @@ When using functions with response codes or `errno`, always check the result and

``` cpp
if (0 != close(fd))
    throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
    throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name);
```

You can use assert to check invariants in code.

@ -67,7 +67,6 @@ Engines in the family:

Engines in the family:

- [Distributed](../../engines/table-engines/special/distributed.md#distributed)
- [MaterializedView](../../engines/table-engines/special/materializedview.md#materializedview)
- [Dictionary](../../engines/table-engines/special/dictionary.md#dictionary)
- [Merge](../../engines/table-engines/special/merge.md#merge)
- [File](../../engines/table-engines/special/file.md#file)

@ -212,5 +212,5 @@ ORDER BY key ASC
```

### More information on Joins
- [`join_algorithm` setting](/docs/en/operations/settings/settings.md#settings-join_algorithm)
- [`join_algorithm` setting](/docs/en/operations/settings/settings.md#join_algorithm)
- [JOIN clause](/docs/en/sql-reference/statements/select/join.md)

@ -236,7 +236,7 @@ libhdfs3 support HDFS namenode HA.

## Storage Settings {#storage-settings}

- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

@ -54,7 +54,7 @@ Optional parameters:

- `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `kafka_num_consumers` — The number of consumers per table. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed. Default: `1`.
- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size).
- `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). Default: `0`.
- `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block. Default: `0`.
- `kafka_client_id` — Client identifier. Empty by default.
@ -151,7 +151,7 @@ Example:

SELECT level, sum(total) FROM daily GROUP BY level;
```
To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#settings-max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.

To stop receiving topic data or to change the conversion logic, detach the materialized view:

@ -58,7 +58,7 @@ Optional parameters:
- `nats_reconnect_wait` – Amount of time in milliseconds to sleep between each reconnect attempt. Default: `5000`.
- `nats_server_list` - Server list for connection. Can be specified to connect to NATS cluster.
- `nats_skip_broken_messages` - NATS message parser tolerance to schema-incompatible messages per block. Default: `0`. If `nats_skip_broken_messages = N` then the engine skips *N* NATS messages that cannot be parsed (a message equals a row of data).
- `nats_max_block_size` - Number of rows collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
- `nats_max_block_size` - Number of rows collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size).
- `nats_flush_interval_ms` - Timeout for flushing data read from NATS. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
- `nats_username` - NATS username.
- `nats_password` - NATS password.

@ -65,7 +65,7 @@ Optional parameters:
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data). Default: `0`.
- `rabbitmq_max_block_size` - Number of rows collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
- `rabbitmq_max_block_size` - Number of rows collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size).
- `rabbitmq_flush_interval_ms` - Timeout for flushing data from RabbitMQ. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
- `rabbitmq_address` - Address for connection. Use either this setting or `rabbitmq_host_port`.

@ -222,7 +222,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)

## Storage Settings {#storage-settings}

- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

@ -12,7 +12,7 @@ In most cases you do not need a partition key, and in most other cases you do no
You should never use too granular of partitioning. Don't partition your data by client identifiers or names. Instead, make a client identifier or name the first column in the ORDER BY expression.
:::

Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well.
Partitioning is available for the [MergeTree family tables](../../../engines/table-engines/mergetree-family/mergetree.md), including [replicated tables](../../../engines/table-engines/mergetree-family/replication.md) and [materialized views](../../../sql-reference/statements/create/view.md#materialized-view).

A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. Partitions improve performance for queries containing a partitioning key because ClickHouse will filter for that partition before selecting the parts and granules within the partition.
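
For example, a monthly partitioning key for a MergeTree table might look like this (a sketch; the table and column names are illustrative):

```sql
CREATE TABLE visits
(
    VisitDate Date,
    UserID UInt64
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(VisitDate)  -- one partition per calendar month
ORDER BY UserID;
```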

@ -520,7 +520,7 @@ Indexes of type `set` can be utilized by all functions. The other index types ar
| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](/docs/en/sql-reference/functions/array-functions#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |

@ -1,13 +1,16 @@
---
slug: /en/engines/table-engines/special/distributed
sidebar_label: "Distributed"
sidebar_position: 10
sidebar_label: Distributed
slug: /en/engines/table-engines/special/distributed
---

# Distributed Table Engine

Tables with Distributed engine do not store any data of their own, but allow distributed query processing on multiple servers.
Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any.
:::warning
To create a distributed table engine in the cloud, you can use the [remote and remoteSecure](../../../sql-reference/table-functions/remote) table functions. The `Distributed(...)` syntax cannot be used in ClickHouse Cloud.
:::

Tables with Distributed engine do not store any data of their own, but allow distributed query processing on multiple servers. Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any.

## Creating a Table {#distributed-creating-a-table}

@ -22,6 +25,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
```

### From a Table {#distributed-from-a-table}

When the `Distributed` table is pointing to a table on the current server you can adopt that table's schema:

``` sql
@ -48,7 +52,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2

Specifying the `sharding_key` is necessary for the following:

- For `INSERTs` into a distributed table (as the table engine needs the `sharding_key` to determine how to split the data). However, if `insert_distributed_one_random_shard` setting is enabled, then `INSERTs` do not need the sharding key
- For `INSERTs` into a distributed table (as the table engine needs the `sharding_key` to determine how to split the data). However, if `insert_distributed_one_random_shard` setting is enabled, then `INSERTs` do not need the sharding key.
- For use with `optimize_skip_unused_shards` as the `sharding_key` is necessary to determine what shards should be queried
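
For example, a sharded table over the `logs` cluster configured below might be declared like this; a sketch that reuses the `default.hits` table from the example that follows, with `rand()` as one possible sharding key:

``` sql
CREATE TABLE hits_all AS default.hits
ENGINE = Distributed(logs, default, hits, rand());
```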

#### policy_name

@ -108,7 +112,7 @@ Specifying the `sharding_key` is necessary for the following:

For **Insert limit settings** (`..._insert`) see also:

- [distributed_foreground_insert](../../../operations/settings/settings.md#distributed_foreground_insert) setting
- [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) setting
- [prefer_localhost_replica](../../../operations/settings/settings.md#prefer-localhost-replica) setting
- `bytes_to_throw_insert` handled before `bytes_to_delay_insert`, so you should not set it to a value less than `bytes_to_delay_insert`
:::

@ -122,9 +126,7 @@ SETTINGS
    fsync_directories=0;
```

Data will be read from all servers in the `logs` cluster, from the `default.hits` table located on every server in the cluster.
Data is not only read but is partially processed on the remote servers (to the extent that this is possible).
For example, for a query with `GROUP BY`, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.
Data will be read from all servers in the `logs` cluster, from the `default.hits` table located on every server in the cluster. Data is not only read but is partially processed on the remote servers (to the extent that this is possible). For example, for a query with `GROUP BY`, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.

Instead of the database name, you can use a constant expression that returns a string. For example: `currentDatabase()`.

@ -183,9 +185,7 @@ Clusters are configured in the [server configuration file](../../../operations/c
</remote_servers>
```

Here a cluster is defined with the name `logs` that consists of two shards, each of which contains two replicas.
Shards refer to the servers that contain different parts of the data (in order to read all the data, you must access all the shards).
Replicas are duplicating servers (in order to read all the data, you can access the data on any one of the replicas).
Here a cluster is defined with the name `logs` that consists of two shards, each of which contains two replicas. Shards refer to the servers that contain different parts of the data (in order to read all the data, you must access all the shards). Replicas are duplicating servers (in order to read all the data, you can access the data on any one of the replicas).

Cluster names must not contain dots.

@ -198,9 +198,7 @@ The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `com
- `secure` - Whether to use a secure SSL/TLS connection. Usually also requires specifying the port (the default secure port is `9440`). The server should listen on `<tcp_port_secure>9440</tcp_port_secure>` and be configured with correct certificates.
- `compression` - Use data compression. Default value: `true`.

When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting.
If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times.
This works in favour of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly.
When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#load_balancing) setting. If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times. This works in favour of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly.

You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard.

@ -245,7 +243,7 @@ If the server ceased to exist or had a rough restart (for example, due to a hard

When querying a `Distributed` table, `SELECT` queries are sent to all shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.

When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#max_parallel_replicas).

To learn more about how distributed `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation.

@ -101,8 +101,8 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da

## Settings {#settings}

- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.

@ -41,7 +41,7 @@ Optional parameters:

- `poll_timeout_ms` - Timeout for single poll from log file. Default: [stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms).
- `poll_max_batch_size` — Maximum amount of records to be polled in a single poll. Default: [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
- `max_block_size` — The maximum batch size (in records) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
- `max_block_size` — The maximum batch size (in records) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size).
- `max_threads` - Number of max threads to parse files, default is 0, which means the number will be max(1, physical_cpu_cores / 4).
- `poll_directory_watch_events_backoff_init` - The initial sleep value for watch directory thread. Default: `500`.
- `poll_directory_watch_events_backoff_max` - The max sleep value for watch directory thread. Default: `32000`.

@ -1,9 +0,0 @@
---
slug: /en/engines/table-engines/special/materializedview
sidebar_position: 100
sidebar_label: MaterializedView
---

# MaterializedView Table Engine

Used for implementing materialized views (for more information, see [CREATE VIEW](../../../sql-reference/statements/create/view.md#materialized)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses that engine.

@ -25,8 +25,7 @@ The steps below will easily work on a local install of ClickHouse too. The only

1. Let's see what the data looks like. The `s3cluster` table function returns a table, so we can `DESCRIBE` the result:

```sql
DESCRIBE s3Cluster(
    'default',
DESCRIBE s3(
    'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst',
    'JSONLines'
);
@ -35,29 +34,29 @@ DESCRIBE s3Cluster(
ClickHouse infers the following schema from the JSON file:

```response
┌─name────────────────┬─type─────────────────────────────────┐
│ id                  │ Nullable(String)                     │
│ fetch_date          │ Nullable(Int64)                      │
│ upload_date         │ Nullable(String)                     │
│ title               │ Nullable(String)                     │
│ uploader_id         │ Nullable(String)                     │
│ uploader            │ Nullable(String)                     │
│ uploader_sub_count  │ Nullable(Int64)                      │
│ is_age_limit        │ Nullable(Bool)                       │
│ view_count          │ Nullable(Int64)                      │
│ like_count          │ Nullable(Int64)                      │
│ dislike_count       │ Nullable(Int64)                      │
│ is_crawlable        │ Nullable(Bool)                       │
│ is_live_content     │ Nullable(Bool)                       │
│ has_subtitles       │ Nullable(Bool)                       │
│ is_ads_enabled      │ Nullable(Bool)                       │
│ is_comments_enabled │ Nullable(Bool)                       │
│ description         │ Nullable(String)                     │
│ rich_metadata       │ Array(Map(String, Nullable(String))) │
│ super_titles        │ Array(Map(String, Nullable(String))) │
│ uploader_badges     │ Nullable(String)                     │
│ video_badges        │ Nullable(String)                     │
└─────────────────────┴──────────────────────────────────────┘
┌─name────────────────┬─type──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ id                  │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ fetch_date          │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ upload_date         │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ title               │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ uploader_id         │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ uploader            │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ uploader_sub_count  │ Nullable(Int64)                                                                                                                            │              │                    │         │                  │                │
│ is_age_limit        │ Nullable(Bool)                                                                                                                             │              │                    │         │                  │                │
│ view_count          │ Nullable(Int64)                                                                                                                            │              │                    │         │                  │                │
│ like_count          │ Nullable(Int64)                                                                                                                            │              │                    │         │                  │                │
│ dislike_count       │ Nullable(Int64)                                                                                                                            │              │                    │         │                  │                │
│ is_crawlable        │ Nullable(Bool)                                                                                                                             │              │                    │         │                  │                │
│ is_live_content     │ Nullable(Bool)                                                                                                                             │              │                    │         │                  │                │
│ has_subtitles       │ Nullable(Bool)                                                                                                                             │              │                    │         │                  │                │
│ is_ads_enabled      │ Nullable(Bool)                                                                                                                             │              │                    │         │                  │                │
│ is_comments_enabled │ Nullable(Bool)                                                                                                                             │              │                    │         │                  │                │
│ description         │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ rich_metadata       │ Array(Tuple(call Nullable(String), content Nullable(String), subtitle Nullable(String), title Nullable(String), url Nullable(String)))    │              │                    │         │                  │                │
│ super_titles        │ Array(Tuple(text Nullable(String), url Nullable(String)))                                                                                  │              │                    │         │                  │                │
│ uploader_badges     │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
│ video_badges        │ Nullable(String)                                                                                                                           │              │                    │         │                  │                │
└─────────────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

2. Based on the inferred schema, we cleaned up the data types and added a primary key. Define the following table:

@ -82,13 +81,13 @@ CREATE TABLE youtube
    `is_ads_enabled` Bool,
    `is_comments_enabled` Bool,
    `description` String,
    `rich_metadata` Array(Map(String, String)),
    `super_titles` Array(Map(String, String)),
    `rich_metadata` Array(Tuple(call String, content String, subtitle String, title String, url String)),
    `super_titles` Array(Tuple(text String, url String)),
    `uploader_badges` String,
    `video_badges` String
)
ENGINE = MergeTree
ORDER BY (uploader, upload_date);
ORDER BY (uploader, upload_date)
```

3. The following command streams the records from the S3 files into the `youtube` table.
|
||||
|
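Before running the full load, it can be worth sanity-checking the source with a row count over the same `s3` call — a sketch, not the guide's exact command:

```sql
-- Counts records across all source files, using the same URL and format as above.
SELECT count()
FROM s3(
    'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst',
    'JSONLines'
);
```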
@ -478,6 +478,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow using whitespace or tab as a field delimiter in CSV strings. Default value - `false`.
- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - Allow a variable number of columns in CSV format, ignoring extra columns and using default values for missing columns. Default value - `false`.
- [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - Allow setting a column to its default value when deserialization of a CSV field fails on a bad value. Default value - `false`.
- [input_format_csv_try_infer_numbers_from_strings](/docs/en/operations/settings/settings-formats.md/#input_format_csv_try_infer_numbers_from_strings) - Try to infer numbers from string fields during schema inference. Default value - `false`.

## CSVWithNames {#csvwithnames}

@ -167,7 +167,7 @@ For successful requests that do not return a data table, an empty response body

You can use compression to reduce network traffic when transmitting a large amount of data or for creating dumps that are immediately compressed.

You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need the `clickhouse-compressor` program to work with it. It is installed with the `clickhouse-client` package. To increase the efficiency of data insertion, you can disable server-side checksum verification by using the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) setting.
You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need the `clickhouse-compressor` program to work with it. It is installed with the `clickhouse-client` package. To increase the efficiency of data insertion, you can disable server-side checksum verification by using the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#http_native_compression_disable_checksumming_on_decompress) setting.

If you specify `compress=1` in the URL, the server will compress the data it sends to you. If you specify `decompress=1` in the URL, the server will decompress the data which you pass in the `POST` method.

@ -183,7 +183,7 @@ You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP

- `snappy`

To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`.
In order for ClickHouse to compress the response, enable compression with the [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting and append the `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#settings-http_zlib_compression_level) setting for all compression methods.
In order for ClickHouse to compress the response, enable compression with the [enable_http_compression](../operations/settings/settings.md#enable_http_compression) setting and append the `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#http_zlib_compression_level) setting for all compression methods.

:::info
Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`) and you might get decompressed data even if you use the compression settings correctly.

@ -285,7 +285,7 @@ For information about other parameters, see the section “SET”.

Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to add the `session_id` GET parameter to the request. You can use any string as the session ID. By default, the session is terminated after 60 seconds of inactivity. To change this timeout, modify the `default_session_timeout` setting in the server configuration, or add the `session_timeout` GET parameter to the request. To check the session status, use the `session_check=1` parameter. Only one query at a time can be executed within a single session.

You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example of the header sequence:
You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#send_progress_in_http_headers). Example of the header sequence:

``` text
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128","elapsed_ns":"662334"}

@ -496,7 +496,7 @@ Next are the configuration methods for different `type`.

The `query` value is a predefined query of `predefined_query_handler`, which is executed by ClickHouse when an HTTP request is matched and the result of the query is returned. It is a required configuration.

The following example defines the values of the [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_final_threads` settings, then queries the system table to check whether these settings were set successfully.
The following example defines the values of the [max_threads](../operations/settings/settings.md#max_threads) and `max_final_threads` settings, then queries the system table to check whether these settings were set successfully.

:::note
To keep the default `handlers` such as `query`, `play`, `ping`, add the `<defaults/>` rule.

@ -539,7 +539,7 @@ In `dynamic_query_handler`, the query is written in the form of parameter of the

ClickHouse extracts and executes the value corresponding to the `query_param_name` value in the URL of the HTTP request. The default value of `query_param_name` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in.

To experiment with this functionality, the example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_final_threads` and queries whether the settings were set successfully.
To experiment with this functionality, the example defines the values of [max_threads](../operations/settings/settings.md#max_threads) and `max_final_threads` and queries whether the settings were set successfully.

Example:

@ -25,6 +25,7 @@ ClickHouse server provides embedded visual interfaces for power users:

- Play UI: open `/play` in the browser;
- Advanced Dashboard: open `/dashboard` in the browser;
- Binary symbols viewer for ClickHouse engineers: open `/binary` in the browser;

There is also a wide range of third-party libraries for working with ClickHouse:

@ -834,6 +834,27 @@ $$)
└──────────────┴───────────────┘
```

#### CSV settings {#csv-settings}

##### input_format_csv_try_infer_numbers_from_strings

Enabling this setting allows inferring numbers from string values.

This setting is disabled by default.

**Example:**

```sql
SET input_format_csv_try_infer_numbers_from_strings = 1;
DESC format(CSV, '"42","42.42"');
```
```response
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ c1   │ Nullable(Int64)   │              │                    │         │                  │                │
│ c2   │ Nullable(Float64) │              │                    │         │                  │                │
└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

### TSV/TSKV {#tsv-tskv}

In TSV/TSKV formats ClickHouse extracts the column value from the row according to tabular delimiters and then parses the extracted value using
@ -1846,3 +1867,102 @@ DESC format(JSONAsString, '{"x" : 42, "y" : "Hello, World!"}') SETTINGS allow_ex
│ json │ Object('json') │              │                    │         │                  │                │
└──────┴────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

## Schema inference modes {#schema-inference-modes}

Schema inference from a set of data files can work in two different modes: `default` and `union`.
The mode is controlled by the setting `schema_inference_mode`.

### Default mode {#default-schema-inference-mode}

In default mode, ClickHouse assumes that all files have the same schema and tries to infer the schema by reading the files one by one until it succeeds.

Example:

Let's say we have 3 files `data1.jsonl`, `data2.jsonl` and `data3.jsonl` with the following content:

`data1.jsonl`:
```json
{"field1" : 1, "field2" : null}
{"field1" : 2, "field2" : null}
{"field1" : 3, "field2" : null}
```

`data2.jsonl`:
```json
{"field1" : 4, "field2" : "Data4"}
{"field1" : 5, "field2" : "Data5"}
{"field1" : 6, "field2" : "Data5"}
```

`data3.jsonl`:
```json
{"field1" : 7, "field2" : "Data7", "field3" : [1, 2, 3]}
{"field1" : 8, "field2" : "Data8", "field3" : [4, 5, 6]}
{"field1" : 9, "field2" : "Data9", "field3" : [7, 8, 9]}
```

Let's try to use schema inference on these 3 files:
```sql
:) DESCRIBE file('data{1,2,3}.jsonl') SETTINGS schema_inference_mode='default'
```

Result:
```text
┌─name───┬─type─────────────┐
│ field1 │ Nullable(Int64)  │
│ field2 │ Nullable(String) │
└────────┴──────────────────┘
```

As we can see, we don't have `field3` from file `data3.jsonl`.
This happens because ClickHouse first tried to infer the schema from file `data1.jsonl`, failed because field `field2` contains only nulls,
and then tried to infer the schema from `data2.jsonl` and succeeded, so the data from file `data3.jsonl` wasn't read.

### Union mode {#union-schema-inference-mode}

In union mode, ClickHouse assumes that files can have different schemas, so it infers the schemas of all files and then unions them into a common schema.

Let's say we have 3 files `data1.jsonl`, `data2.jsonl` and `data3.jsonl` with the following content:

`data1.jsonl`:
```json
{"field1" : 1}
{"field1" : 2}
{"field1" : 3}
```

`data2.jsonl`:
```json
{"field2" : "Data4"}
{"field2" : "Data5"}
{"field2" : "Data5"}
```

`data3.jsonl`:
```json
{"field3" : [1, 2, 3]}
{"field3" : [4, 5, 6]}
{"field3" : [7, 8, 9]}
```

Let's try to use schema inference on these 3 files:
```sql
:) DESCRIBE file('data{1,2,3}.jsonl') SETTINGS schema_inference_mode='union'
```

Result:
```text
┌─name───┬─type───────────────────┐
│ field1 │ Nullable(Int64)        │
│ field2 │ Nullable(String)       │
│ field3 │ Array(Nullable(Int64)) │
└────────┴────────────────────────┘
```

As we can see, we have all fields from all files.

Note:
- As some of the files may not contain some columns from the resulting schema, union mode is supported only for formats that support reading a subset of columns (like JSONEachRow, Parquet, TSVWithNames, etc.) and won't work for other formats (like CSV, TSV, JSONCompactEachRow, etc.).
- If ClickHouse cannot infer the schema from one of the files, an exception will be thrown.
- If you have a lot of files, reading the schema from all of them can take a lot of time.
@ -406,7 +406,7 @@ RESTORE TABLE data AS data_restored FROM Disk('s3_plain', 'cloud_backup');
:::note
But keep in mind that:
- This disk should not be used for `MergeTree` itself, only for `BACKUP`/`RESTORE`
- If your tables are backed by S3 storage, it doesn't use `CopyObject` calls to copy parts to the destination bucket; instead, it downloads and uploads them, which is very inefficient. Prefer to use the `BACKUP ... TO S3(<endpoint>)` syntax for this use case.
- If your tables are backed by S3 storage and the types of the disks are different, it doesn't use `CopyObject` calls to copy parts to the destination bucket; instead, it downloads and uploads them, which is very inefficient. Prefer to use the `BACKUP ... TO S3(<endpoint>)` syntax for this use case.
:::

## Alternatives

@ -64,4 +64,4 @@ You can configure ClickHouse to export metrics to [Prometheus](https://prometheu

Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/ping`. If the server is available, it responds with `200 OK`.

To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap.
To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](../operations/settings/settings.md#max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap.

@ -42,7 +42,7 @@ To analyze the `trace_log` system table:

- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting-started/install.md#install-from-deb-packages).

- Allow introspection functions by the [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting.
- Allow introspection functions by the [allow_introspection_functions](../../operations/settings/settings.md#allow_introspection_functions) setting.

For security reasons, introspection functions are disabled by default.

@ -29,6 +29,10 @@ Transactionally inconsistent caching is traditionally provided by client tools o
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
This reduces maintenance effort and avoids redundancy.

:::note
Security consideration: The cached query result is tied to the user executing it. Authorization checks are performed when the query is executed. This means that if there are any alterations to the user's role or permissions between the time the query is cached and when the cache is accessed, the result will not reflect these changes. We recommend using different users to distinguish between different levels of access, instead of actively toggling roles for a single user between queries, as this practice may lead to unexpected query results.
:::

## Configuration Settings and Usage

Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the
@ -99,7 +103,7 @@ It is also possible to limit the cache usage of individual users using [settings
constraints](settings/constraints-on-settings.md). More specifically, you can restrict the maximum amount of memory (in bytes) a user may
allocate in the query cache and the maximum number of stored query results. For that, first provide configurations
[query_cache_max_size_in_bytes](settings/settings.md#query-cache-max-size-in-bytes) and
[query_cache_max_entries](settings/settings.md#query-cache-size-max-entries) in a user profile in `users.xml`, then make both settings
[query_cache_max_entries](settings/settings.md#query-cache-max-entries) in a user profile in `users.xml`, then make both settings
readonly:

``` xml
@ -140,7 +144,7 @@ value can be specified at session, profile or query level using setting [query_c
Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads
from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries).

ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#settings-max_block_size) rows. Due to filtering, aggregation,
ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation,
etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting
[query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks
are squashed (if they are tiny) or split (if they are large) into blocks of 'max_block_size' size before insertion into the query result
@ -472,6 +472,39 @@ The value 0 means that you can delete all tables without any restrictions.
``` xml
<max_table_size_to_drop>0</max_table_size_to_drop>
```

## max\_database\_num\_to\_warn {#max-database-num-to-warn}
If the number of attached databases exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 1000

**Example**

``` xml
<max_database_num_to_warn>50</max_database_num_to_warn>
```

## max\_table\_num\_to\_warn {#max-table-num-to-warn}
If the number of attached tables exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 5000

**Example**

``` xml
<max_table_num_to_warn>400</max_table_num_to_warn>
```

## max\_part\_num\_to\_warn {#max-part-num-to-warn}
If the number of active parts exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 100000

**Example**

``` xml
<max_part_num_to_warn>400</max_part_num_to_warn>
```
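
Warnings produced by these thresholds accumulate in `system.warnings` and can be inspected directly:

```sql
-- Lists the warning messages the server has currently raised.
SELECT message FROM system.warnings;
```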

## max_temporary_data_on_disk_size

@ -1650,7 +1683,7 @@ Default value: `0.5`.

Asynchronous loading of databases and tables.

If `true`, all non-system databases with `Ordinary`, `Atomic` and `Replicated` engines will be loaded asynchronously after the ClickHouse server starts up. See the `system.async_loader` table and the `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table that is not yet loaded will wait for exactly this table to be started up. If a load job fails, the query will rethrow an error (instead of shutting down the whole server, as happens with `async_load_databases = false`). A table that is awaited by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up.
If `true`, all non-system databases with `Ordinary`, `Atomic` and `Replicated` engines will be loaded asynchronously after the ClickHouse server starts up. See the `system.asynchronous_loader` table and the `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table that is not yet loaded will wait for exactly this table to be started up. If a load job fails, the query will rethrow an error (instead of shutting down the whole server, as happens with `async_load_databases = false`). A table that is awaited by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up.

If `false`, all databases are loaded when the server starts.

@ -1976,7 +2009,7 @@ Data for the query cache is allocated in DRAM. If memory is scarce, make sure to

## query_thread_log {#query_thread_log}

Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#settings-log-query-threads) setting.
Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#log-query-threads) setting.

Queries are logged in the [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).

@ -2018,7 +2051,7 @@ If the table does not exist, ClickHouse will create it. If the structure of the

## query_views_log {#query_views_log}

Setting for logging views (live, materialized, etc.) dependent on queries received with the [log_query_views=1](../../operations/settings/settings.md#settings-log-query-views) setting.
Setting for logging views (live, materialized, etc.) dependent on queries received with the [log_query_views=1](../../operations/settings/settings.md#log-query-views) setting.

Queries are logged in the [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).

@ -2298,7 +2331,7 @@ For the value of the `incl` attribute, see the section “[Configuration files](

**See Also**

- [skip_unavailable_shards](../../operations/settings/settings.md#settings-skip_unavailable_shards)
- [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards)
- [Cluster Discovery](../../operations/cluster-discovery.md)
- [Replicated database engine](../../engines/database-engines/replicated.md)

@ -139,7 +139,7 @@ Limit on the number of bytes in the result. The same as the previous setting.

What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.

Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that the number of returned rows is greater than [max_result_rows](#setting-max_result_rows), is a multiple of [max_block_size](../../operations/settings/settings.md#setting-max_block_size), and depends on [max_threads](../../operations/settings/settings.md#settings-max_threads).
Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that the number of returned rows is greater than [max_result_rows](#setting-max_result_rows), is a multiple of [max_block_size](../../operations/settings/settings.md#setting-max_block_size), and depends on [max_threads](../../operations/settings/settings.md#max_threads).

Example:

@ -1130,6 +1130,13 @@ Result
a 0 1971-01-01
```

## input_format_csv_try_infer_numbers_from_strings {#input_format_csv_try_infer_numbers_from_strings}

If enabled, during schema inference ClickHouse will try to infer numbers from string fields.
It can be useful if CSV data contains quoted UInt64 numbers.

Disabled by default.

## Values format settings {#values-format-settings}

### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}
@ -4,7 +4,7 @@ sidebar_position: 63
sidebar_label: User Settings
---

# User Settings
# Users and Roles Settings

The `users` section of the `user.xml` configuration file contains user settings.

@ -187,3 +187,34 @@ The following configuration forces that user `user1` can only see the rows of `t
```

The `filter` can be any expression resulting in a [UInt8](../../sql-reference/data-types/int-uint.md)-type value. It usually contains comparisons and logical operators. Rows from `database_name.table1` where the filter results in 0 are not returned for this user. The filtering is incompatible with `PREWHERE` operations and disables `WHERE→PREWHERE` optimization.

## Roles

You can create any predefined roles using the `roles` section of the `user.xml` configuration file.

Structure of the `roles` section:

```xml
<roles>
    <test_role>
        <grants>
            <query>GRANT SHOW ON *.*</query>
            <query>REVOKE SHOW ON system.*</query>
            <query>GRANT CREATE ON *.* WITH GRANT OPTION</query>
        </grants>
    </test_role>
</roles>
```

These roles can also be granted to users from the `users` section:

```xml
<users>
    <user_name>
        ...
        <grants>
            <query>GRANT test_role</query>
        </grants>
    </user_name>
</users>
```
@ -460,6 +460,12 @@ Possible values:

Default value: 1048576.

## http_make_head_request {#http-make-head-request}

The `http_make_head_request` setting allows the execution of a `HEAD` request while reading data from HTTP to retrieve information about the file to be read, such as its size. Since it's enabled by default, it may be desirable to disable this setting in cases where the server does not support `HEAD` requests.

Default value: `true`.
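
For a server that rejects `HEAD` requests, the setting can be disabled per query; in this sketch the URL is a placeholder:

```sql
-- Skip the preliminary HEAD request for this read; the URL is hypothetical.
SELECT *
FROM url('https://example.com/data.csv', 'CSV')
SETTINGS http_make_head_request = 0;
```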

## table_function_remote_max_addresses {#table_function_remote_max_addresses}

Sets the maximum number of addresses generated from patterns for the [remote](../../sql-reference/table-functions/remote.md) function.
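
A hedged usage sketch — the host pattern and table below are hypothetical:

```sql
-- The pattern expands to 64 addresses; the ceiling is set to match explicitly.
SELECT count()
FROM remote('server-{01..64}', default.hits)
SETTINGS table_function_remote_max_addresses = 64;
```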

@ -1578,9 +1584,15 @@ Default value: `default`.

## allow_experimental_parallel_reading_from_replicas

If true, ClickHouse will send a SELECT query to all replicas of a table (up to `max_parallel_replicas`). It will work for any kind of MergeTree table.
Enables or disables sending SELECT queries to all replicas of a table (up to `max_parallel_replicas`). Reading is parallelized and coordinated dynamically. It will work for any kind of MergeTree table.

Default value: `false`.
Possible values:

- 0 - Disabled.
- 1 - Enabled, silently disabled in case of failure.
- 2 - Enabled, throws an exception in case of failure.

Default value: `0`.
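
A usage sketch — the table name is hypothetical, and depending on the version a cluster may also need to be configured for parallel replicas:

```sql
-- Ask up to three replicas to cooperate on the read; value 2 throws on failure.
SELECT count()
FROM big_table
SETTINGS allow_experimental_parallel_reading_from_replicas = 2,
         max_parallel_replicas = 3;
```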

## compile_expressions {#compile-expressions}

@ -1704,7 +1716,7 @@ Default value: `1`

## query_cache_squash_partial_results {#query-cache-squash-partial-results}

Squash partial result blocks to blocks of size [max_block_size](#setting-max_block_size). Reduces performance of inserts into the [query cache](../query-cache.md) but improves the compressibility of cache entries (see [query_cache_compress_entries](#query_cache_compress_entries)).
Squash partial result blocks to blocks of size [max_block_size](#setting-max_block_size). Reduces performance of inserts into the [query cache](../query-cache.md) but improves the compressibility of cache entries (see [query_cache_compress_entries](#query-cache-compress-entries)).

Possible values:

@ -2474,7 +2486,7 @@ See also:
- [load_balancing](#load_balancing-round_robin)
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
- [distributed_replica_error_cap](#distributed_replica_error_cap)
- [distributed_replica_error_half_life](#settings-distributed_replica_error_half_life)
- [distributed_replica_error_half_life](#distributed_replica_error_half_life)

## distributed_background_insert_sleep_time_ms {#distributed_background_insert_sleep_time_ms}

@ -4152,6 +4164,41 @@ Result:
└─────┴─────┴───────┘
```

## enable_order_by_all {#enable-order-by-all}

Enables or disables sorting by `ALL` columns, i.e. [ORDER BY](../../sql-reference/statements/select/order-by.md).

Possible values:

- 0 — Disable ORDER BY ALL.
- 1 — Enable ORDER BY ALL.

Default value: `1`.

**Example**

Query:

```sql
CREATE TABLE TAB(C1 Int, C2 Int, ALL Int) ENGINE=Memory();

INSERT INTO TAB VALUES (10, 20, 30), (20, 20, 10), (30, 10, 20);

SELECT * FROM TAB ORDER BY ALL; -- returns an error that ALL is ambiguous

SELECT * FROM TAB ORDER BY ALL SETTINGS enable_order_by_all = 0;
```

Result:

```text
┌─C1─┬─C2─┬─ALL─┐
│ 20 │ 20 │  10 │
│ 30 │ 10 │  20 │
│ 10 │ 20 │  30 │
└────┴────┴─────┘
```

## splitby_max_substrings_includes_remaining_string {#splitby_max_substrings_includes_remaining_string}

Controls whether function [splitBy*()](../../sql-reference/functions/splitting-merging-functions.md) with argument `max_substrings` > 0 will include the remaining string in the last element of the result array.
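
For example, with `splitByChar` (a small sketch of the behavior described above):

```sql
-- With the setting enabled, the remainder 'c,d' is kept in the last element,
-- so this returns ['a','b,c,d'] instead of ['a','b'].
SELECT splitByChar(',', 'a,b,c,d', 2)
SETTINGS splitby_max_substrings_includes_remaining_string = 1;
```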

@ -4349,6 +4396,8 @@ Default value: `1GiB`.

## Schema Inference settings

See the [schema inference](../../interfaces/schema-inference.md#schema-inference-modes) documentation for more details.

### schema_inference_use_cache_for_file {#schema_inference_use_cache_for_file}

Enable the schema cache for schema inference in the `file` table function.

@ -4390,6 +4439,13 @@ Possible values:

Default value: 2.

### schema_inference_mode {#schema_inference_mode}

The mode of schema inference. Possible values: `default` and `union`.
See the [schema inference modes](../../interfaces/schema-inference.md#schema-inference-modes) section for more details.

Default value: `default`.
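
Usage sketch — the file paths mirror the schema-inference examples above:

```sql
DESC file('data{1,2,3}.jsonl') SETTINGS schema_inference_mode = 'union';
```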

## compatibility {#compatibility}

The `compatibility` setting causes ClickHouse to use the default settings of a previous version of ClickHouse, where the previous version is provided as the setting.
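
For example (the version string here is illustrative):

```sql
SET compatibility = '22.8';
```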

@ -4659,7 +4715,7 @@ Possible values:

Default value: `false`.

## rename_files_after_processing
## rename_files_after_processing {#rename_files_after_processing}

- **Type:** String

@ -5078,3 +5134,25 @@ When set to `true` then for all S3 requests the first two attempts are made with low
When set to `false`, all attempts are made with identical timeouts.

Default value: `true`.

## max_partition_size_to_drop

Restriction on dropping partitions at query time.

Default value: 50 GB.
The value 0 means that you can drop partitions without any restrictions.

:::note
This query setting overwrites its server setting equivalent, see [max_partition_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-partition-size-to-drop)
:::

## max_table_size_to_drop

Restriction on deleting tables at query time.

Default value: 50 GB.
The value 0 means that you can delete all tables without any restrictions.

:::note
This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
:::
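
A sketch of lifting both limits for one session — the table and partition names are hypothetical, and the partition value depends on the table's partition key:

```sql
SET max_partition_size_to_drop = 0, max_table_size_to_drop = 0;
ALTER TABLE hits DROP PARTITION '2023-12-01';  -- hypothetical partition value
DROP TABLE hits;                               -- hypothetical table
```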

@ -1,7 +1,7 @@
---
slug: /en/operations/system-tables/async_loader
slug: /en/operations/system-tables/asynchronous_loader
---
# async_loader
# asynchronous_loader

Contains information and status for recent asynchronous jobs (e.g. for tables loading). The table contains a row for every job. There is a tool for visualizing information from this table: `utils/async_loader_graph`.

@ -9,7 +9,7 @@ Example:

``` sql
SELECT *
FROM system.async_loader
FROM system.asynchronous_loader
FORMAT Vertical
LIMIT 1
```
@ -239,6 +239,10 @@ The amount of virtual memory mapped for the pages of machine code of the server

The amount of virtual memory mapped for the use of stack and for the allocated memory, in bytes. It is unspecified whether it includes the per-thread stacks and most of the allocated memory that is allocated with the 'mmap' system call. This metric exists only for completeness reasons. We recommend using the `MemoryResident` metric for monitoring.

### MemoryResidentMax

Maximum amount of physical memory used by the server process, in bytes.

### MemoryResident

The amount of physical memory used by the server process, in bytes.

@ -547,6 +551,14 @@ Total amount of bytes (compressed, including data and indices) stored in all tab

Total amount of data parts in all tables of the MergeTree family. Numbers larger than 10 000 will negatively affect the server startup time, and may indicate an unreasonable choice of partition key.

### TotalPrimaryKeyBytesInMemory

The total amount of memory (in bytes) used by primary key values (only takes active parts into account).

### TotalPrimaryKeyBytesInMemoryAllocated

The total amount of memory (in bytes) reserved for primary key values (only takes active parts into account).

### TotalRowsOfMergeTreeTables

Total amount of rows (records) stored in all tables of the MergeTree family.

@ -78,5 +78,5 @@ is_active: NULL
**See Also**

- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#distributed_replica_error_half_life)

26
docs/en/operations/system-tables/database_engines.md
Normal file
@ -0,0 +1,26 @@
---
slug: /en/operations/system-tables/database_engines
---
# database_engines

Contains the list of database engines supported by the server.

This table contains the following columns (the column type is shown in brackets):

- `name` (String) — The name of the database engine.

Example:

``` sql
SELECT *
FROM system.database_engines
WHERE name IN ('Atomic', 'Lazy', 'Ordinary')
```

``` text
┌─name─────┐
│ Ordinary │
│ Atomic   │
│ Lazy     │
└──────────┘
```
@ -9,11 +9,15 @@ Columns:

- `name` ([String](../../sql-reference/data-types/string.md)) — name of the error (`errorCodeToName`).
- `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — code number of the error.
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has been happened.
- `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — time when the last error happened.
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error happened.
- `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — the time when the last error happened.
- `last_error_message` ([String](../../sql-reference/data-types/string.md)) — message for the last error.
- `last_error_trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — A [stack trace](https://en.wikipedia.org/wiki/Stack_trace) which represents a list of physical addresses where the called methods are stored.
- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed query).
- `last_error_trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — A [stack trace](https://en.wikipedia.org/wiki/Stack_trace) that represents a list of physical addresses where the called methods are stored.
- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed queries).

:::note
Counters for some errors may increase during successful query execution. It's not recommended to use this table for server monitoring purposes unless you are sure that the corresponding error cannot be a false positive.
:::

**Example**

@ -11,7 +11,7 @@ This table does not contain the ingested data for `INSERT` queries.

You can change the settings of query logging in the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.

You can disable query logging by setting [log_queries = 0](../../operations/settings/settings.md#settings-log-queries). We do not recommend turning off logging because the information in this table is important for solving issues.
You can disable query logging by setting [log_queries = 0](../../operations/settings/settings.md#log-queries). We do not recommend turning off logging because the information in this table is important for solving issues.

The flushing period of the data is set in the `flush_interval_milliseconds` parameter of the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.

@ -30,7 +30,7 @@ Each query creates one or two rows in the `query_log` table, depending on the st

You can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting to reduce the number of queries registered in the `query_log` table.

You can use the [log_formatted_queries](../../operations/settings/settings.md#settings-log-formatted-queries) setting to log formatted queries to the `formatted_query` column.
You can use the [log_formatted_queries](../../operations/settings/settings.md#log-formatted-queries) setting to log formatted queries to the `formatted_query` column.

Columns:

@ -101,7 +101,7 @@ Columns:
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. Their description can be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to an arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to an arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#max_query_size). An empty string if it is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution. These threads may not have run simultaneously.
- `peak_threads_usage` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum count of simultaneous threads executing the query.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution.

@ -8,7 +8,7 @@ Contains information about threads that execute queries, for example, thread nam
To start logging:

1. Configure parameters in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) section.
2. Set [log_query_threads](../../operations/settings/settings.md#settings-log-query-threads) to 1.
2. Set [log_query_threads](../../operations/settings/settings.md#log-query-threads) to 1.

The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.

@ -8,7 +8,7 @@ Contains information about the dependent views executed when running a query, fo
To start logging:

1. Configure parameters in the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) section.
2. Set [log_query_views](../../operations/settings/settings.md#settings-log-query-views) to 1.
2. Set [log_query_views](../../operations/settings/settings.md#log-query-views) to 1.

The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.

Some files were not shown because too many files have changed in this diff.