Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 16:12:01 +00:00)

Commit 7359dd518f: Merge remote-tracking branch 'upstream/master' into s3queue
.github/workflows/master.yml (43 lines changed, vendored)

@@ -850,6 +850,48 @@ jobs:
       docker ps --quiet | xargs --no-run-if-empty docker kill ||:
       docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
       sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
@@ -932,6 +974,7 @@ jobs:
       - BuilderBinDarwinAarch64
       - BuilderBinFreeBSD
       - BuilderBinPPC64
+      - BuilderBinRISCV64
       - BuilderBinAmd64Compat
       - BuilderBinAarch64V80Compat
       - BuilderBinClangTidy
.github/workflows/nightly.yml (52 lines changed, vendored)

@@ -75,54 +75,7 @@ jobs:
   Codebrowser:
     needs: [DockerHubPush]
     uses: ./.github/workflows/woboq.yml
-  BuilderCoverity:
-    needs: DockerHubPush
-    runs-on: [self-hosted, builder]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          BUILD_NAME=coverity
-          CACHES_PATH=${{runner.temp}}/../ccaches
-          IMAGES_PATH=${{runner.temp}}/images_path
-          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-          TEMP_PATH=${{runner.temp}}/build_check
-          EOF
-          echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
-      - name: Download changed images
-        uses: actions/download-artifact@v3
-        with:
-          name: changed_images
-          path: ${{ env.IMAGES_PATH }}
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          submodules: true
-      - name: Build
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload Coverity Analysis
-        if: ${{ success() || failure() }}
-        run: |
-          curl --form token="${COVERITY_TOKEN}" \
-            --form email='security+coverity@clickhouse.com' \
-            --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tar.gz" \
-            --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
-            --form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
-            https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   SonarCloud:
-    # TODO: Remove if: whenever SonarCloud supports c++23
-    if: ${{ false }}
     runs-on: [self-hosted, builder]
     env:
       SONAR_SCANNER_VERSION: 4.8.0.2856
@@ -159,7 +112,7 @@ jobs:
       - name: Set Up Build Tools
         run: |
           sudo apt-get update
-          sudo apt-get install -yq git cmake ccache ninja-build python3 yasm
+          sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm
           sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
       - name: Run build-wrapper
         run: |
@@ -178,4 +131,5 @@ jobs:
           --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
           --define sonar.projectKey="ClickHouse_ClickHouse" \
           --define sonar.organization="clickhouse-java" \
-          --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" \
+          --define sonar.cfamily.cpp23.enabled=true \
+          --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
.github/workflows/pull_request.yml (252 lines changed, vendored)

@@ -911,6 +911,47 @@ jobs:
       docker ps --quiet | xargs --no-run-if-empty docker kill ||:
       docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
       sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush, FastTest, StyleCheck]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
@@ -992,6 +1033,7 @@ jobs:
       - BuilderBinDarwinAarch64
       - BuilderBinFreeBSD
       - BuilderBinPPC64
+      - BuilderBinRISCV64
      - BuilderBinAmd64Compat
       - BuilderBinAarch64V80Compat
       - BuilderBinClangTidy
@@ -3861,6 +3903,216 @@ jobs:
       docker ps --quiet | xargs --no-run-if-empty docker kill ||:
       docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
       sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan0:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan1:
+    (identical to IntegrationTestsAnalyzerAsan0 except RUN_BY_HASH_NUM=1)
+  IntegrationTestsAnalyzerAsan2:
+    (identical to IntegrationTestsAnalyzerAsan0 except RUN_BY_HASH_NUM=2)
+  IntegrationTestsAnalyzerAsan3:
+    (identical to IntegrationTestsAnalyzerAsan0 except RUN_BY_HASH_NUM=3)
+  IntegrationTestsAnalyzerAsan4:
+    (identical to IntegrationTestsAnalyzerAsan0 except RUN_BY_HASH_NUM=4)
+  IntegrationTestsAnalyzerAsan5:
+    (identical to IntegrationTestsAnalyzerAsan0 except RUN_BY_HASH_NUM=5)
   IntegrationTestsTsan0:
     needs: [BuilderDebTsan]
     runs-on: [self-hosted, stress-tester]
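The six new IntegrationTestsAnalyzerAsan jobs shard one suite by hash: every job runs the same CHECK_NAME but receives its own RUN_BY_HASH_NUM (0–5) together with RUN_BY_HASH_TOTAL=6, and the test runner keeps only the tests whose hash lands in its bucket. A minimal sketch of that selection scheme (the real logic lives in tests/ci/integration_test_check.py; this C++ rendering and the choice of hash function are illustrative assumptions):

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

/// Keep only the tests that belong to shard `num` of `total` shards.
/// Assumption: bucket = hash(test name) % total, mirroring the
/// RUN_BY_HASH_NUM / RUN_BY_HASH_TOTAL environment variables above.
std::vector<std::string> selectShard(
    const std::vector<std::string> & tests, uint64_t num, uint64_t total)
{
    std::vector<std::string> picked;
    for (const auto & name : tests)
        if (std::hash<std::string>{}(name) % total == num)
            picked.push_back(name);
    return picked;
}

Because the hash of a test name is stable, each test always lands in the same shard, so the six jobs partition the suite without coordinating with each other.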
CMakeLists.txt

@@ -87,7 +87,6 @@ if (ENABLE_FUZZING)
     set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF)
     set (ENABLE_LIBRARIES 0)
     set (ENABLE_SSL 1)
-    set (USE_UNWIND ON)
     set (ENABLE_EMBEDDED_COMPILER 0)
     set (ENABLE_EXAMPLES 0)
     set (ENABLE_UTILS 0)

@@ -344,9 +343,9 @@ if (COMPILER_CLANG)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")

-    if (NOT ENABLE_TESTS AND NOT SANITIZE)
+    if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
         # https://clang.llvm.org/docs/ThinLTO.html
-        # Applies to clang only.
+        # Applies to clang and linux only.
         # Disabled when building with tests or sanitizers.
         option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
     endif()
README.md (15 lines changed)

@@ -16,30 +16,31 @@ curl https://clickhouse.com/ | sh
 * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
 * [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
 * [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
-* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
-* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
+* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlighting and navigation.
+* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
+* [Static Analysis (SonarCloud)](https://sonarcloud.io/project/issues?resolved=false&id=ClickHouse_ClickHouse) proposes C++ quality improvements.
 * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

 ## Upcoming Events

-* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**v23.7 Release Webinar**](https://clickhouse.com/company/events/v23-7-community-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-07) - Jul 27 - 23.7 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
-* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4
 * [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
 * [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
 * [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
 * [**ClickHouse Meetup in Singapore**](https://www.meetup.com/clickhouse-singapore-meetup-group/events/294428050/) - Jul 27
+* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Sep 12

 Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
-* **Recording available**: [**v23.4 Release Webinar**](https://www.youtube.com/watch?v=4rrf6bk_mOg) Faster Parquet Reading, Asynchonous Connections to Reoplicas, Trailing Comma before FROM, extractKeyValuePairs, integrations updates, and so much more! Watch it now!
+* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
 * **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)

-## Interested in joining ClickHouse and making it your full time job?
+## Interested in joining ClickHouse and making it your full-time job?

-We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker as well as a doer - we’ll definitely click!
+We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting-edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker and a doer - we’ll definitely click!

 Check out our **current openings** here: https://clickhouse.com/company/careers
SECURITY.md

@@ -13,6 +13,7 @@ The following versions of ClickHouse server are currently being supported with security updates:

 | Version | Supported |
 |:-|:-|
+| 23.6 | ✔️ |
 | 23.5 | ✔️ |
 | 23.4 | ✔️ |
 | 23.3 | ✔️ |
base/base/getThreadId.cpp

@@ -15,25 +15,34 @@

 static thread_local uint64_t current_tid = 0;

+static void setCurrentThreadId()
+{
+#if defined(OS_ANDROID)
+    current_tid = gettid();
+#elif defined(OS_LINUX)
+    current_tid = static_cast<uint64_t>(syscall(SYS_gettid)); /// This call is always successful. - man gettid
+#elif defined(OS_FREEBSD)
+    current_tid = pthread_getthreadid_np();
+#elif defined(OS_SUNOS)
+    // On Solaris-derived systems, this returns the ID of the LWP, analogous
+    // to a thread.
+    current_tid = static_cast<uint64_t>(pthread_self());
+#else
+    if (0 != pthread_threadid_np(nullptr, &current_tid))
+        throw std::logic_error("pthread_threadid_np returned error");
+#endif
+}
+
 uint64_t getThreadId()
 {
     if (!current_tid)
-    {
-#if defined(OS_ANDROID)
-        current_tid = gettid();
-#elif defined(OS_LINUX)
-        current_tid = static_cast<uint64_t>(syscall(SYS_gettid)); /// This call is always successful. - man gettid
-#elif defined(OS_FREEBSD)
-        current_tid = pthread_getthreadid_np();
-#elif defined(OS_SUNOS)
-        // On Solaris-derived systems, this returns the ID of the LWP, analogous
-        // to a thread.
-        current_tid = static_cast<uint64_t>(pthread_self());
-#else
-        if (0 != pthread_threadid_np(nullptr, &current_tid))
-            throw std::logic_error("pthread_threadid_np returned error");
-#endif
-    }
+        setCurrentThreadId();

     return current_tid;
 }
+
+void updateCurrentThreadIdAfterFork()
+{
+    setCurrentThreadId();
+}
base/base/getThreadId.h

@@ -3,3 +3,5 @@

 /// Obtain thread id from OS. The value is cached in thread local variable.
 uint64_t getThreadId();
+
+void updateCurrentThreadIdAfterFork();
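The new updateCurrentThreadIdAfterFork() exists because the cached thread_local TID survives fork() and would otherwise be stale in the child process. A minimal sketch of the intended use, assuming a pthread_atfork child handler (the actual call site is not shown in this diff):

#include <cstdint>
#include <pthread.h>
#include <unistd.h>

uint64_t getThreadId();                 /// from base/base/getThreadId.h
void updateCurrentThreadIdAfterFork();  /// likewise

int main()
{
    /// Hypothetical registration: refresh the cached TID in every forked child.
    pthread_atfork(nullptr, nullptr, updateCurrentThreadIdAfterFork);

    uint64_t parent_tid = getThreadId(); /// caches the TID in the thread_local

    if (fork() == 0)
    {
        /// Without the child handler, the thread_local cache would survive
        /// fork() and getThreadId() would keep returning the parent's TID.
        _exit(getThreadId() == parent_tid ? 1 : 0);
    }
    return 0;
}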
base/base/hex.h (426 lines changed)

@@ -4,212 +4,288 @@
 #include <cstring>
 #include "types.h"

-/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
-constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
-constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
+namespace CityHash_v1_0_2 { struct uint128; }
+
+namespace wide
+{
+    template <size_t Bits, typename Signed>
+    class integer;
+}
+
+namespace impl
+{
+    /// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
+    constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
+    constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
+
+    /// Maps 0..255 to 00..FF or 00..ff correspondingly.
+    constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
+        "000102030405060708090A0B0C0D0E0F"
+        "101112131415161718191A1B1C1D1E1F"
+        "202122232425262728292A2B2C2D2E2F"
+        "303132333435363738393A3B3C3D3E3F"
+        "404142434445464748494A4B4C4D4E4F"
+        "505152535455565758595A5B5C5D5E5F"
+        "606162636465666768696A6B6C6D6E6F"
+        "707172737475767778797A7B7C7D7E7F"
+        "808182838485868788898A8B8C8D8E8F"
+        "909192939495969798999A9B9C9D9E9F"
+        "A0A1A2A3A4A5A6A7A8A9AAABACADAEAF"
+        "B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF"
+        "C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF"
+        "D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF"
+        "E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF"
+        "F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF";
+
+    constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
+        "000102030405060708090a0b0c0d0e0f"
+        "101112131415161718191a1b1c1d1e1f"
+        "202122232425262728292a2b2c2d2e2f"
+        "303132333435363738393a3b3c3d3e3f"
+        "404142434445464748494a4b4c4d4e4f"
+        "505152535455565758595a5b5c5d5e5f"
+        "606162636465666768696a6b6c6d6e6f"
+        "707172737475767778797a7b7c7d7e7f"
+        "808182838485868788898a8b8c8d8e8f"
+        "909192939495969798999a9b9c9d9e9f"
+        "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+        "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+        "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+        "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
+        "e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
+        "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
+
+    /// Maps 0..255 to 00000000..11111111 correspondingly.
+    constexpr inline std::string_view bin_byte_to_char_table = //
+        "0000000000000001000000100000001100000100000001010000011000000111"
+        "0000100000001001000010100000101100001100000011010000111000001111"
+        "0001000000010001000100100001001100010100000101010001011000010111"
+        "0001100000011001000110100001101100011100000111010001111000011111"
+        "0010000000100001001000100010001100100100001001010010011000100111"
+        "0010100000101001001010100010101100101100001011010010111000101111"
+        "0011000000110001001100100011001100110100001101010011011000110111"
+        "0011100000111001001110100011101100111100001111010011111000111111"
+        "0100000001000001010000100100001101000100010001010100011001000111"
+        "0100100001001001010010100100101101001100010011010100111001001111"
+        "0101000001010001010100100101001101010100010101010101011001010111"
+        "0101100001011001010110100101101101011100010111010101111001011111"
+        "0110000001100001011000100110001101100100011001010110011001100111"
+        "0110100001101001011010100110101101101100011011010110111001101111"
+        "0111000001110001011100100111001101110100011101010111011001110111"
+        "0111100001111001011110100111101101111100011111010111111001111111"
+        "1000000010000001100000101000001110000100100001011000011010000111"
+        "1000100010001001100010101000101110001100100011011000111010001111"
+        "1001000010010001100100101001001110010100100101011001011010010111"
+        "1001100010011001100110101001101110011100100111011001111010011111"
+        "1010000010100001101000101010001110100100101001011010011010100111"
+        "1010100010101001101010101010101110101100101011011010111010101111"
+        "1011000010110001101100101011001110110100101101011011011010110111"
+        "1011100010111001101110101011101110111100101111011011111010111111"
+        "1100000011000001110000101100001111000100110001011100011011000111"
+        "1100100011001001110010101100101111001100110011011100111011001111"
+        "1101000011010001110100101101001111010100110101011101011011010111"
+        "1101100011011001110110101101101111011100110111011101111011011111"
+        "1110000011100001111000101110001111100100111001011110011011100111"
+        "1110100011101001111010101110101111101100111011011110111011101111"
+        "1111000011110001111100101111001111110100111101011111011011110111"
+        "1111100011111001111110101111101111111100111111011111111011111111";
+
+    /// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to implementation specific value.
+    constexpr inline std::string_view hex_char_to_digit_table
+        = {"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" //0-9
+           "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //A-Z
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //a-z
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+           "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
+           256};
+
+    /// Converts a hex digit '0'..'f' or '0'..'F' to its value 0..15.
+    constexpr UInt8 unhexDigit(char c)
+    {
+        return hex_char_to_digit_table[static_cast<UInt8>(c)];
+    }
+
+    /// Converts an unsigned integer in the native endian to hexadecimal representation and back. Used as a base class for HexConversion<T>.
+    template <typename TUInt, typename = void>
+    struct HexConversionUInt
+    {
+        static const constexpr size_t num_hex_digits = sizeof(TUInt) * 2;
+
+        static void hex(TUInt uint_, char * out, std::string_view table)
+        {
+            union
+            {
+                TUInt value;
+                UInt8 uint8[sizeof(TUInt)];
+            };
+
+            value = uint_;
+
+            for (size_t i = 0; i < sizeof(TUInt); ++i)
+            {
+                if constexpr (std::endian::native == std::endian::little)
+                    memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
+                else
+                    memcpy(out + i * 2, &table[static_cast<size_t>(uint8[i]) * 2], 2);
+            }
+        }
+
+        static TUInt unhex(const char * data)
+        {
+            TUInt res;
+            if constexpr (sizeof(TUInt) == 1)
+            {
+                res = static_cast<UInt8>(unhexDigit(data[0])) * 0x10 + static_cast<UInt8>(unhexDigit(data[1]));
+            }
+            else if constexpr (sizeof(TUInt) == 2)
+            {
+                res = static_cast<UInt16>(unhexDigit(data[0])) * 0x1000 + static_cast<UInt16>(unhexDigit(data[1])) * 0x100
+                    + static_cast<UInt16>(unhexDigit(data[2])) * 0x10 + static_cast<UInt16>(unhexDigit(data[3]));
+            }
+            else if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
+            {
+                res = 0;
+                for (size_t i = 0; i < sizeof(TUInt) * 2; ++i, ++data)
+                {
+                    res <<= 4;
+                    res += unhexDigit(*data);
+                }
+            }
+            else
+            {
+                res = 0;
+                for (size_t i = 0; i < sizeof(TUInt) / 8; ++i, data += 16)
+                {
+                    res <<= 64;
+                    res += HexConversionUInt<UInt64>::unhex(data);
+                }
+            }
+            return res;
+        }
+    };
+
+    /// Helper template class to convert a value of any supported type to hexadecimal representation and back.
+    template <typename T, typename SFINAE = void>
+    struct HexConversion;
+
+    template <typename TUInt>
+    struct HexConversion<TUInt, std::enable_if_t<std::is_integral_v<TUInt>>> : public HexConversionUInt<TUInt> {};
+
+    template <size_t Bits, typename Signed>
+    struct HexConversion<wide::integer<Bits, Signed>> : public HexConversionUInt<wide::integer<Bits, Signed>> {};
+
+    template <typename CityHashUInt128> /// Partial specialization here allows not to include <city.h> in this header.
+    struct HexConversion<CityHashUInt128, std::enable_if_t<std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>>>
+    {
+        static const constexpr size_t num_hex_digits = 32;
+
+        static void hex(const CityHashUInt128 & uint_, char * out, std::string_view table)
+        {
+            HexConversion<UInt64>::hex(uint_.high64, out, table);
+            HexConversion<UInt64>::hex(uint_.low64, out + 16, table);
+        }
+
+        static CityHashUInt128 unhex(const char * data)
+        {
+            CityHashUInt128 res;
+            res.high64 = HexConversion<UInt64>::unhex(data);
+            res.low64 = HexConversion<UInt64>::unhex(data + 16);
+            return res;
+        }
+    };
+}
+
+/// Produces a hexadecimal representation of an integer value with leading zeros (for checksums).
+/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
+/// It can be used with signed types as well, however they are written as corresponding unsigned numbers
+/// using two's complement (i.e. for example "-1" is written as "0xFF", not as "-0x01").
+template <typename T>
+void writeHexUIntUppercase(const T & value, char * out)
+{
+    impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_uppercase_table);
+}
+
+template <typename T>
+void writeHexUIntLowercase(const T & value, char * out)
+{
+    impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_lowercase_table);
+}
+
+template <typename T>
+std::string getHexUIntUppercase(const T & value)
+{
+    std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
+    writeHexUIntUppercase(value, res.data());
+    return res;
+}
+
+template <typename T>
+std::string getHexUIntLowercase(const T & value)
+{
+    std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
+    writeHexUIntLowercase(value, res.data());
+    return res;
+}

 constexpr char hexDigitUppercase(unsigned char c)
 {
-    return hex_digit_to_char_uppercase_table[c];
+    return impl::hex_digit_to_char_uppercase_table[c];
 }

 constexpr char hexDigitLowercase(unsigned char c)
 {
-    return hex_digit_to_char_lowercase_table[c];
+    return impl::hex_digit_to_char_lowercase_table[c];
 }

 inline void writeHexByteUppercase(UInt8 byte, void * out)
 {
-    memcpy(out, &hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
+    memcpy(out, &impl::hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
 }

 inline void writeHexByteLowercase(UInt8 byte, void * out)
 {
-    memcpy(out, &hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
+    memcpy(out, &impl::hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
 }

-(All old global-scope definitions are removed in favour of namespace impl above: the
-hex_byte_to_char_uppercase/lowercase tables, bin_byte_to_char_table, hex_char_to_digit_table,
-writeHexUIntImpl, the TUInt overloads of writeHexUIntUppercase/Lowercase and
-getHexUIntUppercase/Lowercase, and the loop-based unhexUInt — their bodies now live in
-namespace impl and in HexConversionUInt.)

+/// Converts a hex representation with leading zeros back to an integer value.
+/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
+template <typename T>
+constexpr T unhexUInt(const char * data)
+{
+    return impl::HexConversion<T>::unhex(data);
+}

+/// Converts a hexadecimal digit '0'..'f' or '0'..'F' to UInt8.
 constexpr UInt8 unhex(char c)
 {
-    return hex_char_to_digit_table[static_cast<UInt8>(c)];
+    return impl::unhexDigit(c);
 }

+/// Converts two hexadecimal digits to UInt8.
 constexpr UInt8 unhex2(const char * data)
 {
-    return static_cast<UInt8>(unhex(data[0])) * 0x10 + static_cast<UInt8>(unhex(data[1]));
+    return unhexUInt<UInt8>(data);
 }

+/// Converts four hexadecimal digits to UInt16.
 constexpr UInt16 unhex4(const char * data)
 {
-    return static_cast<UInt16>(unhex(data[0])) * 0x1000 + static_cast<UInt16>(unhex(data[1])) * 0x100
-        + static_cast<UInt16>(unhex(data[2])) * 0x10 + static_cast<UInt16>(unhex(data[3]));
+    return unhexUInt<UInt16>(data);
 }

+/// Produces a binary representation of a single byte.
 inline void writeBinByte(UInt8 byte, void * out)
 {
-    memcpy(out, &bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
+    memcpy(out, &impl::bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
 }
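A short round-trip sketch of the refactored hex API (the include path and the UInt32 alias are assumptions; in the real tree the alias comes from "types.h"):

#include <cassert>
#include <cstdint>
#include <string>

#include "base/hex.h" // assumed include path for the header above

using UInt32 = uint32_t; // stand-in for the alias from "types.h"

int main()
{
    UInt32 value = 0xDEADBEEF;

    // Always sizeof(T) * 2 digits, with leading zeros (checksum-friendly).
    std::string hex = getHexUIntUppercase(value);
    assert(hex == "DEADBEEF");

    // unhexUInt<T> is the inverse; it consumes exactly sizeof(T) * 2 digits.
    assert(unhexUInt<UInt32>(hex.data()) == value);
}

The same two calls now also work for wide::integer and CityHash_v1_0_2::uint128 through the HexConversion specializations, which is the point of the refactoring.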
base/poco/Foundation/CMakeLists.txt

@@ -87,7 +87,6 @@ set (SRCS
     src/LoggingRegistry.cpp
     src/LogStream.cpp
    src/MD5Engine.cpp
-    src/MemoryPool.cpp
     src/MemoryStream.cpp
     src/Message.cpp
     src/Mutex.cpp
base/poco/Foundation/include/Poco/MemoryPool.h (file deleted)

@@ -1,116 +0,0 @@
-//
-// MemoryPool.h
-//
-// Library: Foundation
-// Package: Core
-// Module:  MemoryPool
-//
-// Definition of the MemoryPool class.
-//
-// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Foundation_MemoryPool_INCLUDED
-#define Foundation_MemoryPool_INCLUDED
-
-
-#include <cstddef>
-#include <vector>
-#include "Poco/Foundation.h"
-#include "Poco/Mutex.h"
-
-
-namespace Poco
-{
-
-
-class Foundation_API MemoryPool
-/// A simple pool for fixed-size memory blocks.
-///
-/// The main purpose of this class is to speed-up
-/// memory allocations, as well as to reduce memory
-/// fragmentation in situations where the same blocks
-/// are allocated all over again, such as in server
-/// applications.
-///
-/// All allocated blocks are retained for future use.
-/// A limit on the number of blocks can be specified.
-/// Blocks can be preallocated.
-{
-public:
-    MemoryPool(std::size_t blockSize, int preAlloc = 0, int maxAlloc = 0);
-    /// Creates a MemoryPool for blocks with the given blockSize.
-    /// The number of blocks given in preAlloc are preallocated.
-
-    ~MemoryPool();
-
-    void * get();
-    /// Returns a memory block. If there are no more blocks
-    /// in the pool, a new block will be allocated.
-    ///
-    /// If maxAlloc blocks are already allocated, an
-    /// OutOfMemoryException is thrown.
-
-    void release(void * ptr);
-    /// Releases a memory block and returns it to the pool.
-
-    std::size_t blockSize() const;
-    /// Returns the block size.
-
-    int allocated() const;
-    /// Returns the number of allocated blocks.
-
-    int available() const;
-    /// Returns the number of available blocks in the pool.
-
-private:
-    MemoryPool();
-    MemoryPool(const MemoryPool &);
-    MemoryPool & operator=(const MemoryPool &);
-
-    void clear();
-
-    enum
-    {
-        BLOCK_RESERVE = 128
-    };
-
-    typedef std::vector<char *> BlockVec;
-
-    std::size_t _blockSize;
-    int _maxAlloc;
-    int _allocated;
-    BlockVec _blocks;
-    FastMutex _mutex;
-};
-
-
-//
-// inlines
-//
-inline std::size_t MemoryPool::blockSize() const
-{
-    return _blockSize;
-}
-
-
-inline int MemoryPool::allocated() const
-{
-    return _allocated;
-}
-
-
-inline int MemoryPool::available() const
-{
-    return (int)_blocks.size();
-}
-
-
-} // namespace Poco
-
-
-#endif // Foundation_MemoryPool_INCLUDED
@@ -1,105 +0,0 @@
-//
-// MemoryPool.cpp
-//
-// Library: Foundation
-// Package: Core
-// Module: MemoryPool
-//
-// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#include "Poco/MemoryPool.h"
-#include "Poco/Exception.h"
-
-
-namespace Poco {
-
-
-MemoryPool::MemoryPool(std::size_t blockSize, int preAlloc, int maxAlloc):
-    _blockSize(blockSize),
-    _maxAlloc(maxAlloc),
-    _allocated(preAlloc)
-{
-    poco_assert (maxAlloc == 0 || maxAlloc >= preAlloc);
-    poco_assert (preAlloc >= 0 && maxAlloc >= 0);
-
-    int r = BLOCK_RESERVE;
-    if (preAlloc > r)
-        r = preAlloc;
-    if (maxAlloc > 0 && maxAlloc < r)
-        r = maxAlloc;
-    _blocks.reserve(r);
-
-    try
-    {
-        for (int i = 0; i < preAlloc; ++i)
-        {
-            _blocks.push_back(new char[_blockSize]);
-        }
-    }
-    catch (...)
-    {
-        clear();
-        throw;
-    }
-}
-
-
-MemoryPool::~MemoryPool()
-{
-    clear();
-}
-
-
-void MemoryPool::clear()
-{
-    for (BlockVec::iterator it = _blocks.begin(); it != _blocks.end(); ++it)
-    {
-        delete [] *it;
-    }
-    _blocks.clear();
-}
-
-
-void* MemoryPool::get()
-{
-    FastMutex::ScopedLock lock(_mutex);
-
-    if (_blocks.empty())
-    {
-        if (_maxAlloc == 0 || _allocated < _maxAlloc)
-        {
-            ++_allocated;
-            return new char[_blockSize];
-        }
-        else throw OutOfMemoryException("MemoryPool exhausted");
-    }
-    else
-    {
-        char* ptr = _blocks.back();
-        _blocks.pop_back();
-        return ptr;
-    }
-}
-
-
-void MemoryPool::release(void* ptr)
-{
-    FastMutex::ScopedLock lock(_mutex);
-
-    try
-    {
-        _blocks.push_back(reinterpret_cast<char*>(ptr));
-    }
-    catch (...)
-    {
-        delete [] reinterpret_cast<char*>(ptr);
-    }
-}
-
-
-} // namespace Poco
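Note that the implementation above requires release() to be called exactly once per get(), or blocks leak for the lifetime of the pool. A hypothetical RAII guard (PoolBlock is not a Poco class; it is a sketch written against the API shown above) makes that pairing exception-safe:

#include "Poco/MemoryPool.h"

// Returns the block to the pool even if an exception is thrown
// between acquisition and release.
class PoolBlock
{
public:
    explicit PoolBlock(Poco::MemoryPool & pool) : _pool(pool), _ptr(pool.get()) {}
    ~PoolBlock() { _pool.release(_ptr); }

    PoolBlock(const PoolBlock &) = delete;
    PoolBlock & operator=(const PoolBlock &) = delete;

    char * data() const { return static_cast<char *>(_ptr); }

private:
    Poco::MemoryPool & _pool;
    void * _ptr;
};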
@@ -19,7 +19,6 @@
 
 
 #include "Poco/BufferedStreamBuf.h"
-#include "Poco/Net/HTTPBufferAllocator.h"
 #include "Poco/Net/Net.h"
 
 
@@ -27,9 +26,9 @@ namespace Poco
 {
 namespace Net
 {
+    constexpr size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;
 
-    typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>, HTTPBufferAllocator> HTTPBasicStreamBuf;
+    typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>> HTTPBasicStreamBuf;
 
 
 }
@@ -1,53 +0,0 @@
-//
-// HTTPBufferAllocator.h
-//
-// Library: Net
-// Package: HTTP
-// Module: HTTPBufferAllocator
-//
-// Definition of the HTTPBufferAllocator class.
-//
-// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Net_HTTPBufferAllocator_INCLUDED
-#define Net_HTTPBufferAllocator_INCLUDED
-
-
-#include <ios>
-#include "Poco/MemoryPool.h"
-#include "Poco/Net/Net.h"
-
-
-namespace Poco
-{
-namespace Net
-{
-
-
-    class Net_API HTTPBufferAllocator
-    /// A BufferAllocator for HTTP streams.
-    {
-    public:
-        static char * allocate(std::streamsize size);
-        static void deallocate(char * ptr, std::streamsize size);
-
-        enum
-        {
-            BUFFER_SIZE = 128 * 1024
-        };
-
-    private:
-        static Poco::MemoryPool _pool;
-    };
-
-
-}
-} // namespace Poco::Net
-
-
-#endif // Net_HTTPBufferAllocator_INCLUDED
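The contract of the class deleted above is a pair of statics, allocate() and deallocate(), which the buffered streambuf used to obtain its fixed 128 KiB buffer; every same-sized allocation went through the shared MemoryPool. A sketch of exercising that contract directly (demo() is illustrative, not from the tree):

#include "Poco/Net/HTTPBufferAllocator.h"

void demo()
{
    using Poco::Net::HTTPBufferAllocator;

    // Acquire and return one stream buffer through the pool-backed statics.
    char * buf = HTTPBufferAllocator::allocate(HTTPBufferAllocator::BUFFER_SIZE);
    HTTPBufferAllocator::deallocate(buf, HTTPBufferAllocator::BUFFER_SIZE);
}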
@@ -21,7 +21,6 @@
 #include <cstddef>
 #include <istream>
 #include <ostream>
-#include "Poco/MemoryPool.h"
 #include "Poco/Net/HTTPBasicStreamBuf.h"
 #include "Poco/Net/Net.h"
 
@@ -80,12 +79,6 @@ namespace Net
     public:
         HTTPChunkedInputStream(HTTPSession & session);
         ~HTTPChunkedInputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -95,12 +88,6 @@ namespace Net
     public:
         HTTPChunkedOutputStream(HTTPSession & session);
         ~HTTPChunkedOutputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -78,12 +78,6 @@ namespace Net
     public:
         HTTPFixedLengthInputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
         ~HTTPFixedLengthInputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -93,12 +87,6 @@ namespace Net
     public:
         HTTPFixedLengthOutputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
         ~HTTPFixedLengthOutputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -21,7 +21,6 @@
 #include <cstddef>
 #include <istream>
 #include <ostream>
-#include "Poco/MemoryPool.h"
 #include "Poco/Net/HTTPBasicStreamBuf.h"
 #include "Poco/Net/Net.h"
 
@@ -74,12 +73,6 @@ namespace Net
     public:
         HTTPHeaderInputStream(HTTPSession & session);
         ~HTTPHeaderInputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -89,12 +82,6 @@ namespace Net
     public:
         HTTPHeaderOutputStream(HTTPSession & session);
         ~HTTPHeaderOutputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -192,7 +192,7 @@ namespace Net
     HTTPSession & operator=(const HTTPSession &);
 
     StreamSocket _socket;
-    char * _pBuffer;
+    std::unique_ptr<char[]> _pBuffer;
     char * _pCurrent;
     char * _pEnd;
     bool _keepAlive;
@@ -21,7 +21,6 @@
 #include <cstddef>
 #include <istream>
 #include <ostream>
-#include "Poco/MemoryPool.h"
 #include "Poco/Net/HTTPBasicStreamBuf.h"
 #include "Poco/Net/Net.h"
 
@@ -75,12 +74,6 @@ namespace Net
     public:
         HTTPInputStream(HTTPSession & session);
         ~HTTPInputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -90,12 +83,6 @@ namespace Net
     public:
         HTTPOutputStream(HTTPSession & session);
         ~HTTPOutputStream();
-
-        void * operator new(std::size_t size);
-        void operator delete(void * ptr);
-
-    private:
-        static Poco::MemoryPool _pool;
     };
 
 
@@ -1,44 +0,0 @@
-//
-// HTTPBufferAllocator.cpp
-//
-// Library: Net
-// Package: HTTP
-// Module: HTTPBufferAllocator
-//
-// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#include "Poco/Net/HTTPBufferAllocator.h"
-
-
-using Poco::MemoryPool;
-
-
-namespace Poco {
-namespace Net {
-
-
-MemoryPool HTTPBufferAllocator::_pool(HTTPBufferAllocator::BUFFER_SIZE, 16);
-
-
-char* HTTPBufferAllocator::allocate(std::streamsize size)
-{
-    poco_assert_dbg (size == BUFFER_SIZE);
-
-    return reinterpret_cast<char*>(_pool.get());
-}
-
-
-void HTTPBufferAllocator::deallocate(char* ptr, std::streamsize size)
-{
-    poco_assert_dbg (size == BUFFER_SIZE);
-
-    _pool.release(ptr);
-}
-
-
-} } // namespace Poco::Net
@@ -34,7 +34,7 @@ namespace Net {
 
 
 HTTPChunkedStreamBuf::HTTPChunkedStreamBuf(HTTPSession& session, openmode mode):
-    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
+    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
     _session(session),
     _mode(mode),
     _chunk(0)
@@ -181,10 +181,6 @@ HTTPChunkedStreamBuf* HTTPChunkedIOS::rdbuf()
 // HTTPChunkedInputStream
 //
 
 
-Poco::MemoryPool HTTPChunkedInputStream::_pool(sizeof(HTTPChunkedInputStream));
-
-
 HTTPChunkedInputStream::HTTPChunkedInputStream(HTTPSession& session):
     HTTPChunkedIOS(session, std::ios::in),
     std::istream(&_buf)
@@ -196,34 +192,10 @@ HTTPChunkedInputStream::~HTTPChunkedInputStream()
 {
 }
 
 
-void* HTTPChunkedInputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPChunkedInputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 //
 // HTTPChunkedOutputStream
 //
 
 
-Poco::MemoryPool HTTPChunkedOutputStream::_pool(sizeof(HTTPChunkedOutputStream));
-
-
 HTTPChunkedOutputStream::HTTPChunkedOutputStream(HTTPSession& session):
     HTTPChunkedIOS(session, std::ios::out),
     std::ostream(&_buf)
@@ -235,24 +207,4 @@ HTTPChunkedOutputStream::~HTTPChunkedOutputStream()
 {
 }
 
 
-void* HTTPChunkedOutputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPChunkedOutputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 } } // namespace Poco::Net
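All of the operator new/delete pairs removed across these stream classes followed one template: route allocation of the fixed-size stream object itself through a static MemoryPool. A condensed sketch of that pattern, with a hypothetical PooledObject standing in for the stream classes:

#include "Poco/MemoryPool.h"

// Class-scoped operator new/delete serving all instances from one shared pool,
// as in the code deleted above.
class PooledObject
{
public:
    void * operator new(std::size_t /*size*/)
    {
        return _pool.get();            // every instance has the same size
    }

    void operator delete(void * ptr)
    {
        try
        {
            _pool.release(ptr);
        }
        catch (...)
        {
            // operator delete must not throw
        }
    }

private:
    char _payload[64];
    static Poco::MemoryPool _pool;
};

// One pool per class, sized for exactly one instance per block.
Poco::MemoryPool PooledObject::_pool(sizeof(PooledObject));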
@@ -30,7 +30,7 @@ namespace Net {
 
 
 HTTPFixedLengthStreamBuf::HTTPFixedLengthStreamBuf(HTTPSession& session, ContentLength length, openmode mode):
-    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
+    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
     _session(session),
     _length(length),
     _count(0)
@@ -109,9 +109,6 @@ HTTPFixedLengthStreamBuf* HTTPFixedLengthIOS::rdbuf()
 //
 
 
-Poco::MemoryPool HTTPFixedLengthInputStream::_pool(sizeof(HTTPFixedLengthInputStream));
-
-
 HTTPFixedLengthInputStream::HTTPFixedLengthInputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
     HTTPFixedLengthIOS(session, length, std::ios::in),
     std::istream(&_buf)
@@ -124,33 +121,10 @@ HTTPFixedLengthInputStream::~HTTPFixedLengthInputStream()
 }
 
 
-void* HTTPFixedLengthInputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPFixedLengthInputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 //
 // HTTPFixedLengthOutputStream
 //
 
 
-Poco::MemoryPool HTTPFixedLengthOutputStream::_pool(sizeof(HTTPFixedLengthOutputStream));
-
-
 HTTPFixedLengthOutputStream::HTTPFixedLengthOutputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
     HTTPFixedLengthIOS(session, length, std::ios::out),
     std::ostream(&_buf)
@@ -163,23 +137,4 @@ HTTPFixedLengthOutputStream::~HTTPFixedLengthOutputStream()
 }
 
 
-void* HTTPFixedLengthOutputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPFixedLengthOutputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 } } // namespace Poco::Net
@@ -26,7 +26,7 @@ namespace Net {
 
 
 HTTPHeaderStreamBuf::HTTPHeaderStreamBuf(HTTPSession& session, openmode mode):
-    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
+    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
     _session(session),
     _end(false)
 {
@@ -101,10 +101,6 @@ HTTPHeaderStreamBuf* HTTPHeaderIOS::rdbuf()
 // HTTPHeaderInputStream
 //
 
 
-Poco::MemoryPool HTTPHeaderInputStream::_pool(sizeof(HTTPHeaderInputStream));
-
-
 HTTPHeaderInputStream::HTTPHeaderInputStream(HTTPSession& session):
     HTTPHeaderIOS(session, std::ios::in),
     std::istream(&_buf)
@@ -116,34 +112,10 @@ HTTPHeaderInputStream::~HTTPHeaderInputStream()
 {
 }
 
 
-void* HTTPHeaderInputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPHeaderInputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 //
 // HTTPHeaderOutputStream
 //
 
 
-Poco::MemoryPool HTTPHeaderOutputStream::_pool(sizeof(HTTPHeaderOutputStream));
-
-
 HTTPHeaderOutputStream::HTTPHeaderOutputStream(HTTPSession& session):
     HTTPHeaderIOS(session, std::ios::out),
     std::ostream(&_buf)
@@ -155,24 +127,4 @@ HTTPHeaderOutputStream::~HTTPHeaderOutputStream()
 {
 }
 
 
-void* HTTPHeaderOutputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPHeaderOutputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 } } // namespace Poco::Net
@@ -13,8 +13,8 @@
 
 
 #include "Poco/Net/HTTPSession.h"
-#include "Poco/Net/HTTPBufferAllocator.h"
 #include "Poco/Net/NetException.h"
+#include "Poco/Net/HTTPBasicStreamBuf.h"
 #include <cstring>
 
 
@@ -68,14 +68,6 @@ HTTPSession::HTTPSession(const StreamSocket& socket, bool keepAlive):
 
 HTTPSession::~HTTPSession()
 {
-    try
-    {
-        if (_pBuffer) HTTPBufferAllocator::deallocate(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
     try
     {
         close();
@@ -177,10 +169,10 @@ void HTTPSession::refill()
 {
     if (!_pBuffer)
     {
-        _pBuffer = HTTPBufferAllocator::allocate(HTTPBufferAllocator::BUFFER_SIZE);
+        _pBuffer = std::make_unique<char[]>(HTTP_DEFAULT_BUFFER_SIZE);
     }
-    _pCurrent = _pEnd = _pBuffer;
-    int n = receive(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
+    _pCurrent = _pEnd = _pBuffer.get();
+    int n = receive(_pBuffer.get(), HTTP_DEFAULT_BUFFER_SIZE);
     _pEnd += n;
 }
 
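The replacement for the pool-backed buffer is plain lazy allocation with an owning smart pointer, which also removes the hand-written deallocate/try/catch in the destructor. A self-contained sketch mirroring the diff (Session, refill_sketch and the member names here are illustrative, not the actual Poco declarations):

#include <memory>

constexpr size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;

class Session
{
public:
    void refill_sketch()
    {
        // Allocate once on first use; ownership is automatic thereafter.
        if (!_buffer)
            _buffer = std::make_unique<char[]>(HTTP_DEFAULT_BUFFER_SIZE);
        _current = _end = _buffer.get();
    }

private:
    std::unique_ptr<char[]> _buffer;  // freed automatically in the destructor
    char * _current = nullptr;
    char * _end = nullptr;
};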
@@ -199,7 +191,7 @@ void HTTPSession::connect(const SocketAddress& address)
     _socket.setNoDelay(true);
     // There may be leftover data from a previous (failed) request in the buffer,
     // so we clear it.
-    _pCurrent = _pEnd = _pBuffer;
+    _pCurrent = _pEnd = _pBuffer.get();
 }
 
 
@@ -26,7 +26,7 @@ namespace Net {
 
 
 HTTPStreamBuf::HTTPStreamBuf(HTTPSession& session, openmode mode):
-    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
+    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
     _session(session),
     _mode(mode)
 {
@@ -96,10 +96,6 @@ HTTPStreamBuf* HTTPIOS::rdbuf()
 // HTTPInputStream
 //
 
 
-Poco::MemoryPool HTTPInputStream::_pool(sizeof(HTTPInputStream));
-
-
 HTTPInputStream::HTTPInputStream(HTTPSession& session):
     HTTPIOS(session, std::ios::in),
     std::istream(&_buf)
@@ -112,33 +108,11 @@ HTTPInputStream::~HTTPInputStream()
 }
 
 
-void* HTTPInputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPInputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 //
 // HTTPOutputStream
 //
 
 
-Poco::MemoryPool HTTPOutputStream::_pool(sizeof(HTTPOutputStream));
-
-
 HTTPOutputStream::HTTPOutputStream(HTTPSession& session):
     HTTPIOS(session, std::ios::out),
     std::ostream(&_buf)
@@ -150,24 +124,4 @@ HTTPOutputStream::~HTTPOutputStream()
 {
 }
 
 
-void* HTTPOutputStream::operator new(std::size_t size)
-{
-    return _pool.get();
-}
-
-
-void HTTPOutputStream::operator delete(void* ptr)
-{
-    try
-    {
-        _pool.release(ptr);
-    }
-    catch (...)
-    {
-        poco_unexpected();
-    }
-}
-
-
 } } // namespace Poco::Net
@@ -2,11 +2,11 @@
 
 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54475)
+SET(VERSION_REVISION 54476)
 SET(VERSION_MAJOR 23)
-SET(VERSION_MINOR 6)
+SET(VERSION_MINOR 7)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 2fec796e73efda10a538a03af3205ce8ffa1b2de)
-SET(VERSION_DESCRIBE v23.6.1.1-testing)
-SET(VERSION_STRING 23.6.1.1)
+SET(VERSION_GITHASH d1c7e13d08868cb04d3562dcced704dd577cb1df)
+SET(VERSION_DESCRIBE v23.7.1.1-testing)
+SET(VERSION_STRING 23.7.1.1)
 # end of autochange
 
@@ -15,6 +15,7 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 
+include (cmake/unwind.cmake)
 include (cmake/cxx.cmake)
 link_libraries(global-group)
 
@@ -18,6 +18,9 @@ if (NOT PARALLEL_COMPILE_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_COMPILER_MEMORY)
     if (NOT PARALLEL_COMPILE_JOBS)
         set (PARALLEL_COMPILE_JOBS 1)
     endif ()
+    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        set (PARALLEL_COMPILE_JOBS_LESS TRUE)
+    endif()
 endif ()
 
 if (PARALLEL_COMPILE_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES))
@@ -33,6 +36,9 @@ if (NOT PARALLEL_LINK_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_LINKER_MEMORY)
     if (NOT PARALLEL_LINK_JOBS)
         set (PARALLEL_LINK_JOBS 1)
     endif ()
+    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        set (PARALLEL_LINK_JOBS_LESS TRUE)
+    endif()
 endif ()
 
 # ThinLTO provides its own parallel linking
@@ -56,4 +62,10 @@ if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
     message(STATUS
         "${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory.
         Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
+    if (PARALLEL_COMPILE_JOBS_LESS)
+        message(WARNING "The autocalculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
+    endif()
+    if (PARALLEL_LINK_JOBS_LESS)
+        message(WARNING "The autocalculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
+    endif()
 endif ()
@@ -33,6 +33,18 @@ if (CMAKE_CROSSCOMPILING)
     elseif (ARCH_PPC64LE)
         set (ENABLE_GRPC OFF CACHE INTERNAL "")
         set (ENABLE_SENTRY OFF CACHE INTERNAL "")
+    elseif (ARCH_RISCV64)
+        # RISC-V support is preliminary
+        set (GLIBC_COMPATIBILITY OFF CACHE INTERNAL "")
+        set (ENABLE_LDAP OFF CACHE INTERNAL "")
+        set (OPENSSL_NO_ASM ON CACHE INTERNAL "")
+        set (ENABLE_JEMALLOC ON CACHE INTERNAL "")
+        set (ENABLE_PARQUET OFF CACHE INTERNAL "")
+        set (ENABLE_GRPC OFF CACHE INTERNAL "")
+        set (ENABLE_HDFS OFF CACHE INTERNAL "")
+        set (ENABLE_MYSQL OFF CACHE INTERNAL "")
+        # It might be ok, but we need to update 'sysroot'
+        set (ENABLE_RUST OFF CACHE INTERNAL "")
     elseif (ARCH_S390X)
         set (ENABLE_GRPC OFF CACHE INTERNAL "")
         set (ENABLE_SENTRY OFF CACHE INTERNAL "")
@@ -1,13 +1 @@
-option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES})
-
-if (USE_UNWIND)
-    add_subdirectory(contrib/libunwind-cmake)
-    set (UNWIND_LIBRARIES unwind)
-    set (EXCEPTION_HANDLING_LIBRARY ${UNWIND_LIBRARIES})
-
-    message (STATUS "Using libunwind: ${UNWIND_LIBRARIES}")
-else ()
-    set (EXCEPTION_HANDLING_LIBRARY gcc_eh)
-endif ()
-
-message (STATUS "Using exception handler: ${EXCEPTION_HANDLING_LIBRARY}")
+add_subdirectory(contrib/libunwind-cmake)
@@ -1,5 +1,5 @@
 if (SANITIZE OR NOT (
-    ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
+    ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64 OR ARCH_S390X)) OR
     (OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
 ))
 if (ENABLE_JEMALLOC)
@@ -17,17 +17,17 @@ if (NOT ENABLE_JEMALLOC)
 endif ()
 
 if (NOT OS_LINUX)
-    message (WARNING "jemalloc support on non-linux is EXPERIMENTAL")
+    message (WARNING "jemalloc support on non-Linux is EXPERIMENTAL")
 endif()
 
 if (OS_LINUX)
-    # ThreadPool select job randomly, and there can be some threads that had been
-    # performed some memory heavy task before and will be inactive for some time,
-    # but until it will became active again, the memory will not be freed since by
-    # default each thread has it's own arena, but there should be not more then
+    # ThreadPool select job randomly, and there can be some threads that have been
+    # performed some memory-heavy tasks before and will be inactive for some time,
+    # but until it becomes active again, the memory will not be freed since, by
+    # default, each thread has its arena, but there should be no more than
     # 4*CPU arenas (see opt.nareans description).
     #
-    # By enabling percpu_arena number of arenas limited to number of CPUs and hence
+    # By enabling percpu_arena number of arenas is limited to the number of CPUs, and hence
     # this problem should go away.
     #
     # muzzy_decay_ms -- use MADV_FREE when available on newer Linuxes, to
@@ -38,7 +38,7 @@ if (OS_LINUX)
 else()
     set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
 endif()
-# CACHE variable is empty, to allow changing defaults without necessity
+# CACHE variable is empty to allow changing defaults without the necessity
 # to purge cache
 set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
 if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
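The malloc_conf string assembled here can be inspected at runtime through jemalloc's mallctl interface. A small illustrative sketch (not part of this commit; assumes linking against jemalloc with its standard header):

#include <jemalloc/jemalloc.h>
#include <cstdio>

int main()
{
    // Read back the option values baked in via the config string above.
    unsigned narenas = 0;
    size_t sz = sizeof(narenas);
    if (mallctl("opt.narenas", &narenas, &sz, nullptr, 0) == 0)
        std::printf("opt.narenas = %u\n", narenas);

    ssize_t muzzy_ms = 0;
    sz = sizeof(muzzy_ms);
    if (mallctl("opt.muzzy_decay_ms", &muzzy_ms, &sz, nullptr, 0) == 0)
        std::printf("opt.muzzy_decay_ms = %zd\n", muzzy_ms);

    return 0;
}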
@@ -148,6 +148,8 @@ elseif (ARCH_PPC64LE)
     set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
 elseif (ARCH_RISCV64)
     set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
+elseif (ARCH_S390X)
+    set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_s390x")
 else ()
     message (FATAL_ERROR "internal jemalloc: This arch is not supported")
 endif ()
@@ -170,16 +172,13 @@ endif ()
 
 target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
 
-if (USE_UNWIND)
-    # jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
-    # The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
-    # At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing.
-    # ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
-    target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
-    target_link_libraries (_jemalloc PRIVATE unwind)
-endif ()
+# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
+# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
+# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
+#
+# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
+target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
+target_link_libraries (_jemalloc PRIVATE unwind)
 
 # for RTLD_NEXT
 target_compile_options(_jemalloc PRIVATE -D_GNU_SOURCE)
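For reference, the `_Unwind_Backtrace` interface mentioned in the comment above is the libgcc-style API from <unwind.h>. A minimal sketch (illustrative, not from this commit) of walking the current stack with it:

#include <unwind.h>
#include <cstdio>

// Called once per stack frame by _Unwind_Backtrace.
static _Unwind_Reason_Code frame_callback(struct _Unwind_Context * ctx, void *)
{
    std::printf("frame ip = %p\n", reinterpret_cast<void *>(_Unwind_GetIP(ctx)));
    return _URC_NO_REASON; // keep walking
}

void dump_backtrace()
{
    _Unwind_Backtrace(frame_callback, nullptr);
}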
@@ -0,0 +1,435 @@
+/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
+#ifndef JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
+/*
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed. This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
+ */
+/* #undef JEMALLOC_PREFIX */
+/* #undef JEMALLOC_CPREFIX */
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#define JEMALLOC_OVERRIDE___LIBC_CALLOC
+#define JEMALLOC_OVERRIDE___LIBC_FREE
+#define JEMALLOC_OVERRIDE___LIBC_MALLOC
+#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+#define JEMALLOC_OVERRIDE___LIBC_REALLOC
+#define JEMALLOC_OVERRIDE___LIBC_VALLOC
+#define JEMALLOC_OVERRIDE___LIBC_PVALLOC
+/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
+
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
+ * from being exported, but for static libraries, naming collisions are a real
+ * possibility.
+ */
+#define JEMALLOC_PRIVATE_NAMESPACE je_
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.
+ */
+#define CPU_SPINWAIT
+/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
+#define HAVE_CPU_SPINWAIT 0
+
+/*
+ * Number of significant bits in virtual addresses. This may be less than the
+ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
+ * bits are the same as bit 47.
+ */
+#define LG_VADDR 64
+
+/* Defined if C11 atomics are available. */
+#define JEMALLOC_C11_ATOMICS
+
+/* Defined if GCC __atomic atomics are available. */
+#define JEMALLOC_GCC_ATOMIC_ATOMICS
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS
+
+/* Defined if GCC __sync atomics are available. */
+#define JEMALLOC_GCC_SYNC_ATOMICS
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_SYNC_ATOMICS
+
+/*
+ * Defined if __builtin_clz() and __builtin_clzl() are available.
+ */
+#define JEMALLOC_HAVE_BUILTIN_CLZ
+
+/*
+ * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ */
+/* #undef JEMALLOC_OS_UNFAIR_LOCK */
+
+/* Defined if syscall(2) is usable. */
+#define JEMALLOC_USE_SYSCALL
+
+/*
+ * Defined if secure_getenv(3) is available.
+ */
+#define JEMALLOC_HAVE_SECURE_GETENV
+
+/*
+ * Defined if issetugid(2) is available.
+ */
+/* #undef JEMALLOC_HAVE_ISSETUGID */
+
+/* Defined if pthread_atfork(3) is available. */
+#define JEMALLOC_HAVE_PTHREAD_ATFORK
+
+/* Defined if pthread_setname_np(3) is available. */
+#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+
+/* Defined if pthread_getname_np(3) is available. */
+#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP
+
+/* Defined if pthread_get_name_np(3) is available. */
+/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
+ */
+#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
+ */
+#define JEMALLOC_HAVE_CLOCK_MONOTONIC
+
+/*
+ * Defined if mach_absolute_time() is available.
+ */
+/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
+
+/*
+ * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
+ */
+#define JEMALLOC_HAVE_CLOCK_REALTIME
+
+/*
+ * Defined if _malloc_thread_cleanup() exists. At least in the case of
+ * FreeBSD, pthread_key_create() allocates, which if used during malloc
+ * bootstrapping will cause recursion into the pthreads library. Therefore, if
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
+ * malloc_tsd.
+ */
+/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#define JEMALLOC_THREADED_INIT
+
+/*
+ * Defined if the pthreads implementation defines
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
+ * to avoid recursive allocation during mutex initialization.
+ */
+/* #undef JEMALLOC_MUTEX_INIT_CB */
+
+/* Non-empty if the tls_model attribute is supported. */
+#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+
+/*
+ * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
+ * inline functions.
+ */
+/* #undef JEMALLOC_DEBUG */
+
+/* JEMALLOC_STATS enables statistics calculation. */
+#define JEMALLOC_STATS
+
+/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
+/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
+
+/* JEMALLOC_PROF enables allocation profiling. */
+/* #undef JEMALLOC_PROF */
+
+/* Use libunwind for profile backtracing if defined. */
+/* #undef JEMALLOC_PROF_LIBUNWIND */
+
+/* Use libgcc for profile backtracing if defined. */
+/* #undef JEMALLOC_PROF_LIBGCC */
+
+/* Use gcc intrinsics for profile backtracing if defined. */
+/* #undef JEMALLOC_PROF_GCC */
+
+/* JEMALLOC_PAGEID enabled page id */
+/* #undef JEMALLOC_PAGEID */
+
+/* JEMALLOC_HAVE_PRCTL checks prctl */
+#define JEMALLOC_HAVE_PRCTL
+
+/*
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
+ * segment (DSS).
+ */
+#define JEMALLOC_DSS
+
+/* Support memory filling (junk/zero). */
+#define JEMALLOC_FILL
+
+/* Support utrace(2)-based tracing. */
+/* #undef JEMALLOC_UTRACE */
+
+/* Support utrace(2)-based tracing (label based signature). */
+/* #undef JEMALLOC_UTRACE_LABEL */
+
+/* Support optional abort() on OOM. */
+/* #undef JEMALLOC_XMALLOC */
+
+/* Support lazy locking (avoid locking unless a second thread is launched). */
+/* #undef JEMALLOC_LAZY_LOCK */
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+/* #undef LG_QUANTUM */
+
+/* One page is 2^LG_PAGE bytes. */
+#define LG_PAGE 12
+
+/* Maximum number of regions in a slab. */
+/* #undef CONFIG_LG_SLAB_MAXREGS */
+
+/*
+ * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
+ * system does not explicitly support huge pages; system calls that require
+ * explicit huge page support are separately configured.
+ */
+#define LG_HUGEPAGE 20
+
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#define JEMALLOC_MAPS_COALESCE
+
+/*
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
+ */
+#define JEMALLOC_RETAIN
+
+/* TLS is used to map arenas and magazine caches to threads. */
+#define JEMALLOC_TLS
+
+/*
+ * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
+ * Don't use this directly; instead use unreachable() from util.h
+ */
+#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
+
+/*
+ * ffs*() functions to use for bitmapping. Don't use these directly; instead,
+ * use ffs_*() from util.h.
+ */
+#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
+#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
+#define JEMALLOC_INTERNAL_FFS __builtin_ffs
+
+/*
+ * popcount*() functions to use for bitmapping.
+ */
+#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
+#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
+
+/*
+ * If defined, explicitly attempt to more uniformly distribute large allocation
+ * pointer alignments across all cache indices.
+ */
+#define JEMALLOC_CACHE_OBLIVIOUS
+
+/*
+ * If defined, enable logging facilities. We make this a configure option to
+ * avoid taking extra branches everywhere.
+ */
+/* #undef JEMALLOC_LOG */
+
+/*
+ * If defined, use readlinkat() (instead of readlink()) to follow
+ * /etc/malloc_conf.
+ */
+/* #undef JEMALLOC_READLINKAT */
+
+/*
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
+ */
+/* #undef JEMALLOC_ZONE */
+
+/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ *                                         /proc/sys/vm.overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
+#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+
+/* Defined if madvise(2) is available. */
+#define JEMALLOC_HAVE_MADVISE
+
+/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+#define JEMALLOC_HAVE_MADVISE_HUGE
+
+/*
+ * Methods for purging unused pages differ between operating systems.
+ *
+ *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+ *                             will be discarded rather than swapped out.
+ *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
+ *                                 defined, this immediately discards pages,
+ *                                 such that new pages will be demand-zeroed if
+ *                                 the address region is later touched;
+ *                                 otherwise this behaves similarly to
+ *                                 MADV_FREE, though typically with higher
+ *                                 system overhead.
+ */
+#define JEMALLOC_PURGE_MADVISE_FREE
+#define JEMALLOC_PURGE_MADVISE_DONTNEED
+#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+
+/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
+/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
+
+/*
+ * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
+ */
+#define JEMALLOC_MADVISE_DONTDUMP
+
+/*
+ * Defined if MADV_[NO]CORE is supported as an argument to madvise.
+ */
+/* #undef JEMALLOC_MADVISE_NOCORE */
+
+/* Defined if mprotect(2) is available. */
+#define JEMALLOC_HAVE_MPROTECT
+
+/*
+ * Defined if transparent huge pages (THPs) are supported via the
+ * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
+ */
+/* #undef JEMALLOC_THP */
+
+/* Defined if posix_madvise is available. */
+/* #undef JEMALLOC_HAVE_POSIX_MADVISE */
+
+/*
+ * Method for purging unused pages using posix_madvise.
+ *
+ *   posix_madvise(..., POSIX_MADV_DONTNEED)
+ */
+/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
+/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */
+
+/*
+ * Defined if memcntl page admin call is supported
+ */
+/* #undef JEMALLOC_HAVE_MEMCNTL */
+
+/*
+ * Defined if malloc_size is supported
+ */
+/* #undef JEMALLOC_HAVE_MALLOC_SIZE */
+
+/* Define if operating system has alloca.h header. */
+#define JEMALLOC_HAS_ALLOCA_H
+
+/* C99 restrict keyword supported. */
+#define JEMALLOC_HAS_RESTRICT
+
+/* For use by hash code. */
+#define JEMALLOC_BIG_ENDIAN
+
+/* sizeof(int) == 2^LG_SIZEOF_INT. */
+#define LG_SIZEOF_INT 2
+
+/* sizeof(long) == 2^LG_SIZEOF_LONG. */
+#define LG_SIZEOF_LONG 3
+
+/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
+#define LG_SIZEOF_LONG_LONG 3
+
+/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
+#define LG_SIZEOF_INTMAX_T 3
+
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
+/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
+
+/* glibc memalign hook. */
+/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
+
+/* pthread support */
+#define JEMALLOC_HAVE_PTHREAD
+
+/* dlsym() support */
+#define JEMALLOC_HAVE_DLSYM
+
+/* Adaptive mutex support in pthreads. */
+#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/* GNU specific sched_getcpu support */
+#define JEMALLOC_HAVE_SCHED_GETCPU
+
+/* GNU specific sched_setaffinity support */
+#define JEMALLOC_HAVE_SCHED_SETAFFINITY
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#define JEMALLOC_BACKGROUND_THREAD
+
+/*
+ * If defined, jemalloc symbols are not exported (doesn't work when
+ * JEMALLOC_PREFIX is not defined).
+ */
+/* #undef JEMALLOC_EXPORT */
+
+/* config.malloc_conf options string. */
+#define JEMALLOC_CONFIG_MALLOC_CONF ""
+
+/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
+#define JEMALLOC_IS_MALLOC
+
+/*
+ * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
+ */
+#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
+
+/* Performs additional safety checks when defined. */
+/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
+
+/* Is C++ support being built? */
+#define JEMALLOC_ENABLE_CXX
+
+/* Performs additional size checks when defined. */
+/* #undef JEMALLOC_OPT_SIZE_CHECKS */
+
+/* Allows sampled junk and stash for checking use-after-free when defined. */
+/* #undef JEMALLOC_UAF_DETECTION */
+
+/* Darwin VM_MAKE_TAG support */
+/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */
+
+/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
+#define JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
+
+#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
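The FFS/POPCOUNT defines near the end of this header map jemalloc's bitmap helpers onto GCC/Clang builtins. A tiny illustrative sketch (not from the tree) of their semantics:

#include <cstdio>

int main()
{
    // JEMALLOC_INTERNAL_FFSLL / JEMALLOC_INTERNAL_POPCOUNTL resolve to these
    // builtins; ffs returns the 1-based index of the least significant set
    // bit (0 if none), popcount the number of set bits.
    unsigned long long x = 0b101000;
    std::printf("ffsll(0b101000) = %d\n", __builtin_ffsll(x));         // prints 4
    std::printf("popcountl(0b101000) = %d\n", __builtin_popcountl(x)); // prints 2
    return 0;
}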
@@ -61,9 +61,7 @@ target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$
 target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
 
 # Enable capturing stack traces for all exceptions.
-if (USE_UNWIND)
-    target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
-endif ()
+target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
 
 if (USE_MUSL)
     target_compile_definitions(cxx PUBLIC -D_LIBCPP_HAS_MUSL_LIBC=1)
@@ -35,12 +35,10 @@ target_include_directories(cxxabi SYSTEM BEFORE
 )
 target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
 target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
-target_link_libraries(cxxabi PUBLIC ${EXCEPTION_HANDLING_LIBRARY})
+target_link_libraries(cxxabi PUBLIC unwind)
 
 # Enable capturing stack traces for all exceptions.
-if (USE_UNWIND)
-    target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
-endif ()
+target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
 
 install(
     TARGETS cxxabi
contrib/libhdfs3 (vendored)
@@ -1 +1 @@
-Subproject commit 164b89253fad7991bce77882f01b51ab81d19f3d
+Subproject commit 377220ef351ae24994a5fcd2b5fa3930d00c4db0
@@ -120,11 +120,12 @@
     "docker/test/base": {
         "name": "clickhouse/test-base",
         "dependent": [
-            "docker/test/stateless",
-            "docker/test/integration/base",
             "docker/test/fuzzer",
+            "docker/test/integration/base",
             "docker/test/keeper-jepsen",
-            "docker/test/server-jepsen"
+            "docker/test/server-jepsen",
+            "docker/test/sqllogic",
+            "docker/test/stateless"
         ]
     },
     "docker/test/integration/kerberized_hadoop": {
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 esac
 
 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.5.4.25"
+ARG VERSION="23.6.2.18"
 ARG PACKAGES="clickhouse-keeper"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -49,8 +49,8 @@ ENV CARGO_HOME=/rust/cargo
 ENV PATH="/rust/cargo/bin:${PATH}"
 RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
     chmod 777 -R /rust && \
-    rustup toolchain install nightly && \
-    rustup default nightly && \
+    rustup toolchain install nightly-2023-07-04 && \
+    rustup default nightly-2023-07-04 && \
     rustup component add rust-src && \
     rustup target add aarch64-unknown-linux-gnu && \
     rustup target add x86_64-apple-darwin && \
@@ -138,6 +138,7 @@ def parse_env_variables(
     ARM_V80COMPAT_SUFFIX = "-aarch64-v80compat"
     FREEBSD_SUFFIX = "-freebsd"
     PPC_SUFFIX = "-ppc64le"
+    RISCV_SUFFIX = "-riscv64"
     AMD64_COMPAT_SUFFIX = "-amd64-compat"
 
     result = []
@@ -150,6 +151,7 @@ def parse_env_variables(
     is_cross_arm = compiler.endswith(ARM_SUFFIX)
     is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
     is_cross_ppc = compiler.endswith(PPC_SUFFIX)
+    is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
     is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
     is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)
 
@ -206,6 +208,11 @@ def parse_env_variables(
|
|||||||
cmake_flags.append(
|
cmake_flags.append(
|
||||||
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
|
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
|
||||||
)
|
)
|
||||||
|
elif is_cross_riscv:
|
||||||
|
cc = compiler[: -len(RISCV_SUFFIX)]
|
||||||
|
cmake_flags.append(
|
||||||
|
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
|
||||||
|
)
|
||||||
elif is_amd64_compat:
|
elif is_amd64_compat:
|
||||||
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
|
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
|
||||||
result.append("DEB_ARCH=amd64")
|
result.append("DEB_ARCH=amd64")
|
||||||
@ -370,6 +377,7 @@ def parse_args() -> argparse.Namespace:
|
|||||||
"clang-16-aarch64",
|
"clang-16-aarch64",
|
||||||
"clang-16-aarch64-v80compat",
|
"clang-16-aarch64-v80compat",
|
||||||
"clang-16-ppc64le",
|
"clang-16-ppc64le",
|
||||||
|
"clang-16-riscv64",
|
||||||
"clang-16-amd64-compat",
|
"clang-16-amd64-compat",
|
||||||
"clang-16-freebsd",
|
"clang-16-freebsd",
|
||||||
),
|
),
|
||||||
|
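Taken together, these three hunks wire the new `-riscv64` suffix through constant definition, compiler-suffix detection, toolchain selection, and the CLI choices. As a rough, hedged illustration of how a RISC-V cross build would then be requested (the script path and flag names are assumptions inferred from the `parse_args()` choices above, not confirmed by this diff):

```bash
# Hypothetical invocation; script path and flag names are assumptions
# inferred from the parse_args() choices, not confirmed by this diff.
python3 ./packager --compiler=clang-16-riscv64 --output-dir "$PWD/riscv-output"
```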
@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.5.4.25"
+ARG VERSION="23.6.2.18"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.5.4.25"
+ARG VERSION="23.6.2.18"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
@ -97,8 +97,8 @@ docker run -d \

 You may also want to mount:

-* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustmenets
-* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustmenets
+* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
+* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
 * `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).

 ### Linux capabilities
@ -166,7 +166,6 @@ function run_cmake
     "-DENABLE_UTILS=0"
     "-DENABLE_EMBEDDED_COMPILER=0"
     "-DENABLE_THINLTO=0"
-    "-DUSE_UNWIND=1"
     "-DENABLE_NURAFT=1"
     "-DENABLE_SIMDJSON=1"
     "-DENABLE_JEMALLOC=1"
@ -291,7 +291,7 @@ quit
 if [ "$server_died" == 1 ]
 then
     # The server has died.
-    if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*' server.log > description.txt
+    if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*|.*Child process was terminated by signal 9.*' server.log > description.txt
     then
         echo "Lost connection to server. See the logs." > description.txt
     fi
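The new alternative catches the case where the server's child process is killed by the OOM killer, so the fuzzer report names the real cause instead of "lost connection". A quick sanity check of just the added branch of the pattern (the sample log line is invented for illustration; only the regex comes from the diff):

```bash
# Sample log line is invented for illustration; only the regex branch is from the diff.
echo "Child process was terminated by signal 9 (KILL)" \
  | rg --text -o '.*Child process was terminated by signal 9.*'
```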
@ -47,11 +47,13 @@ ENV TZ=Etc/UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

 ENV DOCKER_CHANNEL stable
+# Unpin the docker version after the release 24.0.3 is released
+# https://github.com/moby/moby/issues/45770#issuecomment-1618255130
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
     && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
     && apt-get update \
     && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
-        docker-ce \
+        docker-ce='5:23.*' \
     && rm -rf \
         /var/lib/apt/lists/* \
         /var/cache/debconf \
|
|||||||
redis \
|
redis \
|
||||||
requests-kerberos \
|
requests-kerberos \
|
||||||
tzlocal==2.1 \
|
tzlocal==2.1 \
|
||||||
|
retry \
|
||||||
urllib3
|
urllib3
|
||||||
|
|
||||||
# Hudi supports only spark 3.3.*, not 3.4
|
# Hudi supports only spark 3.3.*, not 3.4
|
||||||
|
@ -13,6 +13,7 @@ RUN apt-get update --yes \
     sqlite3 \
     unixodbc \
     unixodbc-dev \
+    odbcinst \
     sudo \
     && apt-get clean
@ -92,8 +92,8 @@ sudo clickhouse stop ||:

 for _ in $(seq 1 60); do if [[ $(wget --timeout=1 -q 'localhost:8123' -O-) == 'Ok.' ]]; then sleep 1 ; else break; fi ; done

-grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
-pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
+rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
+zstd < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &

 # Compressed (FIXME: remove once only github actions will be left)
 rm /var/log/clickhouse-server/clickhouse-server.log
@ -33,7 +33,6 @@ RUN apt-get update -y \
     qemu-user-static \
     sqlite3 \
     sudo \
-    telnet \
     tree \
     unixodbc \
     wget \
@ -18,6 +18,9 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
 # shellcheck disable=SC1091
 source /usr/share/clickhouse-test/ci/attach_gdb.lib || true # FIXME: to not break old builds, clean on 2023-09-01

+# shellcheck disable=SC1091
+source /usr/share/clickhouse-test/ci/utils.lib || true # FIXME: to not break old builds, clean on 2023-09-01
+
 # install test configs
 /usr/share/clickhouse-test/config/install.sh
|
|||||||
|
|
||||||
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
|
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
|
||||||
|
|
||||||
function run_with_retry()
|
function fn_exists() {
|
||||||
{
|
declare -F "$1" > /dev/null;
|
||||||
set +e
|
}
|
||||||
|
|
||||||
|
# FIXME: to not break old builds, clean on 2023-09-01
|
||||||
|
function try_run_with_retry() {
|
||||||
local total_retries="$1"
|
local total_retries="$1"
|
||||||
shift
|
shift
|
||||||
|
|
||||||
local retry=0
|
if fn_exists run_with_retry; then
|
||||||
|
run_with_retry "$total_retries" "$@"
|
||||||
until [ "$retry" -ge "$total_retries" ]
|
else
|
||||||
do
|
"$@"
|
||||||
if "$@"; then
|
fi
|
||||||
set -e
|
|
||||||
return
|
|
||||||
else
|
|
||||||
retry=$((retry + 1))
|
|
||||||
sleep 3
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "Command '$*' failed after $total_retries retries, exiting"
|
|
||||||
exit 1
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function run_tests()
|
function run_tests()
|
||||||
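`fn_exists` uses bash's `declare -F`, which succeeds only when its argument names a defined function, so `try_run_with_retry` falls back to running the command once when the sourced helper library is too old to provide `run_with_retry`. A minimal standalone sketch of the same feature-detection pattern (the helper name here is hypothetical):

```bash
#!/usr/bin/env bash
# Minimal sketch of declare -F feature detection; "maybe_helper" is hypothetical.
fn_exists() { declare -F "$1" > /dev/null; }

if fn_exists maybe_helper; then
    maybe_helper "hello"       # use the helper when a sourced lib defines it
else
    echo "fallback: hello"     # otherwise run the plain command once
fi
```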
@ -161,9 +156,7 @@ function run_tests()

     ADDITIONAL_OPTIONS+=('--report-logs-stats')

-    clickhouse-test "00001_select_1" > /dev/null ||:
-
-    run_with_retry 5 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
+    try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"

     set +e
     clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
@ -8,8 +8,6 @@ RUN apt-get update -y \
     apt-get install --yes --no-install-recommends \
     bash \
     tzdata \
-    fakeroot \
-    debhelper \
     parallel \
     expect \
     python3 \
@ -20,7 +18,6 @@ RUN apt-get update -y \
     sudo \
     openssl \
     netcat-openbsd \
-    telnet \
     brotli \
     && apt-get clean
@ -18,7 +18,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
     python3-pip \
     shellcheck \
     yamllint \
-    && pip3 install black==23.1.0 boto3 codespell==2.2.1 dohq-artifactory mypy PyGithub unidiff pylint==2.6.2 \
+    && pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
     && apt-get clean \
     && rm -rf /root/.cache/pip
@ -8,8 +8,6 @@ RUN apt-get update -y \
     apt-get install --yes --no-install-recommends \
     bash \
     tzdata \
-    fakeroot \
-    debhelper \
     parallel \
     expect \
     python3 \
@ -20,7 +18,6 @@ RUN apt-get update -y \
     sudo \
     openssl \
     netcat-openbsd \
-    telnet \
     brotli \
     && apt-get clean
@ -67,6 +67,13 @@ start
 stop
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log

+# Start server from previous release
+# Let's enable S3 storage by default
+export USE_S3_STORAGE_FOR_MERGE_TREE=1
+# Previous version may not be ready for fault injections
+export ZOOKEEPER_FAULT_INJECTION=0
+configure
+
 # force_sync=false doesn't work correctly on some older versions
 sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
     | sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \
@ -76,17 +83,11 @@ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-serv
 # But we still need default disk because some tables loaded only into it
 sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
     | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
-    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
+mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
 sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
 sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml

-# Start server from previous release
-# Let's enable S3 storage by default
-export USE_S3_STORAGE_FOR_MERGE_TREE=1
-# Previous version may not be ready for fault injections
-export ZOOKEEPER_FAULT_INJECTION=0
-configure
-
 # it contains some new settings, but we can safely remove it
 rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
|||||||
-e "Authentication failed" \
|
-e "Authentication failed" \
|
||||||
-e "Cannot flush" \
|
-e "Cannot flush" \
|
||||||
-e "Container already exists" \
|
-e "Container already exists" \
|
||||||
|
-e "doesn't have metadata version on disk" \
|
||||||
clickhouse-server.upgrade.log \
|
clickhouse-server.upgrade.log \
|
||||||
| grep -av -e "_repl_01111_.*Mapping for table with UUID" \
|
| grep -av -e "_repl_01111_.*Mapping for table with UUID" \
|
||||||
| zgrep -Fa "<Error>" > /test_output/upgrade_error_messages.txt \
|
| zgrep -Fa "<Error>" > /test_output/upgrade_error_messages.txt \
|
||||||
|
@ -44,7 +44,6 @@ RUN apt-get update \
     clang-${LLVM_VERSION} \
     clang-tidy-${LLVM_VERSION} \
     cmake \
-    fakeroot \
     gdb \
     git \
     gperf \
|
|||||||
&& rm -rf /tmp/ccache
|
&& rm -rf /tmp/ccache
|
||||||
|
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
ARG SCCACHE_VERSION=v0.4.1
|
ARG SCCACHE_VERSION=v0.5.4
|
||||||
|
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||||
|
# sccache requires a value for the region. So by default we use The Default Region
|
||||||
|
ENV SCCACHE_REGION=us-east-1
|
||||||
RUN arch=${TARGETARCH:-amd64} \
|
RUN arch=${TARGETARCH:-amd64} \
|
||||||
&& case $arch in \
|
&& case $arch in \
|
||||||
amd64) rarch=x86_64 ;; \
|
amd64) rarch=x86_64 ;; \
|
||||||
|
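Both variables are read by sccache at startup: `SCCACHE_REGION` supplies the S3 region for its cache bucket, and `SCCACHE_IGNORE_SERVER_IO_ERROR=1` lets a flaky cache server degrade to plain compilation instead of failing the build. A quick way to verify the environment is picked up (the bucket name here is hypothetical):

```bash
# Bucket name is hypothetical; the region and IO-error flag match the Dockerfile above.
export SCCACHE_BUCKET=my-build-cache SCCACHE_REGION=us-east-1 SCCACHE_IGNORE_SERVER_IO_ERROR=1
sccache --start-server
sccache --show-stats   # reports the cache location and hit/miss counters
```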
29 docs/_description_templates/template-data-type.md Normal file
@ -0,0 +1,29 @@
---
toc_priority:
toc_title:
---

# data_type_name {#data_type-name}

Description.

**Parameters** (Optional)

- `x` — Description. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. [Type name](relative/path/to/type/dscr.md#type).

**Examples**

```sql

```

## Additional Info {#additional-info} (Optional)

The name of an additional section can be any, for example, **Usage**.

**See Also** (Optional)

- [link](#)

[Original article](https://clickhouse.com/docs/en/data-types/<data-type-name>/) <!--hide-->
63 docs/_description_templates/template-engine.md Normal file
@ -0,0 +1,63 @@
# EngineName {#enginename}

- What the Database/Table engine does.
- Relations with other engines if they exist.

## Creating a Database {#creating-a-database}

``` sql
CREATE DATABASE ...
```

or

## Creating a Table {#creating-a-table}

``` sql
CREATE TABLE ...
```

**Engine Parameters**

**Query Clauses** (for Table engines only)

## Virtual columns {#virtual-columns} (for Table engines only)

List the virtual columns with descriptions, if they exist.

## Data Types Support {#data_types-support} (for Database engines only)

| EngineName           | ClickHouse                         |
|----------------------|------------------------------------|
| NativeDataTypeName   | [ClickHouseDataTypeName](link#)    |

## Specifics and recommendations {#specifics-and-recommendations}

Algorithms
Specifics of read and write processes
Examples of tasks
Recommendations for usage
Specifics of data storage

## Usage Example {#usage-example}

The example must show usage and use cases. The following text contains the recommended parts of this section.

Input table:

``` text
```

Query:

``` sql
```

Result:

``` text
```

Follow up with any text to clarify the example.

**See Also**

- [link](#)
51 docs/_description_templates/template-function.md Normal file
@ -0,0 +1,51 @@
## functionName {#functionname-in-lower-case}

Short description.

**Syntax** (without SELECT)

``` sql
<function syntax>
```

Alias: `<alias name>`. (Optional)

More text (Optional).

**Arguments** (Optional)

- `x` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).

**Parameters** (Optional, only for parametric aggregate functions)

- `z` — Description. Optional (only for optional parameters). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).

**Returned value(s)**

- Returned values list.

Type: [Type name](relative/path/to/type/dscr.md#type).

**Example**

The example must show usage and/or use cases. The following text contains the recommended parts of an example.

Input table (Optional):

``` text
```

Query:

``` sql
```

Result:

``` text
```

**See Also** (Optional)

- [link](#)
33 docs/_description_templates/template-server-setting.md Normal file
@ -0,0 +1,33 @@
## server_setting_name {#server_setting_name}

Description.

Describe what is configured in this section of settings.

Possible value: ...

Default value: ...

**Settings** (Optional)

If the section contains several settings, list them here. Specify possible values and default values:

- setting_1 — Description.
- setting_2 — Description.

**Example**

```xml
<server_setting_name>
    <setting_1> ... </setting_1>
    <setting_2> ... </setting_2>
</server_setting_name>
```

**Additional Info** (Optional)

The name of an additional section can be any, for example, **Usage**.

**See Also** (Optional)

- [link](#)
27 docs/_description_templates/template-setting.md Normal file
@ -0,0 +1,27 @@
## setting_name {#setting_name}

Description.

For the switch setting, use the typical phrase: “Enables or disables something …”.

Possible values:

*For switcher setting:*

- 0 — Disabled.
- 1 — Enabled.

*For another setting (typical phrases):*

- Positive integer.
- 0 — Disabled or unlimited or something else.

Default value: `value`.

**Additional Info** (Optional)

The name of an additional section can be any, for example, **Usage**.

**See Also** (Optional)

- [link](#)
24 docs/_description_templates/template-statement.md Normal file
@ -0,0 +1,24 @@
# Statement name (for example, SHOW USER) {#statement-name-in-lower-case}

Brief description of what the statement does.

**Syntax**

```sql
Syntax of the statement.
```

## Other necessary sections of the description (Optional) {#anchor}

Examples of descriptions with a complicated structure:

- https://clickhouse.com/docs/en/sql-reference/statements/grant/
- https://clickhouse.com/docs/en/sql-reference/statements/revoke/
- https://clickhouse.com/docs/en/sql-reference/statements/select/join/

**See Also** (Optional)

Links to related topics as a list.

- [link](#)
25 docs/_description_templates/template-system-table.md Normal file
@ -0,0 +1,25 @@
# system.table_name {#system-tables_table-name}

Description.

Columns:

- `column_name` ([data_type_name](path/to/data_type.md)) — Description.

**Example**

Query:

``` sql
SELECT * FROM system.table_name
```

Result:

``` text
Some output. It shouldn't be too long.
```

**See Also**

- [Article name](path/to/article_name.md) — Some words about referenced information.
@ -33,6 +33,9 @@ then
 elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ]
 then
     DIR="powerpc64le"
+elif [ "${ARCH}" = "riscv64" ]
+then
+    DIR="riscv64"
 fi
 elif [ "${OS}" = "FreeBSD" ]
 then
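The installer picks a per-architecture download directory from the machine type, and this hunk extends the same if/elif dispatch with a `riscv64` branch. A standalone sketch of the pattern (the `OS`/`ARCH` derivation is assumed, since the surrounding script is not shown in this diff):

```bash
#!/usr/bin/env bash
# Assumed derivation: the surrounding script resolves OS/ARCH roughly like this.
OS=$(uname -s)
ARCH=$(uname -m)
if [ "${OS}" = "Linux" ]
then
    case "${ARCH}" in
        x86_64)               DIR="amd64" ;;      # directory names illustrative
        aarch64)              DIR="aarch64" ;;
        powerpc64le|ppc64le)  DIR="powerpc64le" ;;
        riscv64)              DIR="riscv64" ;;
    esac
fi
echo "would download binaries from the '${DIR:-unknown}' directory"
```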
20 docs/changelogs/v22.8.20.11-lts.md Normal file
@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.20.11-lts (c9ca79e24e8) FIXME as compared to v22.8.19.10-lts (989bc2fe8b0)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)).
* Fix incorrect constant folding [#50536](https://github.com/ClickHouse/ClickHouse/pull/50536) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix fuzzer failure in ActionsDAG [#51301](https://github.com/ClickHouse/ClickHouse/pull/51301) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
23 docs/changelogs/v23.3.8.21-lts.md Normal file
@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.8.21-lts (1675f2264f3) FIXME as compared to v23.3.7.5-lts (bc683c11c92)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
26 docs/changelogs/v23.4.6.25-stable.md Normal file
@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.4.6.25-stable (a06848b1770) FIXME as compared to v23.4.5.22-stable (0ced5d6a8da)

#### Improvement

* Backported in [#51234](https://github.com/ClickHouse/ClickHouse/issues/51234): Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
301 docs/changelogs/v23.6.1.1524-stable.md Normal file
@ -0,0 +1,301 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.6.1.1524-stable (d1c7e13d088) FIXME as compared to v23.5.1.3174-stable (2fec796e73e)

#### Backward Incompatible Change
* Delete feature `do_not_evict_index_and_mark_files` in the fs cache. This feature was only making things worse. [#51253](https://github.com/ClickHouse/ClickHouse/pull/51253) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove ALTER support for experimental LIVE VIEW. [#51287](https://github.com/ClickHouse/ClickHouse/pull/51287) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### New Feature
* Add setting `session_timezone`, it is used as default timezone for session when not explicitly specified. [#44149](https://github.com/ClickHouse/ClickHouse/pull/44149) ([Andrey Zvonov](https://github.com/zvonand)).
* Added overlay database engine and representation of a directory as a database. This commit adds 4 databases: 1. DatabaseOverlay: Implements the IDatabase interface. Allows combining multiple databases, such as FileSystem and Memory. Internally, it stores a vector with other database pointers and proxies requests to them in turn until one is executed successfully. 2. DatabaseFilesystem: allows read-only interaction with files stored on the file system. Internally, it uses TableFunctionFile to implicitly load a file when a user requests the table; the result of the TableFunctionFile call is cached for quick access. 3. DatabaseS3: allows read-only interaction with s3 storage. It uses TableFunctionS3 to implicitly load a table from s3. 4. DatabaseHDFS: allows interaction with hdfs storage. It uses TableFunctionHDFS to implicitly load a table from hdfs. [#48821](https://github.com/ClickHouse/ClickHouse/pull/48821) ([alekseygolub](https://github.com/alekseygolub)).
* Add a new setting named `use_mysql_types_in_show_columns` to alter the `SHOW COLUMNS` SQL statement to display MySQL equivalent types when a client is connected via the MySQL compatibility port. [#49577](https://github.com/ClickHouse/ClickHouse/pull/49577) ([Thomas Panetti](https://github.com/tpanetti)).
* Added option `--rename_files_after_processing <pattern>`. This closes [#34207](https://github.com/ClickHouse/ClickHouse/issues/34207). [#49626](https://github.com/ClickHouse/ClickHouse/pull/49626) ([alekseygolub](https://github.com/alekseygolub)).
* Add `TableFunctionRedis`, a Redis table engine, and `RedisCommon`, which contains Redis-related tools and types; support `equals` and `in` filter push down into Redis. [#50150](https://github.com/ClickHouse/ClickHouse/pull/50150) ([JackyWoo](https://github.com/JackyWoo)).
* Allow to skip empty files in file/s3/url/hdfs table functions using settings `s3_skip_empty_files`, `hdfs_skip_empty_files`, `engine_file_skip_empty_files`, `engine_url_skip_empty_files`. [#50364](https://github.com/ClickHouse/ClickHouse/pull/50364) ([Kruglov Pavel](https://github.com/Avogar)).
* Clickhouse-client can now be called with a connection instead of "--host", "--port", "--user" etc. [#50689](https://github.com/ClickHouse/ClickHouse/pull/50689) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Codec DEFLATE_QPL is now controlled via server setting "enable_deflate_qpl_codec" (default: false) instead of setting "allow_experimental_codecs". This marks QPL_DEFLATE non-experimental. [#50775](https://github.com/ClickHouse/ClickHouse/pull/50775) ([Robert Schulze](https://github.com/rschu1ze)).

#### Performance Improvement
* Improve performance with enabled QueryProfiler using thread-local timer_id instead of global object. [#48778](https://github.com/ClickHouse/ClickHouse/pull/48778) ([Jiebin Sun](https://github.com/jiebinn)).
* Rewrite CapnProto input/output format to improve its performance. Map column names and CapnProto fields case-insensitively, fix reading/writing of nested structure fields. [#49752](https://github.com/ClickHouse/ClickHouse/pull/49752) ([Kruglov Pavel](https://github.com/Avogar)).
* Optimize parquet write performance for parallel threads. [#50102](https://github.com/ClickHouse/ClickHouse/pull/50102) ([Hongbin Ma](https://github.com/binmahone)).
* Disable `parallelize_output_from_storages` for processing MATERIALIZED VIEWs and storages with one block only. [#50214](https://github.com/ClickHouse/ClickHouse/pull/50214) ([Azat Khuzhin](https://github.com/azat)).
* Merge PR https://github.com/ClickHouse/ClickHouse/pull/46558 (Avoid processing already sorted data). Avoid block permutation during sort if the block is already sorted. [#50697](https://github.com/ClickHouse/ClickHouse/pull/50697) ([Maksim Kita](https://github.com/kitaisreal)).
* In the earlier PRs ([#50062](https://github.com/ClickHouse/ClickHouse/issues/50062), [#50307](https://github.com/ClickHouse/ClickHouse/issues/50307)), we used to propose an optimization pattern which transforms the predicates with toYear/toYYYYMM into its equivalent but converter-free form. This transformation could bring significant performance impact to some workloads, such as SSB. However, as issue [#50628](https://github.com/ClickHouse/ClickHouse/issues/50628) indicated, these two PRs would introduce some issues which may result in incomplete query results, and as a result, they were reverted by [#50629](https://github.com/ClickHouse/ClickHouse/issues/50629). [#50951](https://github.com/ClickHouse/ClickHouse/pull/50951) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Make multiple list requests to ZooKeeper in parallel to speed up reading from system.zookeeper table. [#51042](https://github.com/ClickHouse/ClickHouse/pull/51042) ([Alexander Gololobov](https://github.com/davenger)).
* Speedup initialization of DateTime lookup tables for time zones. This should reduce startup/connect time of clickhouse client especially in debug build as it is rather heavy. [#51347](https://github.com/ClickHouse/ClickHouse/pull/51347) ([Alexander Gololobov](https://github.com/davenger)).

#### Improvement
* Allow to cast IPv6 to IPv4 address for CIDR ::ffff:0:0/96 (IPv4-mapped addresses). [#49759](https://github.com/ClickHouse/ClickHouse/pull/49759) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update MongoDB protocol to support MongoDB 5.1 version and newer. Support for the versions with the old protocol (<3.6) is preserved. Closes [#45621](https://github.com/ClickHouse/ClickHouse/issues/45621), [#49879](https://github.com/ClickHouse/ClickHouse/issues/49879). [#50061](https://github.com/ClickHouse/ClickHouse/pull/50061) ([Nikolay Degterinsky](https://github.com/evillique)).
* Improved scheduling of merge selecting and cleanup tasks in `ReplicatedMergeTree`. The tasks will not be executed too frequently when there's nothing to merge or cleanup. Added settings `max_merge_selecting_sleep_ms`, `merge_selecting_sleep_slowdown_factor`, `max_cleanup_delay_period` and `cleanup_thread_preferred_points_per_iteration`. It should close [#31919](https://github.com/ClickHouse/ClickHouse/issues/31919). [#50107](https://github.com/ClickHouse/ClickHouse/pull/50107) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support parallel replicas with the analyzer. [#50441](https://github.com/ClickHouse/ClickHouse/pull/50441) ([Raúl Marín](https://github.com/Algunenano)).
* Add setting `input_format_max_bytes_to_read_for_schema_inference` to limit the number of bytes to read in schema inference. Closes [#50577](https://github.com/ClickHouse/ClickHouse/issues/50577). [#50592](https://github.com/ClickHouse/ClickHouse/pull/50592) ([Kruglov Pavel](https://github.com/Avogar)).
* Respect setting input_format_as_default in schema inference. [#50602](https://github.com/ClickHouse/ClickHouse/pull/50602) ([Kruglov Pavel](https://github.com/Avogar)).
* Make filter push down through cross join. [#50605](https://github.com/ClickHouse/ClickHouse/pull/50605) ([Han Fei](https://github.com/hanfei1991)).
* Actual lz4 version is used now. [#50621](https://github.com/ClickHouse/ClickHouse/pull/50621) ([Nikita Taranov](https://github.com/nickitat)).
* Allow to skip trailing empty lines in CSV/TSV/CustomSeparated formats via settings `input_format_csv_skip_trailing_empty_lines`, `input_format_tsv_skip_trailing_empty_lines` and `input_format_custom_skip_trailing_empty_lines` (disabled by default). Closes [#49315](https://github.com/ClickHouse/ClickHouse/issues/49315). [#50635](https://github.com/ClickHouse/ClickHouse/pull/50635) ([Kruglov Pavel](https://github.com/Avogar)).
* Functions "toDateOrDefault|OrNull()" and "accurateCast[OrDefault|OrNull]()" now correctly parse numeric arguments. [#50709](https://github.com/ClickHouse/ClickHouse/pull/50709) ([Dmitry Kardymon](https://github.com/kardymonds)).
* Allow the CSV input format to parse files with whitespace or `\t` as the field delimiter; these delimiters are supported in Spark. [#50712](https://github.com/ClickHouse/ClickHouse/pull/50712) ([KevinyhZou](https://github.com/KevinyhZou)).
* Settings `number_of_mutations_to_delay` and `number_of_mutations_to_throw` are enabled by default now with values 500 and 1000 respectively. [#50726](https://github.com/ClickHouse/ClickHouse/pull/50726) ([Anton Popov](https://github.com/CurtizJ)).
* Keeper improvement: add feature flags for Keeper API. Each feature flag can be disabled or enabled by defining it under `keeper_server.feature_flags` config. E.g. to enable `CheckNotExists` request, `keeper_server.feature_flags.check_not_exists` should be set to `1` on Keeper. [#50796](https://github.com/ClickHouse/ClickHouse/pull/50796) ([Antonio Andelic](https://github.com/antonio2368)).
* The dashboard correctly shows missing values. This closes [#50831](https://github.com/ClickHouse/ClickHouse/issues/50831). [#50832](https://github.com/ClickHouse/ClickHouse/pull/50832) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* CGroups metrics related to CPU are replaced with one metric, `CGroupMaxCPU` for better usability. The `Normalized` CPU usage metrics will be normalized to CGroups limits instead of the total number of CPUs when they are set. This closes [#50836](https://github.com/ClickHouse/ClickHouse/issues/50836). [#50835](https://github.com/ClickHouse/ClickHouse/pull/50835) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Relax the thresholds for "too many parts" to be more modern. Return the backpressure during long-running insert queries. [#50856](https://github.com/ClickHouse/ClickHouse/pull/50856) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added the possibility to use date and time arguments in syslog timestamp format in functions parseDateTimeBestEffort*() and parseDateTime64BestEffort*(). [#50925](https://github.com/ClickHouse/ClickHouse/pull/50925) ([Victor Krasnov](https://github.com/sirvickr)).
* Suggest using `APPEND` or `TRUNCATE` for `INTO OUTFILE` when file exists. [#50950](https://github.com/ClickHouse/ClickHouse/pull/50950) ([alekar](https://github.com/alekar)).
* Add embedded keeper-client to standalone keeper binary. [#50964](https://github.com/ClickHouse/ClickHouse/pull/50964) ([pufit](https://github.com/pufit)).
* Command line parameter "--password" in clickhouse-client can now be specified only once. [#50966](https://github.com/ClickHouse/ClickHouse/pull/50966) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix data lakes slowness because of synchronous head requests. (Related to Iceberg/Deltalake/Hudi being slow with a lot of files). [#50976](https://github.com/ClickHouse/ClickHouse/pull/50976) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use `hash_of_all_files` from `system.parts` to check identity of parts during on-cluster backups. [#50997](https://github.com/ClickHouse/ClickHouse/pull/50997) ([Vitaly Baranov](https://github.com/vitlibar)).
* The system table zookeeper_connection connected_time identifies the time when the connection is established (standard format), and session_uptime_elapsed_seconds is added, which labels the duration of the established connection session (in seconds). [#51026](https://github.com/ClickHouse/ClickHouse/pull/51026) ([郭小龙](https://github.com/guoxiaolongzte)).
* Show halves of checksums in `system.parts`, `system.projection_parts` and in error messages in the correct order. [#51040](https://github.com/ClickHouse/ClickHouse/pull/51040) ([Vitaly Baranov](https://github.com/vitlibar)).
* Do not replicate `ALTER PARTITION` queries and mutations through `Replicated` database if it has only one shard and the underlying table is `ReplicatedMergeTree`. [#51049](https://github.com/ClickHouse/ClickHouse/pull/51049) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)).
* Add total_bytes_to_read to Progress packet in TCP protocol for better Progress bar. [#51158](https://github.com/ClickHouse/ClickHouse/pull/51158) ([Kruglov Pavel](https://github.com/Avogar)).
* Better checking of data parts on disks with filesystem cache. [#51164](https://github.com/ClickHouse/ClickHouse/pull/51164) ([Anton Popov](https://github.com/CurtizJ)).
* Disable cache setting `do_not_evict_index_and_mark_files` (Was enabled in `23.5`). [#51222](https://github.com/ClickHouse/ClickHouse/pull/51222) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix occasionally incorrect `current_elements_num` in fs cache. [#51242](https://github.com/ClickHouse/ClickHouse/pull/51242) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add random sleep before merges/mutations execution to split load more evenly between replicas in case of zero-copy replication. [#51282](https://github.com/ClickHouse/ClickHouse/pull/51282) ([alesapin](https://github.com/alesapin)).
* The function `transform` as well as `CASE` with value matching started to support all data types. This closes [#29730](https://github.com/ClickHouse/ClickHouse/issues/29730). This closes [#32387](https://github.com/ClickHouse/ClickHouse/issues/32387). This closes [#50827](https://github.com/ClickHouse/ClickHouse/issues/50827). This closes [#31336](https://github.com/ClickHouse/ClickHouse/issues/31336). This closes [#40493](https://github.com/ClickHouse/ClickHouse/issues/40493). [#51351](https://github.com/ClickHouse/ClickHouse/pull/51351) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* We have found a bug in LLVM that makes the usage of `compile_expressions` setting unsafe. It is disabled by default. [#51368](https://github.com/ClickHouse/ClickHouse/pull/51368) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Issue [#50220](https://github.com/ClickHouse/ClickHouse/issues/50220) reports a core in `grace_hash` join. We finally reproduced the exception locally, and found that the issue is related to a failure to create a temporary file. Somehow this is triggered in https://github.com/ClickHouse/ClickHouse/pull/49816 https://github.com/ClickHouse/ClickHouse/pull/49483. [#51382](https://github.com/ClickHouse/ClickHouse/pull/51382) ([lgbo](https://github.com/lgbo-ustc)).

#### Build/Testing/Packaging Improvement
* Update contrib/re2 to 2023-06-02. [#50949](https://github.com/ClickHouse/ClickHouse/pull/50949) ([Yuriy Chernyshov](https://github.com/georgthegreat)).
* ClickHouse server will print the list of changed settings on fatal errors. This closes [#51137](https://github.com/ClickHouse/ClickHouse/issues/51137). [#51138](https://github.com/ClickHouse/ClickHouse/pull/51138) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In https://github.com/ClickHouse/ClickHouse/pull/51143 the fast tests failed, but the status wasn't created because of the chown `file not found`. This addresses it. Decrease the default values for `http-max-field-value-size` and `http_max_field_name_size` to 128K. [#51163](https://github.com/ClickHouse/ClickHouse/pull/51163) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update Ubuntu version in docker containers. [#51180](https://github.com/ClickHouse/ClickHouse/pull/51180) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Allow building ClickHouse with clang-17. [#51300](https://github.com/ClickHouse/ClickHouse/pull/51300) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* [SQLancer](https://github.com/sqlancer/sqlancer) check is considered stable as bugs that were triggered by it are fixed. Now failures of SQLancer check will be reported as failed check status. [#51340](https://github.com/ClickHouse/ClickHouse/pull/51340) ([Ilya Yatsishin](https://github.com/qoega)).
* Making our CI even better. [#51494](https://github.com/ClickHouse/ClickHouse/pull/51494) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Split huge `RUN` in Dockerfile into smaller conditional. Install the necessary tools on demand in the same `RUN` layer, and remove them after that. Upgrade the OS only once at the beginning. Use a modern way to check the signed repository. Downgrade the base repo to ubuntu:20.04 to address the issues on older docker versions. Upgrade golang version to address golang vulnerabilities. [#51504](https://github.com/ClickHouse/ClickHouse/pull/51504) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* This a follow-up for [#51504](https://github.com/ClickHouse/ClickHouse/issues/51504), the cleanup was lost during refactoring. [#51564](https://github.com/ClickHouse/ClickHouse/pull/51564) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Report loading status for executable dictionaries correctly [#48775](https://github.com/ClickHouse/ClickHouse/pull/48775) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Proper mutation of skip indices and projections [#50104](https://github.com/ClickHouse/ClickHouse/pull/50104) ([Amos Bird](https://github.com/amosbird)).
* Cleanup moving parts [#50489](https://github.com/ClickHouse/ClickHouse/pull/50489) ([vdimir](https://github.com/vdimir)).
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)).
* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)).
* Revert recent grace hash join changes [#50699](https://github.com/ClickHouse/ClickHouse/pull/50699) ([vdimir](https://github.com/vdimir)).
* Query Cache: Try to fix bad cast from ColumnConst to ColumnVector<char8_t> [#50704](https://github.com/ClickHouse/ClickHouse/pull/50704) ([Robert Schulze](https://github.com/rschu1ze)).
* Do not read all the columns from right GLOBAL JOIN table. [#50721](https://github.com/ClickHouse/ClickHouse/pull/50721) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
* SummingMergeTree support for DateTime64 [#50797](https://github.com/ClickHouse/ClickHouse/pull/50797) ([Jordi Villar](https://github.com/jrdi)).
* Add compat setting for non-const timezones [#50834](https://github.com/ClickHouse/ClickHouse/pull/50834) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix type of LDAP server params hash in cache entry [#50865](https://github.com/ClickHouse/ClickHouse/pull/50865) ([Julian Maicher](https://github.com/jmaicher)).
* Fallback to parsing big integer from String instead of exception in Parquet format [#50873](https://github.com/ClickHouse/ClickHouse/pull/50873) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix checking the lock file too often while writing a backup [#50889](https://github.com/ClickHouse/ClickHouse/pull/50889) ([Vitaly Baranov](https://github.com/vitlibar)).
* Do not apply projection if read-in-order was enabled. [#50923](https://github.com/ClickHouse/ClickHouse/pull/50923) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix race azure blob storage iterator [#50936](https://github.com/ClickHouse/ClickHouse/pull/50936) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix erroneous `sort_description` propagation in `CreatingSets` [#50955](https://github.com/ClickHouse/ClickHouse/pull/50955) ([Nikita Taranov](https://github.com/nickitat)).
* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
* MaterializedMySQL: Keep parentheses for empty table overrides [#50977](https://github.com/ClickHouse/ClickHouse/pull/50977) ([Val Doroshchuk](https://github.com/valbok)).
* Fix crash in BackupCoordinationStageSync::setError() [#51012](https://github.com/ClickHouse/ClickHouse/pull/51012) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix ineffective query cache for SELECTs with subqueries [#51132](https://github.com/ClickHouse/ClickHouse/pull/51132) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix Set index with constant nullable comparison. [#51205](https://github.com/ClickHouse/ClickHouse/pull/51205) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a crash in s3 and s3Cluster functions [#51209](https://github.com/ClickHouse/ClickHouse/pull/51209) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix core dump when compile expression [#51231](https://github.com/ClickHouse/ClickHouse/pull/51231) ([LiuNeng](https://github.com/liuneng1994)).
* Fix use-after-free in StorageURL when switching URLs [#51260](https://github.com/ClickHouse/ClickHouse/pull/51260) ([Michael Kolupaev](https://github.com/al13n321)).
* Updated check for parameterized view [#51272](https://github.com/ClickHouse/ClickHouse/pull/51272) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix multiple writing of same file to backup [#51299](https://github.com/ClickHouse/ClickHouse/pull/51299) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix fuzzer failure in ActionsDAG [#51301](https://github.com/ClickHouse/ClickHouse/pull/51301) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove garbage from function `transform` [#51350](https://github.com/ClickHouse/ClickHouse/pull/51350) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix MSan report in lowerUTF8/upperUTF8 [#51371](https://github.com/ClickHouse/ClickHouse/pull/51371) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* fs cache: fix a bit incorrect use_count after [#44985](https://github.com/ClickHouse/ClickHouse/issues/44985) [#51406](https://github.com/ClickHouse/ClickHouse/pull/51406) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix logical assert in `tupleElement()` with default values [#51534](https://github.com/ClickHouse/ClickHouse/pull/51534) ([Robert Schulze](https://github.com/rschu1ze)).
* fs cache: remove file from opened file cache immediately when evicting file [#51596](https://github.com/ClickHouse/ClickHouse/pull/51596) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Deprecate delete-on-destroy.txt [#49181](https://github.com/ClickHouse/ClickHouse/pull/49181) ([Alexander Gololobov](https://github.com/davenger)).
* Attempt to increase the general runners' survival rate [#49283](https://github.com/ClickHouse/ClickHouse/pull/49283) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Refactor subqueries for IN [#49570](https://github.com/ClickHouse/ClickHouse/pull/49570) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Test plan optimization analyzer [#50095](https://github.com/ClickHouse/ClickHouse/pull/50095) ([Igor Nikonov](https://github.com/devcrafter)).
* Implement endianness-independent serialization for quantileTiming [#50324](https://github.com/ClickHouse/ClickHouse/pull/50324) ([ltrk2](https://github.com/ltrk2)).
* require `finalize()` call before d-tor for all writes buffers [#50395](https://github.com/ClickHouse/ClickHouse/pull/50395) ([Sema Checherinda](https://github.com/CheSema)).
* Implement big-endian support for the deterministic reservoir sampler [#50405](https://github.com/ClickHouse/ClickHouse/pull/50405) ([ltrk2](https://github.com/ltrk2)).
* Fix compilation error on big-endian platforms [#50406](https://github.com/ClickHouse/ClickHouse/pull/50406) ([ltrk2](https://github.com/ltrk2)).
* Attach gdb in stateless tests [#50487](https://github.com/ClickHouse/ClickHouse/pull/50487) ([Kruglov Pavel](https://github.com/Avogar)).
* JIT infrastructure refactoring [#50531](https://github.com/ClickHouse/ClickHouse/pull/50531) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer: Do not apply Query Tree optimizations on shards [#50584](https://github.com/ClickHouse/ClickHouse/pull/50584) ([Dmitry Novik](https://github.com/novikd)).
* Increase max array size in group bitmap [#50620](https://github.com/ClickHouse/ClickHouse/pull/50620) ([Kruglov Pavel](https://github.com/Avogar)).
* Misc Annoy index improvements [#50661](https://github.com/ClickHouse/ClickHouse/pull/50661) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix reading negative decimals in avro format [#50668](https://github.com/ClickHouse/ClickHouse/pull/50668) ([Kruglov Pavel](https://github.com/Avogar)).
* Unify priorities for connection pools [#50675](https://github.com/ClickHouse/ClickHouse/pull/50675) ([Sergei Trifonov](https://github.com/serxa)).
* Postpone check of outdated parts [#50676](https://github.com/ClickHouse/ClickHouse/pull/50676) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Unify priorities: `IExecutableTask`s [#50677](https://github.com/ClickHouse/ClickHouse/pull/50677) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Disable grace_hash join in stress tests [#50693](https://github.com/ClickHouse/ClickHouse/pull/50693) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* ReverseTransform small improvement [#50698](https://github.com/ClickHouse/ClickHouse/pull/50698) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Support OPTIMIZE for temporary tables [#50710](https://github.com/ClickHouse/ClickHouse/pull/50710) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Refactor reading from object storages [#50711](https://github.com/ClickHouse/ClickHouse/pull/50711) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix data race in log message of cached buffer [#50723](https://github.com/ClickHouse/ClickHouse/pull/50723) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add new keywords into projections documentation [#50743](https://github.com/ClickHouse/ClickHouse/pull/50743) ([YalalovSM](https://github.com/YalalovSM)).
|
||||||
|
* Fix build for aarch64 (temporary disable azure) [#50770](https://github.com/ClickHouse/ClickHouse/pull/50770) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Update version after release [#50772](https://github.com/ClickHouse/ClickHouse/pull/50772) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.5.1.3174-stable [#50774](https://github.com/ClickHouse/ClickHouse/pull/50774) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update CHANGELOG.md [#50788](https://github.com/ClickHouse/ClickHouse/pull/50788) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.2.7.32-stable [#50809](https://github.com/ClickHouse/ClickHouse/pull/50809) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Desctructing --> Destructing [#50810](https://github.com/ClickHouse/ClickHouse/pull/50810) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Rename azure_blob_storage to azureBlobStorage [#50812](https://github.com/ClickHouse/ClickHouse/pull/50812) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Fix ParallelReadBuffer seek [#50820](https://github.com/ClickHouse/ClickHouse/pull/50820) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* [RFC] Print git hash when crashing [#50823](https://github.com/ClickHouse/ClickHouse/pull/50823) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Add tests for function "transform" [#50833](https://github.com/ClickHouse/ClickHouse/pull/50833) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.5.2.7-stable [#50844](https://github.com/ClickHouse/ClickHouse/pull/50844) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Updated changelog with azureBlobStorage table function & engine entry [#50850](https://github.com/ClickHouse/ClickHouse/pull/50850) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Update easy_tasks_sorted_ru.md [#50853](https://github.com/ClickHouse/ClickHouse/pull/50853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Document x86 / ARM prerequisites for Docker image [#50867](https://github.com/ClickHouse/ClickHouse/pull/50867) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* MaterializedMySQL: Add test_named_collections [#50874](https://github.com/ClickHouse/ClickHouse/pull/50874) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.8.18.31-lts [#50881](https://github.com/ClickHouse/ClickHouse/pull/50881) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.3.3.52-lts [#50882](https://github.com/ClickHouse/ClickHouse/pull/50882) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.4.3.48-stable [#50883](https://github.com/ClickHouse/ClickHouse/pull/50883) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* MaterializedMySQL: Add additional test case to insert_with_modify_binlog_checksum [#50884](https://github.com/ClickHouse/ClickHouse/pull/50884) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Update broken tests list [#50886](https://github.com/ClickHouse/ClickHouse/pull/50886) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Fix LOGICAL_ERROR in snowflakeToDateTime*() [#50893](https://github.com/ClickHouse/ClickHouse/pull/50893) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Tests with parallel replicas are no more "always green" [#50896](https://github.com/ClickHouse/ClickHouse/pull/50896) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Slightly more information in error message about cached disk [#50897](https://github.com/ClickHouse/ClickHouse/pull/50897) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* do not call finalize after exception [#50907](https://github.com/ClickHouse/ClickHouse/pull/50907) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Update Annoy docs [#50912](https://github.com/ClickHouse/ClickHouse/pull/50912) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* A bit safer UserDefinedSQLFunctionVisitor [#50913](https://github.com/ClickHouse/ClickHouse/pull/50913) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Update contribe/orc in .gitmodules [#50920](https://github.com/ClickHouse/ClickHouse/pull/50920) ([San](https://github.com/santrancisco)).
|
||||||
|
* MaterializedMySQL: Add missing DROP DATABASE for tests [#50924](https://github.com/ClickHouse/ClickHouse/pull/50924) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Fix 'Illegal column timezone' in stress tests [#50929](https://github.com/ClickHouse/ClickHouse/pull/50929) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix tests sanity checks and avoid dropping system.query_log table [#50934](https://github.com/ClickHouse/ClickHouse/pull/50934) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix tests for throttling by allowing more margin of error for trottling event [#50935](https://github.com/ClickHouse/ClickHouse/pull/50935) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* 01746_convert_type_with_default: Temporarily disable flaky test [#50937](https://github.com/ClickHouse/ClickHouse/pull/50937) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix the statless tests image for old commits [#50947](https://github.com/ClickHouse/ClickHouse/pull/50947) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix logic in `AsynchronousBoundedReadBuffer::seek` [#50952](https://github.com/ClickHouse/ClickHouse/pull/50952) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Uncomment flaky test (01746_convert_type_with_default) [#50954](https://github.com/ClickHouse/ClickHouse/pull/50954) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Fix keeper-client help message [#50965](https://github.com/ClickHouse/ClickHouse/pull/50965) ([pufit](https://github.com/pufit)).
|
||||||
|
* fix build issue on clang 15 [#50967](https://github.com/ClickHouse/ClickHouse/pull/50967) ([Chang chen](https://github.com/baibaichen)).
|
||||||
|
* Docs: Fix embedded video link [#50972](https://github.com/ClickHouse/ClickHouse/pull/50972) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Change submodule capnproto to it's fork in ClickHouse [#50987](https://github.com/ClickHouse/ClickHouse/pull/50987) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Attempt to make 01281_group_by_limit_memory_tracking not flaky [#50995](https://github.com/ClickHouse/ClickHouse/pull/50995) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Fix flaky 02561_null_as_default_more_formats [#51001](https://github.com/ClickHouse/ClickHouse/pull/51001) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix flaky test_seekable_formats [#51002](https://github.com/ClickHouse/ClickHouse/pull/51002) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Follow-up to [#50448](https://github.com/ClickHouse/ClickHouse/issues/50448) [#51006](https://github.com/ClickHouse/ClickHouse/pull/51006) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Do not spam sqlancer build log [#51061](https://github.com/ClickHouse/ClickHouse/pull/51061) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Refactor IColumn::forEachSubcolumn to make it slightly harder to implement incorrectly [#51072](https://github.com/ClickHouse/ClickHouse/pull/51072) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* MaterializedMySQL: Rename materialize_with_ddl.py -> materialized_with_ddl [#51074](https://github.com/ClickHouse/ClickHouse/pull/51074) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Improve woboq browser report [#51077](https://github.com/ClickHouse/ClickHouse/pull/51077) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix for part_names_mutex used after destruction [#51099](https://github.com/ClickHouse/ClickHouse/pull/51099) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix ColumnConst::forEachSubcolumn missing from previous PR [#51102](https://github.com/ClickHouse/ClickHouse/pull/51102) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Fix the test 02783_parsedatetimebesteffort_syslog flakiness [#51112](https://github.com/ClickHouse/ClickHouse/pull/51112) ([Victor Krasnov](https://github.com/sirvickr)).
|
||||||
|
* Compatibility with clang-17 [#51114](https://github.com/ClickHouse/ClickHouse/pull/51114) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Make more parallel get requests to ZooKeeper in system.zookeeper [#51118](https://github.com/ClickHouse/ClickHouse/pull/51118) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix 02703_max_local_write_bandwidth flakiness [#51120](https://github.com/ClickHouse/ClickHouse/pull/51120) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.5.3.24-stable [#51121](https://github.com/ClickHouse/ClickHouse/pull/51121) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.4.4.16-stable [#51122](https://github.com/ClickHouse/ClickHouse/pull/51122) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.3.4.17-lts [#51123](https://github.com/ClickHouse/ClickHouse/pull/51123) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.8.19.10-lts [#51124](https://github.com/ClickHouse/ClickHouse/pull/51124) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix typo [#51126](https://github.com/ClickHouse/ClickHouse/pull/51126) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Slightly better diagnostics [#51127](https://github.com/ClickHouse/ClickHouse/pull/51127) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Small fix in `MergeTreePrefetchedReadPool` [#51131](https://github.com/ClickHouse/ClickHouse/pull/51131) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Don't report table function accesses to system.errors [#51147](https://github.com/ClickHouse/ClickHouse/pull/51147) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix SQLancer branch name [#51148](https://github.com/ClickHouse/ClickHouse/pull/51148) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Revert "Added ability to implicitly use file/hdfs/s3 table functions in clickhouse-local" [#51149](https://github.com/ClickHouse/ClickHouse/pull/51149) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* More profile events for fs cache [#51161](https://github.com/ClickHouse/ClickHouse/pull/51161) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Unforget to pass callback to readBigAt() in ParallelReadBuffer [#51165](https://github.com/ClickHouse/ClickHouse/pull/51165) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Update README.md [#51179](https://github.com/ClickHouse/ClickHouse/pull/51179) ([Tyler Hannan](https://github.com/tylerhannan)).
|
||||||
|
* Update exception message [#51187](https://github.com/ClickHouse/ClickHouse/pull/51187) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Split long test 02149_schema_inference_formats_with_schema into several tests to avoid timeout in debug [#51197](https://github.com/ClickHouse/ClickHouse/pull/51197) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Avoid initializing DateLUT from emptyArray function registration [#51199](https://github.com/ClickHouse/ClickHouse/pull/51199) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Suppress check for covered parts in ZooKeeper [#51207](https://github.com/ClickHouse/ClickHouse/pull/51207) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* One more profile event for fs cache [#51223](https://github.com/ClickHouse/ClickHouse/pull/51223) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Typo: passowrd_sha256_hex --> password_sha256_hex [#51233](https://github.com/ClickHouse/ClickHouse/pull/51233) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Introduce settings enum field with auto-generated values list [#51237](https://github.com/ClickHouse/ClickHouse/pull/51237) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Drop session if we fail to get Keeper API version [#51238](https://github.com/ClickHouse/ClickHouse/pull/51238) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Revert "Fix a crash in s3 and s3Cluster functions" [#51239](https://github.com/ClickHouse/ClickHouse/pull/51239) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* fix flaky `AsyncLoader` destructor [#51245](https://github.com/ClickHouse/ClickHouse/pull/51245) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Docs: little cleanup of configuration-files.md [#51249](https://github.com/ClickHouse/ClickHouse/pull/51249) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix a stupid bug on Replicated database recovery [#51252](https://github.com/ClickHouse/ClickHouse/pull/51252) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* FileCache: tryReserve() slight improvement [#51259](https://github.com/ClickHouse/ClickHouse/pull/51259) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Ugly hotfix for "terminate on uncaught exception" in WriteBufferFromOStream [#51265](https://github.com/ClickHouse/ClickHouse/pull/51265) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Avoid too many calls to Poco::Logger::get [#51266](https://github.com/ClickHouse/ClickHouse/pull/51266) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.3.5.9-lts [#51269](https://github.com/ClickHouse/ClickHouse/pull/51269) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Better reporting of broken parts [#51270](https://github.com/ClickHouse/ClickHouse/pull/51270) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Update ext-dict-functions.md [#51283](https://github.com/ClickHouse/ClickHouse/pull/51283) ([Mike Kot](https://github.com/myrrc)).
|
||||||
|
* Disable table structure check for secondary queries from Replicated db [#51284](https://github.com/ClickHouse/ClickHouse/pull/51284) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Define Thrift version for parquet and use correct arrow version [#51285](https://github.com/ClickHouse/ClickHouse/pull/51285) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Restore Azure build on ARM [#51288](https://github.com/ClickHouse/ClickHouse/pull/51288) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Query Cache: Un-comment settings in server cfg [#51294](https://github.com/ClickHouse/ClickHouse/pull/51294) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Require more checks [#51295](https://github.com/ClickHouse/ClickHouse/pull/51295) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix metadata loading test [#51297](https://github.com/ClickHouse/ClickHouse/pull/51297) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Scratch the strange Python code [#51302](https://github.com/ClickHouse/ClickHouse/pull/51302) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#47865](https://github.com/ClickHouse/ClickHouse/issues/47865) [#51306](https://github.com/ClickHouse/ClickHouse/pull/51306) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#48894](https://github.com/ClickHouse/ClickHouse/issues/48894) [#51307](https://github.com/ClickHouse/ClickHouse/pull/51307) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#48676](https://github.com/ClickHouse/ClickHouse/issues/48676) [#51308](https://github.com/ClickHouse/ClickHouse/pull/51308) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix long test `functions_bad_arguments` [#51310](https://github.com/ClickHouse/ClickHouse/pull/51310) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Unify merge predicate [#51344](https://github.com/ClickHouse/ClickHouse/pull/51344) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix using locks in ProcessList [#51348](https://github.com/ClickHouse/ClickHouse/pull/51348) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Add a test for [#42631](https://github.com/ClickHouse/ClickHouse/issues/42631) [#51353](https://github.com/ClickHouse/ClickHouse/pull/51353) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix performance tests due to warnings from jemalloc about Per-CPU arena disabled [#51362](https://github.com/ClickHouse/ClickHouse/pull/51362) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix "merge_truncate_long" test [#51369](https://github.com/ClickHouse/ClickHouse/pull/51369) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Increase timeout of Fast Test [#51372](https://github.com/ClickHouse/ClickHouse/pull/51372) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix bad tests for DNS [#51374](https://github.com/ClickHouse/ClickHouse/pull/51374) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Attempt to fix the `relax_too_many_parts` test [#51375](https://github.com/ClickHouse/ClickHouse/pull/51375) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix MySQL test in Debug mode [#51376](https://github.com/ClickHouse/ClickHouse/pull/51376) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix bad test `01018_Distributed__shard_num` [#51377](https://github.com/ClickHouse/ClickHouse/pull/51377) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix "logical error" in addressToLineWithInlines [#51379](https://github.com/ClickHouse/ClickHouse/pull/51379) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test 01280_ttl_where_group_by [#51380](https://github.com/ClickHouse/ClickHouse/pull/51380) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Attempt to fix `test_ssl_cert_authentication` [#51384](https://github.com/ClickHouse/ClickHouse/pull/51384) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Revert "Merge pull request [#50951](https://github.com/ClickHouse/ClickHouse/issues/50951) from ZhiguoZh/20230607-toyear-fix" [#51390](https://github.com/ClickHouse/ClickHouse/pull/51390) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Two tests are twice longer in average with Analyzer and sometimes failing [#51391](https://github.com/ClickHouse/ClickHouse/pull/51391) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix 00899_long_attach_memory_limit [#51395](https://github.com/ClickHouse/ClickHouse/pull/51395) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test 01293_optimize_final_force [#51396](https://github.com/ClickHouse/ClickHouse/pull/51396) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test 02481_parquet_list_monotonically_increasing_offsets [#51397](https://github.com/ClickHouse/ClickHouse/pull/51397) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test 02497_trace_events_stress_long [#51398](https://github.com/ClickHouse/ClickHouse/pull/51398) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix broken labeling for `manual approve` [#51405](https://github.com/ClickHouse/ClickHouse/pull/51405) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix parts lifetime in `MergeTreeTransaction` [#51407](https://github.com/ClickHouse/ClickHouse/pull/51407) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix flaky test test_skip_empty_files [#51409](https://github.com/ClickHouse/ClickHouse/pull/51409) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* fix flacky test test_profile_events_s3 [#51412](https://github.com/ClickHouse/ClickHouse/pull/51412) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Update README.md [#51413](https://github.com/ClickHouse/ClickHouse/pull/51413) ([Tyler Hannan](https://github.com/tylerhannan)).
|
||||||
|
* Replace try/catch logic in hasTokenOrNull() by something more lightweight [#51425](https://github.com/ClickHouse/ClickHouse/pull/51425) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add retries to `tlsv1_3` tests [#51434](https://github.com/ClickHouse/ClickHouse/pull/51434) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Update exception message [#51440](https://github.com/ClickHouse/ClickHouse/pull/51440) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* fs cache: add check for intersecting ranges [#51444](https://github.com/ClickHouse/ClickHouse/pull/51444) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Slightly better code around packets for parallel replicas [#51451](https://github.com/ClickHouse/ClickHouse/pull/51451) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Update system_warnings test [#51453](https://github.com/ClickHouse/ClickHouse/pull/51453) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Many fixes [#51455](https://github.com/ClickHouse/ClickHouse/pull/51455) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test 01605_adaptive_granularity_block_borders [#51457](https://github.com/ClickHouse/ClickHouse/pull/51457) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Try fix flaky 02497_storage_file_reader_selection [#51468](https://github.com/ClickHouse/ClickHouse/pull/51468) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Try making Keeper in `DatabaseReplicated` tests more stable [#51473](https://github.com/ClickHouse/ClickHouse/pull/51473) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Convert 02003_memory_limit_in_client from expect to sh test (to fix flakiness) [#51475](https://github.com/ClickHouse/ClickHouse/pull/51475) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test_disk_over_web_server [#51476](https://github.com/ClickHouse/ClickHouse/pull/51476) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Delay shutdown of system and temporary databases [#51479](https://github.com/ClickHouse/ClickHouse/pull/51479) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix memory leakage in CompressionCodecDeflateQpl [#51480](https://github.com/ClickHouse/ClickHouse/pull/51480) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Increase retries in test_multiple_disks/test.py::test_start_stop_moves [#51482](https://github.com/ClickHouse/ClickHouse/pull/51482) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix race in BoundedReadBuffer [#51484](https://github.com/ClickHouse/ClickHouse/pull/51484) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix flaky unit test [#51485](https://github.com/ClickHouse/ClickHouse/pull/51485) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix flaky test `test_host_regexp_multiple_ptr_records` [#51506](https://github.com/ClickHouse/ClickHouse/pull/51506) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Add a comment [#51517](https://github.com/ClickHouse/ClickHouse/pull/51517) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Make `test_ssl_cert_authentication` similar to `test_tlvs1_3` [#51520](https://github.com/ClickHouse/ClickHouse/pull/51520) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Fix duplicate storage set logical error. [#51521](https://github.com/ClickHouse/ClickHouse/pull/51521) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Update test_storage_postgresql/test.py::test_concurrent_queries [#51523](https://github.com/ClickHouse/ClickHouse/pull/51523) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix FATAL: query context is not detached from thread group [#51540](https://github.com/ClickHouse/ClickHouse/pull/51540) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.3.6.7-lts [#51548](https://github.com/ClickHouse/ClickHouse/pull/51548) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Try to fix deadlock in ZooKeeper client [#51563](https://github.com/ClickHouse/ClickHouse/pull/51563) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Retry chroot creation in ZK before stateless tests [#51585](https://github.com/ClickHouse/ClickHouse/pull/51585) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* use timeout instead trap in 01443_merge_truncate_long.sh [#51593](https://github.com/ClickHouse/ClickHouse/pull/51593) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.5.4.25-stable [#51604](https://github.com/ClickHouse/ClickHouse/pull/51604) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.4.5.22-stable [#51638](https://github.com/ClickHouse/ClickHouse/pull/51638) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.3.7.5-lts [#51639](https://github.com/ClickHouse/ClickHouse/pull/51639) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update parts.md [#51643](https://github.com/ClickHouse/ClickHouse/pull/51643) ([Ramazan Polat](https://github.com/ramazanpolat)).
|
||||||
|

25
docs/changelogs/v23.6.2.18-stable.md
Normal file
@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.6.2.18-stable (89f39a7ccfe) FIXME as compared to v23.6.1.1524-stable (d1c7e13d088)

#### Build/Testing/Packaging Improvement
* Backported in [#51888](https://github.com/ClickHouse/ClickHouse/issues/51888): Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Remove the usage of Analyzer setting in the client [#51578](https://github.com/ClickHouse/ClickHouse/pull/51578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix 02116_tuple_element with Analyzer [#51669](https://github.com/ClickHouse/ClickHouse/pull/51669) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix SQLLogic docker images [#51719](https://github.com/ClickHouse/ClickHouse/pull/51719) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Pin for docker-ce [#51743](https://github.com/ClickHouse/ClickHouse/pull/51743) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
``` bash
cd ClickHouse
mkdir build-riscv64
-CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_UNWIND=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
+CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
ninja -C build-riscv64
```
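For orientation after this hunk: the resulting `clickhouse` binary targets riscv64 and will not execute on an x86_64 build host. A minimal sanity check, assuming the standard build output path and an installed `qemu-user` (both are assumptions, not part of the commit):

``` bash
# Confirm the cross-compilation target of the freshly built binary.
file build-riscv64/programs/clickhouse   # expect: ELF 64-bit LSB executable, UCB RISC-V, ...

# Optionally smoke-test it through user-mode emulation.
qemu-riscv64 build-riscv64/programs/clickhouse --version
```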
@ -11,7 +11,8 @@ Supported platforms:

- x86_64
- AArch64
-- Power9 (experimental)
+- PowerPC 64 LE (experimental)
+- RISC-V 64 (experimental)

## Building on Ubuntu
@ -42,7 +43,7 @@ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test

For other Linux distribution - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html).

-As of April 2023, any version of Clang >= 15 will work.
+As of April 2023, clang-16 or higher will work.
GCC as a compiler is not supported.
To build with a specific Clang version:
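The hunk ends on the sentence introducing builds with a specific Clang version; the lines that follow it are not captured here, so as a hedged sketch, pinning the compiler typically amounts to exporting the standard CMake environment variables (the exact version below is an assumption):

``` bash
# Assumed continuation: select a pinned Clang before running cmake.
export CC=clang-16
export CXX=clang++-16
```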
@ -86,8 +87,8 @@ The build requires the following components:

- Git (used to checkout the sources, not needed for the build)
- CMake 3.20 or newer
-- Compiler: Clang 15 or newer
+- Compiler: clang-16 or newer
-- Linker: lld 15 or newer
+- Linker: lld-16 or newer
- Ninja
- Yasm
- Gawk
@ -33,6 +33,15 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name

- `options` — MongoDB connection string options (optional parameter).

+:::tip
+If you are using the MongoDB Atlas cloud offering please add these options:
+
+```
+'connectTimeoutMS=10000&ssl=true&authSource=admin'
+```
+
+:::

## Usage Example {#usage-example}

Create a table in ClickHouse which allows to read data from MongoDB collection:
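The tip above only shows the options string. As a rough sketch of where it goes, the string is passed as the trailing `options` argument of the engine (host, database, collection, and credentials below are placeholders):

```sql
-- Hypothetical Atlas connection; 'options' is the last MongoDB() argument.
CREATE TABLE atlas_example
(
    key UInt64,
    data String
)
ENGINE = MongoDB('mongodb0.example.com:27017', 'my_database', 'my_collection', 'my_user', 'my_password',
                 'connectTimeoutMS=10000&ssl=true&authSource=admin');
```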
@ -54,7 +54,7 @@ $ sudo mysql

``` sql
mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
-mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
```

Then configure the connection in `/etc/odbc.ini`.
@ -66,7 +66,7 @@ DRIVER = /usr/local/lib/libmyodbc5w.so
SERVER = 127.0.0.1
PORT = 3306
DATABASE = test
-USERNAME = clickhouse
+USER = clickhouse
PASSWORD = clickhouse
```

@ -83,6 +83,9 @@ $ isql -v mysqlconn
Table in MySQL:

``` text
+mysql> CREATE DATABASE test;
+Query OK, 1 row affected (0,01 sec)
+
mysql> CREATE TABLE `test`.`test` (
    -> `int_id` INT NOT NULL AUTO_INCREMENT,
    -> `int_nullable` INT NULL DEFAULT NULL,
@ -91,10 +94,10 @@ mysql> CREATE TABLE `test`.`test` (
    -> PRIMARY KEY (`int_id`));
Query OK, 0 rows affected (0,09 sec)

-mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+mysql> insert into test.test (`int_id`, `float`) VALUES (1,2);
Query OK, 1 row affected (0,00 sec)

-mysql> select * from test;
+mysql> select * from test.test;
+------+----------+-----+----------+
| int_id | int_nullable | float | float_nullable |
+------+----------+-----+----------+
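With the corrected grant and the `test.test` table above in place, the ClickHouse side of this walk-through would query through the ODBC bridge roughly like this (a sketch using the DSN from the `/etc/odbc.ini` snippet, not copied from the page):

```sql
-- Read the MySQL table via the 'mysqlconn' DSN configured earlier.
SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test');
```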
@ -37,8 +37,8 @@ The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [TTL expr1] [CODEC(codec1)] [[NOT] NULL|PRIMARY KEY],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [TTL expr2] [CODEC(codec2)] [[NOT] NULL|PRIMARY KEY],
    ...
    INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1],
    INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2],
|
|||||||
- `number_of_hash_functions` — The number of hash functions used in the Bloom filter.
|
- `number_of_hash_functions` — The number of hash functions used in the Bloom filter.
|
||||||
- `random_seed` — The seed for Bloom filter hash functions.
|
- `random_seed` — The seed for Bloom filter hash functions.
|
||||||
|
|
||||||
Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to estimate the parameters set of `ngrambf_v1`. Query statements are as follows:
|
Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to estimate the parameters set of `ngrambf_v1`. Query statements are as follows:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster]
|
CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster]
|
||||||
AS
|
AS
|
||||||
(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2));
|
(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2));
|
||||||
|
|
||||||
CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster]
|
CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster]
|
||||||
AS
|
AS
|
||||||
(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));
|
(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));
|
||||||
|
|
||||||
CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster]
|
CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster]
|
||||||
AS
|
AS
|
||||||
(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions);
|
(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions);
|
||||||
|
|
||||||
CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster]
|
CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster]
|
||||||
AS
|
AS
|
||||||
(number_of_hash_functions, probability_of_false_positives, size_of_bloom_filter_in_bytes) -> ceil(size_of_bloom_filter_in_bytes / (-number_of_hash_functions / log(1 - exp(log(probability_of_false_positives) / number_of_hash_functions))))
|
(number_of_hash_functions, probability_of_false_positives, size_of_bloom_filter_in_bytes) -> ceil(size_of_bloom_filter_in_bytes / (-number_of_hash_functions / log(1 - exp(log(probability_of_false_positives) / number_of_hash_functions))))
|
||||||
|
|
||||||
```
|
```
|
||||||
To use those functions,we need to specify two parameter at least.
|
To use those functions,we need to specify two parameter at least.
|
||||||
For example, if there 4300 ngrams in the granule and we expect false positives to be less than 0.0001. The other parameters can be estimated by executing following queries:
|
For example, if there 4300 ngrams in the granule and we expect false positives to be less than 0.0001. The other parameters can be estimated by executing following queries:
|
||||||
|
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
--- estimate number of bits in the filter
|
--- estimate number of bits in the filter
|
||||||
SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes;
|
SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes;
|
||||||
|
|
||||||
┌─size_of_bloom_filter_in_bytes─┐
|
┌─size_of_bloom_filter_in_bytes─┐
|
||||||
│ 10304 │
|
│ 10304 │
|
||||||
└───────────────────────────────┘
|
└───────────────────────────────┘
|
||||||
|
|
||||||
--- estimate number of hash functions
|
--- estimate number of hash functions
|
||||||
SELECT bfEstimateFunctions(4300, bfEstimateBmSize(4300, 0.0001)) as number_of_hash_functions
|
SELECT bfEstimateFunctions(4300, bfEstimateBmSize(4300, 0.0001)) as number_of_hash_functions
|
||||||
|
|
||||||
┌─number_of_hash_functions─┐
|
┌─number_of_hash_functions─┐
|
||||||
│ 13 │
|
│ 13 │
|
||||||
└──────────────────────────┘
|
└──────────────────────────┘
|
||||||
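To close the loop on the worked example, the estimates can be plugged back into `bfEstimateFalsePositive` defined above; the round-trip should land near the 0.0001 target (the check itself is our addition, not part of the page):

```sql
--- sanity-check the estimated parameters from the two queries above
SELECT bfEstimateFalsePositive(4300, 13, 10304) AS estimated_false_positive_rate;
```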
@ -949,7 +949,14 @@ The example uses `type=web`, but any disk type can be configured as dynamic, eve

#### Example dynamic web storage

+:::tip
+A [demo dataset](https://github.com/ClickHouse/web-tables-demo) is hosted in GitHub. To prepare your own tables for web storage see the tool [clickhouse-static-files-uploader](/docs/en/operations/storing-data.md/#storing-data-on-webserver)
+:::
+
+In this `ATTACH TABLE` query the `UUID` provided matches the directory name of the data, and the endpoint is the URL for the raw GitHub content.
+
```sql
+# highlight-next-line
ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7'
(
    price UInt32,
@ -984,7 +991,7 @@ use a local disk to cache data from a table stored at a URL. Neither the cache d
nor the web storage is configured in the ClickHouse configuration files; both are
configured in the CREATE/ATTACH query settings.

In the settings highlighted below notice that the disk of `type=web` is nested within
the disk of `type=cache`.

```sql
@ -1301,7 +1308,7 @@ configuration file.
In this sample configuration:
- the disk is of type `web`
- the data is hosted at `http://nginx:80/test1/`
- a cache on local storage is used

```xml
<clickhouse>
@ -378,6 +378,10 @@ request](https://github.com/ClickHouse/ClickHouse/commits/master) and find CI ch
https://s3.amazonaws.com/clickhouse/builds/PRs/.../.../binary_aarch64_v80compat/clickhouse". You can then click the link to download the
build.

+### macOS-only: Install with Homebrew
+
+To install ClickHouse using the popular `brew` package manager, follow the instructions listed in the [ClickHouse Homebrew tap](https://github.com/ClickHouse/homebrew-clickhouse).
+
## Launch {#launch}

To start the server as a daemon, run:
@ -471,6 +471,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.
+- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Default value - `false`.

## CSVWithNames {#csvwithnames}
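The newly listed `input_format_csv_allow_variable_number_of_columns` setting is easiest to see on inline data; a hedged sketch (the `format` table function with an explicit structure argument is assumed to be available in this version, and the sample rows are invented):

```sql
-- Row 2 is short (b gets its default), row 3 carries an extra field (ignored).
SELECT *
FROM format(CSV, 'a UInt32, b String', '1,"x"\n2\n3,"y","extra"')
SETTINGS input_format_csv_allow_variable_number_of_columns = 1;
```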
@ -1,11 +1,11 @@
---
slug: /en/operations/server-configuration-parameters/settings
sidebar_position: 57
-sidebar_label: Server Settings
+sidebar_label: Global Server Settings
description: This section contains descriptions of server settings that cannot be changed at the session or query level.
---

-# Server Settings
+# Global Server Settings

This section contains descriptions of server settings that cannot be changed at the session or query level.
|
|||||||
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
|
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
|
||||||
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
|
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
|
||||||
|
|
||||||
|
Both log and error log file names (only file names, not directories) support date and time format specifiers.
|
||||||
|
|
||||||
|
**Format specifiers**
|
||||||
|
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
|
||||||
|
|
||||||
|
| Specifier | Description | Example |
|
||||||
|
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
|
||||||
|
| %% | Literal % | % |
|
||||||
|
| %n | New-line character | |
|
||||||
|
| %t | Horizontal tab character | |
|
||||||
|
| %Y | Year as a decimal number, e.g. 2017 | 2023 |
|
||||||
|
| %y | Last 2 digits of year as a decimal number (range [00,99]) | 23 |
|
||||||
|
| %C | First 2 digits of year as a decimal number (range [00,99]) | 20 |
|
||||||
|
| %G | Four-digit [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. Normally useful only with %V | 2023 |
|
||||||
|
| %g | Last 2 digits of [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. | 23 |
|
||||||
|
| %b | Abbreviated month name, e.g. Oct (locale dependent) | Jul |
|
||||||
|
| %h | Synonym of %b | Jul |
|
||||||
|
| %B | Full month name, e.g. October (locale dependent) | July |
|
||||||
|
| %m | Month as a decimal number (range [01,12]) | 07 |
|
||||||
|
| %U | Week of the year as a decimal number (Sunday is the first day of the week) (range [00,53]) | 27 |
|
||||||
|
| %W | Week of the year as a decimal number (Monday is the first day of the week) (range [00,53]) | 27 |
|
||||||
|
| %V | ISO 8601 week number (range [01,53]) | 27 |
|
||||||
|
| %j | Day of the year as a decimal number (range [001,366]) | 187 |
|
||||||
|
| %d | Day of the month as a zero-padded decimal number (range [01,31]). Single digit is preceded by zero. | 06 |
|
||||||
|
| %e | Day of the month as a space-padded decimal number (range [1,31]). Single digit is preceded by a space. | 6 |
|
||||||
|
| %a | Abbreviated weekday name, e.g. Fri (locale dependent) | Thu |
|
||||||
|
| %A | Full weekday name, e.g. Friday (locale dependent) | Thursday |
|
||||||
|
| %w | Weekday as a integer number with Sunday as 0 (range [0-6]) | 4 |
|
||||||
|
| %u | Weekday as a decimal number, where Monday is 1 (ISO 8601 format) (range [1-7]) | 4 |
|
||||||
|
| %H | Hour as a decimal number, 24 hour clock (range [00-23]) | 18 |
|
||||||
|
| %I | Hour as a decimal number, 12 hour clock (range [01,12]) | 06 |
|
||||||
|
| %M | Minute as a decimal number (range [00,59]) | 32 |
|
||||||
|
| %S | Second as a decimal number (range [00,60]) | 07 |
|
||||||
|
| %c | Standard date and time string, e.g. Sun Oct 17 04:41:13 2010 (locale dependent) | Thu Jul 6 18:32:07 2023 |
|
||||||
|
| %x | Localized date representation (locale dependent) | 07/06/23 |
|
||||||
|
| %X | Localized time representation, e.g. 18:40:20 or 6:40:20 PM (locale dependent) | 18:32:07 |
|
||||||
|
| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 07/06/23 |
|
||||||
|
| %F | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2023-07-06 |
|
||||||
|
| %r | Localized 12-hour clock time (locale dependent) | 06:32:07 PM |
|
||||||
|
| %R | Equivalent to "%H:%M" | 18:32 |
|
||||||
|
| %T | Equivalent to "%H:%M:%S" (the ISO 8601 time format) | 18:32:07 |
|
||||||
|
| %p | Localized a.m. or p.m. designation (locale dependent) | PM |
|
||||||
|
| %z | Offset from UTC in the ISO 8601 format (e.g. -0430), or no characters if the time zone information is not available | +0800 |
|
||||||
|
| %Z | Locale-dependent time zone name or abbreviation, or no characters if the time zone information is not available | Z AWST |
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
<logger>
|
<logger>
|
||||||
<level>trace</level>
|
<level>trace</level>
|
||||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
<log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
|
||||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
<errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
|
||||||
<size>1000M</size>
|
<size>1000M</size>
|
||||||
<count>10</count>
|
<count>10</count>
|
||||||
<stream_compress>true</stream_compress>
|
<stream_compress>true</stream_compress>
|
||||||
@ -2120,7 +2165,13 @@ This section contains the following parameters:
|
|||||||
- `operation_timeout_ms` — Maximum timeout for one operation in milliseconds.
|
- `operation_timeout_ms` — Maximum timeout for one operation in milliseconds.
|
||||||
- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional.
|
- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional.
|
||||||
- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
|
- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
|
||||||
|
- zookeeper_load_balancing - Specifies the algorithm of ZooKeeper node selection.
|
||||||
|
* random - randomly selects one of ZooKeeper nodes.
|
||||||
|
* in_order - selects the first ZooKeeper node, if it's not available then the second, and so on.
|
||||||
|
* nearest_hostname - selects a ZooKeeper node with a hostname that is most similar to the server’s hostname.
|
||||||
|
* first_or_random - selects the first ZooKeeper node, if it's not available then randomly selects one of remaining ZooKeeper nodes.
|
||||||
|
* round_robin - selects the first ZooKeeper node, if reconnection happens selects the next.
|
||||||
|
|
||||||
**Example configuration**
|
**Example configuration**
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
@ -2139,6 +2190,8 @@ This section contains the following parameters:
|
|||||||
<root>/path/to/zookeeper/node</root>
|
<root>/path/to/zookeeper/node</root>
|
||||||
<!-- Optional. Zookeeper digest ACL string. -->
|
<!-- Optional. Zookeeper digest ACL string. -->
|
||||||
<identity>user:password</identity>
|
<identity>user:password</identity>
|
||||||
|
<!--<zookeeper_load_balancing>random / in_order / nearest_hostname / first_or_random / round_robin</zookeeper_load_balancing>-->
|
||||||
|
<zookeeper_load_balancing>random</zookeeper_load_balancing>
|
||||||
</zookeeper>
|
</zookeeper>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@@ -7,90 +7,16 @@ pagination_next: en/operations/settings/settings

# Settings Overview

There are two main groups of ClickHouse settings:

- Global server settings
- Query-level settings

The main distinction between global server settings and query-level settings is that global server settings must be set in configuration files, while query-level settings can be set in configuration files or with SQL queries.

Read about [global server settings](/docs/en/operations/server-configuration-parameters/settings.md) to learn more about configuring your ClickHouse server at the global server level.

Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query level.
@@ -242,6 +242,26 @@ See also:

- [DateTime data type.](../../sql-reference/data-types/datetime.md)
- [Functions for working with dates and times.](../../sql-reference/functions/date-time-functions.md)

## interval_output_format {#interval_output_format}

Allows choosing different output formats of the text representation of interval types.

Possible values:

- `kusto` - KQL-style output format.

  ClickHouse outputs intervals in [KQL format](https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-timespan-format-strings#the-constant-c-format-specifier). For example, `toIntervalDay(2)` would be formatted as `2.00:00:00`. Please note that for interval types of varying length (i.e. `IntervalMonth` and `IntervalYear`) the average number of seconds per interval is taken into account.

- `numeric` - Numeric output format.

  ClickHouse outputs intervals as their underlying numeric representation. For example, `toIntervalDay(2)` would be formatted as `2`.

Default value: `numeric`.

See also:

- [Interval](../../sql-reference/data-types/special-data-types/interval.md)
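To see the two formats side by side, here is a quick sketch using the `toIntervalDay(2)` value from the descriptions above:

```sql
SET interval_output_format = 'kusto';
SELECT toIntervalDay(2);   -- 2.00:00:00

SET interval_output_format = 'numeric';
SELECT toIntervalDay(2);   -- 2
```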
## input_format_ipv4_default_on_conversion_error {#input_format_ipv4_default_on_conversion_error}

Deserialization of IPv4 will use default values instead of throwing an exception on conversion error.
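A minimal sketch of the effect; the `format` table function call with an inline structure and the CSV payload are illustrative assumptions, not part of this setting's documentation:

```sql
SET input_format_ipv4_default_on_conversion_error = 1;
-- the invalid literal deserializes to the IPv4 default value (0.0.0.0) instead of throwing
SELECT * FROM format(CSV, 'ip IPv4', 'not-an-ip');
```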
@@ -931,6 +951,11 @@ Result

```text
" string "
```

### input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}

Ignore extra columns in CSV input (if the file has more columns than expected) and treat missing fields in CSV input as default values.

Disabled by default.
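As a sketch, assuming a hypothetical file `rows.csv` whose rows have more or fewer fields than the three declared columns:

```sql
SET input_format_csv_allow_variable_number_of_columns = 1;
-- extra trailing fields are ignored; missing trailing fields become column defaults
SELECT * FROM file('rows.csv', CSV, 'a UInt32, b String, c String');
```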
### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}
217 docs/en/operations/settings/settings-query-level.md Normal file
@@ -0,0 +1,217 @@

---
sidebar_label: Query-level Settings
title: Query-level Settings
slug: /en/operations/settings/query-level
---

There are multiple ways to set ClickHouse query-level settings. Settings are configured in layers, and each subsequent layer redefines the previous values of a setting.

The order of priority for defining a setting is:

1. Applying a setting to a user directly, or within a settings profile

   - SQL (recommended)
   - adding one or more XML or YAML files to `/etc/clickhouse-server/users.d`

2. Session settings

   - Send `SET setting=value` from the ClickHouse Cloud SQL console or
     `clickhouse client` in interactive mode. Similarly, you can use ClickHouse
     sessions in the HTTP protocol. To do this, you need to specify the
     `session_id` HTTP parameter.

3. Query settings

   - When starting `clickhouse client` in non-interactive mode, set the startup
     parameter `--setting=value`.
   - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
   - Define settings in the
     [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query)
     clause of the SELECT query. The setting value is applied only to that query
     and is reset to the default or previous value after the query is executed.

## Examples

These examples all set the value of the `async_insert` setting to `1` and
show how to examine the settings in a running system.

### Using SQL to apply a setting to a user directly

This creates the user `ingester` with the setting `async_insert = 1`:

```sql
CREATE USER ingester
IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
# highlight-next-line
SETTINGS async_insert = 1
```

#### Examine the settings profile and assignment

```sql
SHOW ACCESS
```

```response
┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
│ ...                                                                                 │
# highlight-next-line
│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS async_insert = true   │
│ ...                                                                                 │
└─────────────────────────────────────────────────────────────────────────────────────┘
```

### Using SQL to create a settings profile and assign it to a user

This creates the profile `log_ingest` with the setting `async_insert = 1`:

```sql
CREATE SETTINGS PROFILE log_ingest SETTINGS async_insert = 1
```

This creates the user `ingester` and assigns the user the settings profile `log_ingest`:

```sql
CREATE USER ingester
IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
# highlight-next-line
SETTINGS PROFILE log_ingest
```

### Using XML to create a settings profile and user

```xml title=/etc/clickhouse-server/users.d/users.xml
<clickhouse>
# highlight-start
    <profiles>
        <log_ingest>
            <async_insert>1</async_insert>
        </log_ingest>
    </profiles>
# highlight-end

    <users>
        <ingester>
            <password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
# highlight-start
            <profile>log_ingest</profile>
# highlight-end
        </ingester>
        <default replace="true">
            <password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
            <access_management>1</access_management>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
```

#### Examine the settings profile and assignment

```sql
SHOW ACCESS
```

```response
┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
│ CREATE USER default IDENTIFIED WITH sha256_password                                 │
# highlight-next-line
│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS PROFILE log_ingest    │
│ CREATE SETTINGS PROFILE default                                                     │
# highlight-next-line
│ CREATE SETTINGS PROFILE log_ingest SETTINGS async_insert = true                     │
│ CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1                              │
│ ...                                                                                 │
└─────────────────────────────────────────────────────────────────────────────────────┘
```

### Assign a setting to a session

```sql
SET async_insert = 1;
SELECT value FROM system.settings WHERE name = 'async_insert';
```

```response
┌─value──┐
│ 1      │
└────────┘
```

### Assign a setting during a query

```sql
INSERT INTO YourTable
# highlight-next-line
SETTINGS async_insert = 1
VALUES (...)
```

## Converting a Setting to its Default Value

If you change a setting and would like to revert it back to its default value, set the value to `DEFAULT`. The syntax looks like:

```sql
SET setting_name = DEFAULT
```

For example, the default value of `async_insert` is `0`. Suppose you change its value to `1`:

```sql
SET async_insert = 1;

SELECT value FROM system.settings WHERE name = 'async_insert';
```

The response is:

```response
┌─value──┐
│ 1      │
└────────┘
```

The following command sets its value back to `0`:

```sql
SET async_insert = DEFAULT;

SELECT value FROM system.settings WHERE name = 'async_insert';
```

The setting is now back to its default:

```response
┌─value───┐
│ 0       │
└─────────┘
```

## Custom Settings {#custom_settings}

In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.

A custom setting name must begin with one of the predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.

```xml
<custom_settings_prefixes>custom_</custom_settings_prefixes>
```

To define a custom setting, use the `SET` command:

```sql
SET custom_a = 123;
```

To get the current value of a custom setting, use the `getSetting()` function:

```sql
SELECT getSetting('custom_a');
```

**See Also**

- View the [Settings](./settings.md) page for a description of the ClickHouse settings.
- [Global server settings](../../operations/server-configuration-parameters/settings.md)
@@ -17,7 +17,8 @@ Default value: 0.

**Example**

``` sql
INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
SELECT * FROM table_1;
```
```response
┌─x─┬─y────┐
...
```

@@ -30,7 +31,7 @@ insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');

```sql
SELECT *
FROM table_1
SETTINGS additional_table_filters = {'table_1': 'x != 2'}
```
```response
┌─x─┬─y────┐
...
```

@@ -50,7 +51,8 @@ Default value: `''`.

**Example**

``` sql
INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
SELECT * FROM table_1;
```
```response
┌─x─┬─y────┐
...
```
@@ -3201,6 +3203,40 @@ ENGINE = Log

└──────────────────────────────────────────────────────────────────────────┘
```

## default_temporary_table_engine {#default_temporary_table_engine}

Same as [default_table_engine](#default_table_engine) but for temporary tables.

Default value: `Memory`.

In this example, any new temporary table that does not specify an `Engine` will use the `Log` table engine:

Query:

```sql
SET default_temporary_table_engine = 'Log';

CREATE TEMPORARY TABLE my_table (
    x UInt32,
    y UInt32
);

SHOW CREATE TEMPORARY TABLE my_table;
```

Result:

```response
┌─statement────────────────────────────────────────────────────────────────┐
│ CREATE TEMPORARY TABLE default.my_table
(
    `x` UInt32,
    `y` UInt32
)
ENGINE = Log
└──────────────────────────────────────────────────────────────────────────┘
```
## data_type_default_nullable {#data_type_default_nullable}

Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) in the column definition to be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).
@@ -3501,7 +3537,7 @@ Possible values:

- Any positive integer.
- 0 - Disabled (infinite timeout).

Default value: 30.

## http_receive_timeout {#http_receive_timeout}

@@ -3512,7 +3548,7 @@ Possible values:

- Any positive integer.
- 0 - Disabled (infinite timeout).

Default value: 30.

## check_query_single_value_result {#check_query_single_value_result}
@@ -184,13 +184,15 @@ These settings should be defined in the disk configuration section.

- `enable_filesystem_query_cache_limit` - allows limiting the size of the cache which is downloaded within each query (depends on the user setting `max_query_cache_size`). Default: `false`.

- `enable_cache_hits_threshold` - if enabled, data is cached only after it has been read a certain number of times. Default: `false`. The threshold can be defined by `cache_hits_threshold`. Default: `0`, i.e. the data is cached at the first attempt to read it.

- `enable_bypass_cache_with_threshold` - allows skipping the cache completely in case the requested read range exceeds the threshold. Default: `false`. The threshold can be defined by `bypass_cache_threashold`. Default: `268435456` (`256Mi`).

- `do_not_evict_index_and_mark_files` - do not evict small, frequently used files according to the cache policy. Default: `false`. This setting was added in version 22.8. If you used the filesystem cache before this version, it will not work on versions starting from 22.8 if this setting is set to `true`. If you want to use this setting, clear the old cache created before version 22.8 before upgrading.

- `max_file_segment_size` - the maximum size of a single cache file in bytes or in a readable format (`ki, Mi, Gi, etc`, example `10Gi`). Default: `8388608` (`8Mi`).

- `max_elements` - a limit on the number of cache files. Default: `10000000`.

File Cache **query/profile settings**:
@@ -9,7 +9,6 @@ Columns:

- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.

@@ -20,18 +19,18 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
```

``` text
┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0                         │    2120.9 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy                │       743 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty                │     26288 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals  │         0 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs       │         0 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained                         │  60694528 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped                           │ 303161344 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident                         │ 260931584 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata                         │  12079488 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated                        │ 133756128 │
└────────────┴─────────────────────┴───────────────────────────────────────────┴───────────┘
```

**See Also**
@@ -13,6 +13,7 @@ System tables provide information about:

- Server states, processes, and environment.
- Server’s internal processes.
- Options used when the ClickHouse binary was built.

System tables:

45 docs/en/operations/system-tables/jemalloc_bins.md Normal file
@@ -0,0 +1,45 @@
---
slug: /en/operations/system-tables/jemalloc_bins
---
# jemalloc_bins

Contains information about memory allocations done via the jemalloc allocator in different size classes (bins), aggregated from all arenas.
These statistics might not be absolutely accurate because of thread-local caching in jemalloc.

Columns:

- `index` (UInt64) — Index of the bin, ordered by size
- `large` (Bool) — True for large allocations, False for small ones
- `size` (UInt64) — Size of allocations in this bin
- `allocations` (UInt64) — Number of allocations
- `deallocations` (UInt64) — Number of deallocations

**Example**

Find the sizes of allocations that contributed the most to the current overall memory usage.

``` sql
SELECT
    *,
    allocations - deallocations AS active_allocations,
    size * active_allocations AS allocated_bytes
FROM system.jemalloc_bins
WHERE allocated_bytes > 0
ORDER BY allocated_bytes DESC
LIMIT 10
```

``` text
┌─index─┬─large─┬─────size─┬─allocations─┬─deallocations─┬─active_allocations─┬─allocated_bytes─┐
│    82 │     1 │ 50331648 │           1 │             0 │                  1 │        50331648 │
│    10 │     0 │      192 │      512336 │        370710 │             141626 │        27192192 │
│    69 │     1 │  5242880 │           6 │             2 │                  4 │        20971520 │
│     3 │     0 │       48 │    16938224 │      16559484 │             378740 │        18179520 │
│    28 │     0 │     4096 │      122924 │        119142 │               3782 │        15491072 │
│    61 │     1 │  1310720 │       44569 │         44558 │                 11 │        14417920 │
│    39 │     1 │    28672 │        1285 │           913 │                372 │        10665984 │
│     4 │     0 │       64 │     2837225 │       2680568 │             156657 │        10026048 │
│     6 │     0 │       96 │     2617803 │       2531435 │              86368 │         8291328 │
│    36 │     1 │    16384 │       22431 │         21970 │                461 │         7553024 │
└───────┴───────┴──────────┴─────────────┴───────────────┴────────────────────┴─────────────────┘
```
@@ -39,6 +39,8 @@ Columns:

- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.

- `primary_key_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) used by primary key values in the primary.idx/cidx file on disk.

- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks.

- `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of compressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
|
|||||||
|
|
||||||
Consider the results.
|
Consider the results.
|
||||||
|
|
||||||
`Jonh` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
|
`John` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
|
||||||
|
|
||||||
Now let’s count the total number of people and their average wage in the specified age intervals.
|
Now let’s count the total number of people and their average wage in the specified age intervals.
|
||||||
|
|
||||||
|
@@ -44,3 +44,5 @@ Result:

```

As the result above shows, the `groupArray` function removes ᴺᵁᴸᴸ values.

- Alias: `array_agg`.
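A quick sketch of the alias in use; the aggregation over `numbers(3)` is purely illustrative:

```sql
SELECT array_agg(number) AS arr FROM numbers(3);
-- same result as groupArray(number): [0,1,2]
```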
@@ -143,5 +143,6 @@ Time shifts for multiple days. Some pacific islands changed their timezone offset

- [The `date_time_input_format` setting](../../operations/settings/settings.md#settings-date_time_input_format)
- [The `date_time_output_format` setting](../../operations/settings/settings.md#settings-date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
- [The `Date` data type](../../sql-reference/data-types/date.md)
@@ -119,6 +119,7 @@ FROM dt;

- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format)
- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-for-working-with-dates-and-times)
- [`Date` data type](../../sql-reference/data-types/date.md)
- [`DateTime` data type](../../sql-reference/data-types/datetime.md)
@@ -102,6 +102,8 @@ The function also works for strings.

Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. The query `SELECT length(arr) FROM table` transforms to `SELECT arr.size0 FROM table`.

Alias: `OCTET_LENGTH`
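A sketch of enabling the optimization; `my_table` and its `arr` column are hypothetical:

```sql
SET optimize_functions_to_subcolumns = 1;
-- reads only the arr.size0 subcolumn instead of the whole array column
SELECT length(arr) FROM my_table;
```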
## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64

## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64
@@ -142,6 +144,7 @@ range([start, ] end [, step])

- All arguments `start`, `end`, `step` must be one of the following data types: `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`. The elements of the returned array have the supertype of all arguments as their type.
- An exception is thrown if the query results in arrays with a total length of more than the number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#settings-function_range_max_elements_in_block) setting.
- Returns Null if any argument has Nullable(Nothing) type. An exception is thrown if any argument has a Null value (Nullable(T) type).

**Examples**
@@ -878,7 +881,7 @@ A special function. See the section [“ArrayJoin function”](../../sql-referen

## arrayDifference

Calculates an array of differences between adjacent array elements. The first element of the result array will be 0, the second `a[1] - a[0]`, the third `a[2] - a[1]`, etc. The type of elements in the result array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`).

**Syntax**
@@ -996,6 +999,24 @@ SELECT

└──────────────┴───────────┘
```

## arrayJaccardIndex

Returns the [Jaccard index](https://en.wikipedia.org/wiki/Jaccard_index) of two arrays, i.e. the size of their intersection divided by the size of their union. For `[1, 2]` and `[2, 3]`, the intersection is `[2]` and the union is `[1, 2, 3]`, hence 1/3.

**Example**

Query:
``` sql
SELECT arrayJaccardIndex([1, 2], [2, 3]) AS res
```
Result:
``` text
┌─res────────────────┐
│ 0.3333333333333333 │
└────────────────────┘
```

## arrayReduce

Applies an aggregate function to array elements and returns its result. The name of the aggregation function is passed as a string in single quotes `'max'`, `'sum'`. When using parametric aggregate functions, the parameter is indicated after the function name in parentheses `'uniqUpTo(6)'`.
|
|||||||
|
|
||||||
Returns year and week for a date. The year in the result may be different from the year in the date argument for the first and the last week of the year.
|
Returns year and week for a date. The year in the result may be different from the year in the date argument for the first and the last week of the year.
|
||||||
|
|
||||||
The mode argument works exactly like the mode argument to `toWeek()`. For the single-argument syntax, a mode value of 0 is used.
|
The mode argument works like the mode argument to `toWeek()`. For the single-argument syntax, a mode value of 0 is used.
|
||||||
|
|
||||||
`toISOYear()` is a compatibility function that is equivalent to `intDiv(toYearWeek(date,3),100)`.
|
`toISOYear()` is a compatibility function that is equivalent to `intDiv(toYearWeek(date,3),100)`.
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
The week number returned by `toYearWeek()` can be different from what the `toWeek()` returns. `toWeek()` always returns week number in the context of the given year, and in case `toWeek()` returns `0`, `toYearWeek()` returns the value corresponding to the last week of previous year. See `prev_yearWeek` in example below.
|
||||||
|
:::
|
||||||
|
|
||||||
**Syntax**
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
@ -707,13 +711,13 @@ toYearWeek(t[, mode[, timezone]])
|
|||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9;
|
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9, toYearWeek(toDate('2022-01-01')) AS prev_yearWeek;
|
||||||
```
|
```
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐
|
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┬─prev_yearWeek─┐
|
||||||
│ 2016-12-27 │ 201652 │ 201652 │ 201701 │
|
│ 2016-12-27 │ 201652 │ 201652 │ 201701 │ 202152 │
|
||||||
└────────────┴───────────┴───────────┴───────────┘
|
└────────────┴───────────┴───────────┴───────────┴───────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
## age
|
## age
|
||||||
|
@@ -90,6 +90,8 @@ Returns the length of a string in bytes (not: in characters or Unicode code points).

The function also works for arrays.

Alias: `OCTET_LENGTH`

## lengthUTF8

Returns the length of a string in Unicode code points (not: in bytes or characters). It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
@@ -399,7 +399,11 @@ toDateTime(expr[, time_zone ])

- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md).
- `time_zone` — Time zone. [String](/docs/en/sql-reference/data-types/string.md).

:::note
If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp).
If `expr` is a [String](/docs/en/sql-reference/data-types/string.md), it may be interpreted as a Unix timestamp or as a string representation of date / date with time.
Thus, parsing of short numbers' string representations (up to 4 digits) is explicitly disabled due to ambiguity, e.g. a string `'1999'` may be both a year (an incomplete string representation of Date / DateTime) or a unix timestamp. Longer numeric strings are allowed.
:::
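A small sketch of the two interpretations described in the note; the timestamp is chosen to match the string, and the explicit `'UTC'` argument keeps the output independent of the server time zone:

```sql
SELECT
    toDateTime(1688668327, 'UTC')            AS from_number,  -- seconds since the Unix epoch
    toDateTime('2023-07-06 18:32:07', 'UTC') AS from_string;  -- both yield 2023-07-06 18:32:07
```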
**Returned value**
@@ -171,12 +171,13 @@ Result:

└──────────────────────────────┘
```

Executable user defined functions can take constant parameters configured in the `command` setting (this works only for user defined functions with the `executable` type). Doing so also requires the `execute_direct` option (to ensure there is no shell argument expansion vulnerability).
File `test_function_parameter_python.xml` (`/etc/clickhouse-server/test_function_parameter_python.xml` with default path settings).
```xml
<functions>
    <function>
        <type>executable</type>
        <execute_direct>true</execute_direct>
        <name>test_function_parameter_python</name>
        <return_type>String</return_type>
        <argument>
        ...
```
@@ -5,15 +5,28 @@ sidebar_label: SAMPLE BY

title: "Manipulating Sampling-Key Expressions"
---

# Manipulating SAMPLE BY expression

The following operations are available:

## MODIFY

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY SAMPLE BY new_expression
```

The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions). The primary key must contain the new sample key.

## REMOVE

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] REMOVE SAMPLE BY
```

The command removes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table.

The commands `MODIFY` and `REMOVE` are lightweight in the sense that they only change metadata or remove files.

:::note
It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
:::
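A sketch of both commands on a hypothetical MergeTree table `hits` whose primary key contains `intHash32(UserID)`:

```sql
ALTER TABLE hits MODIFY SAMPLE BY intHash32(UserID);
-- later, drop sampling support again:
ALTER TABLE hits REMOVE SAMPLE BY;
```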
@@ -283,7 +283,7 @@ The optional keyword `EXTENDED` currently has no effect, it only exists for MySQL compatibility.

`SHOW INDEX` produces a result table with the following structure:

- table - The name of the table (String)
- non_unique - 0 if the index cannot contain duplicates, 1 otherwise (UInt8)
- key_name - The name of the index, `PRIMARY` if the index is a primary key index (String)
- seq_in_index - Currently unused
- column_name - Currently unused
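A sketch of the statement shape; `my_table` is a placeholder:

```sql
SHOW INDEX FROM my_table;
```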
@@ -30,6 +30,14 @@ mongodb(host:port, database, collection, user, password, structure [, options])

- `options` - MongoDB connection string options (optional parameter).

:::tip
If you are using the MongoDB Atlas cloud offering, please add these options:

```
'connectTimeoutMS=10000&ssl=true&authSource=admin'
```

:::
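A sketch of passing these options through the table function's optional last argument; the host, credentials, and structure are placeholders:

```sql
SELECT * FROM mongodb(
    'mycluster.mongodb.net:27017',
    'my_database',
    'my_collection',
    'my_user',
    'my_password',
    'id UInt64, name String',
    'connectTimeoutMS=10000&ssl=true&authSource=admin'
);
```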
**Returned Value**
@@ -401,8 +401,8 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR

- [output_format_csv_crlf_end_of_line](../operations/settings/settings.md#output_format_csv_crlf_end_of_line) - if set to true, the line ending in the CSV output format will be `\r\n` instead of `\n`. Default value: `false`.
- [input_format_csv_skip_first_lines](../operations/settings/settings.md#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of the data. Default value: `0`.
- [input_format_csv_detect_header](../operations/settings/settings.md#input_format_csv_detect_header) - automatically detect a header with column names and types in the CSV format. Default value: `true`.
- [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - trim spaces and tab characters from unquoted strings. Default value: `true`.
- [input_format_csv_allow_variable_number_of_columns](../operations/settings/settings.md/#input_format_csv_allow_variable_number_of_columns) - ignore extra columns (if the file contains more columns than expected) and treat missing fields in the CSV input as default values. Default value: `false`.

## CSVWithNames {#csvwithnames}
@@ -575,14 +575,60 @@ ClickHouse supports dynamically changing

- `errorlog` - Error log file.
- `size` - File size. Applies to `log` and `errorlog`. As soon as the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- `count` - The number of archived log files that ClickHouse keeps.
- `stream_compress` – Compress `log` and `errorlog` with the `lz4` algorithm. To enable, set to `1` or `true`.

The file names of `log` and `errorlog` (only the file name, not the directory) support date and time template specifiers.

**Format specifiers**
Using the following specifiers, you can define a template for the file name. The "Example" column shows possible values at the moment `2023-07-06 18:32:07`.

| Specifier | Description | Example |
|-----------|-------------|---------|
| %% | Literal % | % |
| %n | New-line character | |
| %t | Horizontal tab character | |
| %Y | Year as a decimal number, e.g. 2017 | 2023 |
| %y | Last 2 digits of the year as a decimal number (range [00,99]) | 23 |
| %C | First 2 digits of the year as a decimal number (range [00,99]) | 20 |
| %G | Four-digit [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. Normally useful only with %V. | 2023 |
| %g | Last 2 digits of the [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week (range [00,99]). | 23 |
| %b | Abbreviated month name, e.g. Oct (locale dependent) | Jul |
| %h | Synonym of %b | Jul |
| %B | Full month name, e.g. October (locale dependent) | July |
| %m | Month as a decimal number (range [01,12]) | 07 |
| %U | Week of the year as a decimal number (Sunday is the first day of the week) (range [00,53]) | 27 |
| %W | Week of the year as a decimal number (Monday is the first day of the week) (range [00,53]) | 27 |
| %V | ISO 8601 week of the year (range [01,53]) | 27 |
| %j | Day of the year as a decimal number (range [001,366]) | 187 |
| %d | Day of the month as a zero-padded decimal number (range [01,31]) | 06 |
| %e | Day of the month as a space-padded decimal number (range [1,31]) | 6 |
| %a | Abbreviated weekday name, e.g. Fri (locale dependent) | Thu |
| %A | Full weekday name, e.g. Friday (locale dependent) | Thursday |
| %w | Weekday as an integer number with Sunday as 0 (range [0-6]) | 4 |
| %u | Weekday as a decimal number, where Monday is 1 (ISO 8601 format) (range [1-7]) | 4 |
| %H | Hour as a decimal number, 24 hour clock (range [00-23]) | 18 |
| %I | Hour as a decimal number, 12 hour clock (range [01,12]) | 06 |
| %M | Minute as a decimal number (range [00,59]) | 32 |
| %S | Second as a decimal number (range [00,60]) | 07 |
| %c | Standard date and time string, e.g. Sun Oct 17 04:41:13 2010 (locale dependent) | Thu Jul  6 18:32:07 2023 |
| %x | Localized date representation (locale dependent) | 07/06/23 |
| %X | Localized time representation, e.g. 18:40:20 or 6:40:20 PM (locale dependent) | 18:32:07 |
| %D | Equivalent to "%m/%d/%y" | 07/06/23 |
| %F | Equivalent to "%Y-%m-%d" (the ISO 8601 date format) | 2023-07-06 |
| %r | Localized 12-hour clock time (locale dependent) | 06:32:07 PM |
| %R | Equivalent to "%H:%M" | 18:32 |
| %T | Equivalent to "%H:%M:%S" (the ISO 8601 time format) | 18:32:07 |
| %p | Localized a.m. or p.m. designation (locale dependent) | PM |
| %z | Offset from UTC in the ISO 8601 format (e.g. -0430), or no characters if the time zone information is not available | +0800 |
| %Z | Locale-dependent time zone name or abbreviation, or no characters if the time zone information is not available | Z AWST |

**Example**

``` xml
<logger>
    <level>trace</level>
    <log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
    <errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
    <size>1000M</size>
    <count>10</count>
</logger>
```
@@ -1686,7 +1686,7 @@ SELECT * FROM table_with_enum_column_for_csv_insert;

## input_format_csv_detect_header {#input_format_csv_detect_header}

Automatically detect a header with column names and types in the CSV format.

Default value: `true`.

## input_format_csv_skip_first_lines {#input_format_csv_skip_first_lines}

@@ -1727,6 +1727,12 @@ echo ' string ' | ./clickhouse local -q "select * from table FORMAT CSV" --in

" string "
```

## input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}

Ignore extra columns (if the file contains more columns than expected) and treat missing fields in the CSV input as default values.

Disabled by default.

## output_format_tsv_crlf_end_of_line {#settings-output-format-tsv-crlf-end-of-line}

Use CRLF (DOS/Windows style) instead of LF (Unix style) as the line separator in the TSV format.
@@ -8,7 +8,6 @@ slug: /ru/operations/system-tables/asynchronous_metric_log

Columns:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
Some files were not shown because too many files have changed in this diff.