Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 09:32:06 +00:00)

Commit 01067baf75: Merge branch 'master' into randomize-more-settings-2
.clang-format
@@ -74,6 +74,7 @@ ConstructorInitializerIndentWidth: 4
 ContinuationIndentWidth: 4
 DerivePointerAlignment: false
 DisableFormat: false
+IndentRequiresClause: false
 IndentWidth: 4
 IndentWrappedFunctionNames: false
 MacroBlockBegin: ''
.clang-tidy (10 changed lines)
@@ -23,7 +23,7 @@ Checks: '*,
 -bugprone-implicit-widening-of-multiplication-result,
 -bugprone-narrowing-conversions,
 -bugprone-not-null-terminated-result,
--bugprone-reserved-identifier,
+-bugprone-reserved-identifier, # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
 -bugprone-unchecked-optional-access,

 -cert-dcl16-c,
@@ -111,6 +111,7 @@ Checks: '*,
 -misc-no-recursion,
 -misc-non-private-member-variables-in-classes,
+-misc-confusable-identifiers, # useful but slooow
 -misc-use-anonymous-namespace,

 -modernize-avoid-c-arrays,
 -modernize-concat-nested-namespaces,
@@ -136,7 +137,7 @@ Checks: '*,
 -readability-function-cognitive-complexity,
 -readability-function-size,
 -readability-identifier-length,
--readability-identifier-naming,
+-readability-identifier-naming, # useful but too slow
 -readability-implicit-bool-conversion,
 -readability-isolate-declaration,
 -readability-magic-numbers,
@@ -148,7 +149,7 @@ Checks: '*,
 -readability-uppercase-literal-suffix,
 -readability-use-anyofallof,

--zirkon-*,
+-zircon-*,
 '

 WarningsAsErrors: '*'
@@ -168,11 +169,10 @@ CheckOptions:
 readability-identifier-naming.ParameterPackCase: lower_case
 readability-identifier-naming.StructCase: CamelCase
 readability-identifier-naming.TemplateTemplateParameterCase: CamelCase
-readability-identifier-naming.TemplateUsingCase: lower_case
 readability-identifier-naming.TemplateParameterCase: lower_case
 readability-identifier-naming.TypeTemplateParameterCase: CamelCase
 readability-identifier-naming.TypedefCase: CamelCase
 readability-identifier-naming.UnionCase: CamelCase
 readability-identifier-naming.UsingCase: CamelCase
 modernize-loop-convert.UseCxx20ReverseRanges: false
 performance-move-const-arg.CheckTriviallyCopyableMove: false
 # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097
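The two checks disabled above as "too slow" can still be run by hand. A minimal sketch of invoking clang-tidy with the repository configuration, assuming an existing CMake build directory containing compile_commands.json (the build directory name and source file are illustrative, not part of this commit):

    # Run the slow checks against one translation unit; clang-tidy picks up
    # the repo's .clang-tidy automatically by walking up from the source file.
    cd ClickHouse
    clang-tidy -p build \
        --checks='readability-identifier-naming,bugprone-reserved-identifier' \
        src/Common/Exception.cpp   # hypothetical file choice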
.clangd (new file, 16 lines)
@@ -0,0 +1,16 @@
+Diagnostics:
+  # clangd does parse .clang-tidy, but some checks are too slow to run in
+  # clang-tidy build, so let's enable them explicitly for clangd at least.
+  ClangTidy:
+    # The following checks had been disabled due to slowliness with C++23,
+    # for more details see [1].
+    #
+    # [1]: https://github.com/llvm/llvm-project/issues/61418
+    #
+    # But the code base had been written in a style that had been checked
+    # by this check, so at least, let's enable it for clangd.
+    Add: [
+      # configured in .clang-tidy
+      readability-identifier-naming,
+      bugprone-reserved-identifier,
+    ]
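To confirm that clangd actually picks these checks back up, clangd can be pointed at a single file from the command line. A sketch, assuming clangd 12 or newer (which supports --check; the file path is an arbitrary example):

    # Parse one file with the project's .clangd and .clang-tidy applied
    # and print the diagnostics an editor would show.
    clangd --check=src/Common/Exception.cpp 2>&1 | grep -i 'identifier-naming' || true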
.github/workflows/master.yml (vendored, 293 changed lines)
@@ -850,6 +850,48 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   ############################################################################################
   ##################################### Docker images #######################################
   ############################################################################################
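The "Set envs" pattern above relies on two details worth spelling out: key=value lines appended to the file named by $GITHUB_ENV become environment variables for every later step in the job, and the quoted 'EOF' delimiter stops the shell from expanding anything inside the heredoc, while the ${{ runner.temp }} expressions were already substituted by Actions before the shell ran. A minimal sketch of the same mechanism outside CI, with GITHUB_ENV pointed at a scratch file for illustration:

    #!/usr/bin/env bash
    # Simulate the GitHub Actions env-file protocol locally.
    GITHUB_ENV=$(mktemp)

    cat >> "$GITHUB_ENV" << 'EOF'
    TEMP_PATH=/tmp/build_check
    BUILD_NAME=binary_riscv64
    EOF

    # A later "step" would see these as plain environment variables;
    # the parsing below is only for demonstration.
    while IFS='=' read -r key value; do
        export "$key=$value"
    done < "$GITHUB_ENV"

    echo "building $BUILD_NAME into $TEMP_PATH"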
@@ -932,6 +974,7 @@ jobs:
       - BuilderBinDarwinAarch64
       - BuilderBinFreeBSD
       - BuilderBinPPC64
+      - BuilderBinRISCV64
       - BuilderBinAmd64Compat
       - BuilderBinAarch64V80Compat
       - BuilderBinClangTidy
@@ -1341,6 +1384,40 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseAnalyzer:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_analyzer
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, analyzer)
+          REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
+          KILL_TIMEOUT=10800
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAarch64:
     needs: [BuilderDebAarch64]
     runs-on: [self-hosted, func-tester-aarch64]
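Each cleanup step above ends its docker commands with `||:`, shorthand for `|| :` (OR the no-op builtin), so a failing or empty docker invocation cannot abort the step even though Actions runs bash with -e by default; `--no-run-if-empty` likewise keeps xargs from invoking the command with no arguments. A toy demonstration of the idiom:

    #!/usr/bin/env bash
    set -e                     # mirror the CI shell's abort-on-error behavior

    # Without the guard this would kill the script, since `false` fails:
    false ||:                  # ':' always succeeds, so the line as a whole does too
    echo "still running"

    # And empty input produces no command invocation at all:
    printf '' | xargs --no-run-if-empty echo "never printed"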
@@ -2793,6 +2870,216 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan0:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan1:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan2:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan3:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan4:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan5:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
   IntegrationTestsTsan0:
     needs: [BuilderDebTsan]
     runs-on: [self-hosted, stress-tester]
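RUN_BY_HASH_NUM / RUN_BY_HASH_TOTAL split one logical check across six runners; integration_test_check.py presumably hashes each test into one of RUN_BY_HASH_TOTAL buckets and runs only the bucket matching RUN_BY_HASH_NUM. A hedged sketch of that scheme, illustrative only and not the actual tests/ci code:

    #!/usr/bin/env bash
    # Toy sharding: keep only the tests whose name hashes into this shard.
    RUN_BY_HASH_NUM=${RUN_BY_HASH_NUM:-0}
    RUN_BY_HASH_TOTAL=${RUN_BY_HASH_TOTAL:-6}

    for test_dir in tests/integration/test_*/; do
        # cksum gives a stable CRC; the modulus picks the shard.
        bucket=$(( $(printf '%s' "$test_dir" | cksum | cut -d' ' -f1) % RUN_BY_HASH_TOTAL ))
        if [ "$bucket" -eq "$RUN_BY_HASH_NUM" ]; then
            echo "shard $RUN_BY_HASH_NUM would run $test_dir"
        fi
    done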
@@ -3886,6 +4173,12 @@ jobs:
       - IntegrationTestsAsan3
       - IntegrationTestsAsan4
       - IntegrationTestsAsan5
+      - IntegrationTestsAnalyzerAsan0
+      - IntegrationTestsAnalyzerAsan1
+      - IntegrationTestsAnalyzerAsan2
+      - IntegrationTestsAnalyzerAsan3
+      - IntegrationTestsAnalyzerAsan4
+      - IntegrationTestsAnalyzerAsan5
       - IntegrationTestsRelease0
       - IntegrationTestsRelease1
       - IntegrationTestsRelease2
.github/workflows/nightly.yml (vendored, 59 changed lines)
@@ -72,61 +72,17 @@ jobs:
         with:
           name: changed_images
          path: ${{ runner.temp }}/changed_images.json
-  BuilderCoverity:
-    needs: DockerHubPush
-    runs-on: [self-hosted, builder]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          BUILD_NAME=coverity
-          CACHES_PATH=${{runner.temp}}/../ccaches
-          IMAGES_PATH=${{runner.temp}}/images_path
-          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-          TEMP_PATH=${{runner.temp}}/build_check
-          EOF
-          echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
-      - name: Download changed images
-        uses: actions/download-artifact@v3
-        with:
-          name: changed_images
-          path: ${{ env.IMAGES_PATH }}
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          submodules: true
-      - name: Build
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload Coverity Analysis
-        if: ${{ success() || failure() }}
-        run: |
-          curl --form token="${COVERITY_TOKEN}" \
-            --form email='security+coverity@clickhouse.com' \
-            --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tar.gz" \
-            --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
-            --form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
-            https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   Codebrowser:
     needs: [DockerHubPush]
     uses: ./.github/workflows/woboq.yml
   SonarCloud:
+    # TODO: Remove if: whenever SonarCloud supports c++23
+    if: ${{ false }}
     runs-on: [self-hosted, builder]
     env:
       SONAR_SCANNER_VERSION: 4.8.0.2856
       SONAR_SERVER_URL: "https://sonarcloud.io"
       BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
-      CC: clang-15
-      CXX: clang++-15
+      CC: clang-16
+      CXX: clang++-16
     steps:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
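The removed Coverity upload built its version string with two bash expansions that are easy to misread: ${GITHUB_REF#refs/heads/} strips the shortest leading refs/heads/ prefix, and ${GITHUB_SHA::6} takes the first six characters. A quick demonstration with hypothetical values:

    GITHUB_REF='refs/heads/master'
    GITHUB_SHA='01067baf75e1589100000000000000000000dead'   # hypothetical full SHA

    echo "${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}"       # prints: master-01067b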
@@ -156,7 +112,7 @@ jobs:
       - name: Set Up Build Tools
         run: |
           sudo apt-get update
-          sudo apt-get install -yq git cmake ccache ninja-build python3 yasm
+          sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm
           sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
       - name: Run build-wrapper
         run: |
@@ -175,4 +131,5 @@ jobs:
             --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
             --define sonar.projectKey="ClickHouse_ClickHouse" \
             --define sonar.organization="clickhouse-java" \
-            --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" \
+            --define sonar.cfamily.cpp23.enabled=true \
+            --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
.github/workflows/pull_request.yml (vendored, 263 changed lines)
@@ -46,7 +46,12 @@ jobs:
       - name: Python unit tests
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
+          echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p '*_test.py'
+          for dir in *_lambda/; do
+            echo "Testing $dir"
+            python3 -m unittest discover -s "$dir" -p '*_test.py'
+          done
   DockerHubPushAarch64:
     needs: CheckLabels
     runs-on: [self-hosted, style-checker-aarch64]
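One subtlety in the new loop: the trailing slash in *_lambda/ restricts the glob to directories, so a stray file whose name happens to end in _lambda is skipped. A toy demonstration:

    mkdir -p demo_lambda
    touch file_lambda                 # plain file, not a test package
    for dir in *_lambda/; do
        echo "would test: $dir"       # prints only: would test: demo_lambda/
    done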
@@ -906,6 +911,47 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush, FastTest, StyleCheck]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   ############################################################################################
   ##################################### Docker images #######################################
   ############################################################################################
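binary_riscv64 is a cross-compile target, so the produced binary cannot run on the x86_64 builder itself; a quick sanity check is to inspect the artifact. A sketch, assuming the build left a clickhouse binary somewhere under $TEMP_PATH (the exact output path is hypothetical):

    # `file` should report something like
    # "ELF 64-bit LSB executable, UCB RISC-V ..." for a riscv64 build.
    file "$TEMP_PATH"/build_check/*/clickhouse

    # Actually running it would need user-mode emulation, e.g. with
    # the qemu-user-static package installed:
    qemu-riscv64-static ./clickhouse --version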
@@ -987,6 +1033,7 @@ jobs:
       - BuilderBinDarwinAarch64
       - BuilderBinFreeBSD
       - BuilderBinPPC64
+      - BuilderBinRISCV64
       - BuilderBinAmd64Compat
       - BuilderBinAarch64V80Compat
       - BuilderBinClangTidy
@@ -3856,6 +3903,216 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan0:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan1:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan2:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan3:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan4:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan5:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
   IntegrationTestsTsan0:
     needs: [BuilderDebTsan]
     runs-on: [self-hosted, stress-tester]
@@ -4842,6 +5099,12 @@ jobs:
       - IntegrationTestsAsan3
      - IntegrationTestsAsan4
       - IntegrationTestsAsan5
+      - IntegrationTestsAnalyzerAsan0
+      - IntegrationTestsAnalyzerAsan1
+      - IntegrationTestsAnalyzerAsan2
+      - IntegrationTestsAnalyzerAsan3
+      - IntegrationTestsAnalyzerAsan4
+      - IntegrationTestsAnalyzerAsan5
       - IntegrationTestsRelease0
       - IntegrationTestsRelease1
       - IntegrationTestsRelease2
.github/workflows/woboq.yml (vendored, 8 changed lines)
@@ -6,13 +6,13 @@ env:
 concurrency:
   group: woboq
 on: # yamllint disable-line rule:truthy
-  schedule:
-    - cron: '0 */18 * * *'
   workflow_dispatch:
+  workflow_call:
 jobs:
   # don't use dockerhub push because this image updates so rarely
   WoboqCodebrowser:
     runs-on: [self-hosted, style-checker]
+    timeout-minutes: 420 # the task is pretty heavy, so there's an additional hour
     steps:
       - name: Set envs
         run: |
@@ -26,6 +26,10 @@ jobs:
         with:
           clear-repository: true
           submodules: 'true'
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.IMAGES_PATH }}
       - name: Codebrowser
         run: |
           sudo rm -fr "$TEMP_PATH"
.gitignore (vendored, 4 changed lines)
(The /.clangd ignore entry is dropped because the new .clangd file added in this commit is now tracked.)
@@ -69,6 +69,7 @@ cmake-build-*
 *.pyc
 __pycache__
 *.pytest_cache
+.mypy_cache

 test.cpp
 CPackConfig.cmake
@@ -129,7 +130,6 @@ website/package-lock.json
 /.ccls-cache

 # clangd cache
-/.clangd
 /.cache

 /compile_commands.json
@@ -162,8 +162,10 @@ tests/queries/0_stateless/test_*
 tests/queries/0_stateless/*.binary
 tests/queries/0_stateless/*.generated-expect
+tests/queries/0_stateless/*.expect.history
+tests/integration/**/_gen

 # rust
 /rust/**/target
 # It is autogenerated from *.in
 /rust/**/.cargo/config.toml
 /rust/**/vendor
.gitmodules (vendored, 29 changed lines)
@@ -19,7 +19,7 @@
 	url = https://github.com/google/googletest
 [submodule "contrib/capnproto"]
 	path = contrib/capnproto
-	url = https://github.com/capnproto/capnproto
+	url = https://github.com/ClickHouse/capnproto
 [submodule "contrib/double-conversion"]
 	path = contrib/double-conversion
 	url = https://github.com/google/double-conversion
@@ -35,10 +35,9 @@
 [submodule "contrib/unixodbc"]
 	path = contrib/unixodbc
 	url = https://github.com/ClickHouse/UnixODBC
-[submodule "contrib/protobuf"]
-	path = contrib/protobuf
-	url = https://github.com/ClickHouse/protobuf
-	branch = v3.13.0.1
+[submodule "contrib/google-protobuf"]
+	path = contrib/google-protobuf
+	url = https://github.com/ClickHouse/google-protobuf.git
 [submodule "contrib/boost"]
 	path = contrib/boost
 	url = https://github.com/ClickHouse/boost
@@ -253,21 +252,18 @@
 [submodule "contrib/qpl"]
 	path = contrib/qpl
 	url = https://github.com/intel/qpl
 [submodule "contrib/idxd-config"]
 	path = contrib/idxd-config
 	url = https://github.com/intel/idxd-config
 [submodule "contrib/wyhash"]
 	path = contrib/wyhash
 	url = https://github.com/wangyi-fudan/wyhash
 [submodule "contrib/hashidsxx"]
 	path = contrib/hashidsxx
 	url = https://github.com/schoentoon/hashidsxx
 [submodule "contrib/nats-io"]
 	path = contrib/nats-io
 	url = https://github.com/ClickHouse/nats.c
 [submodule "contrib/vectorscan"]
 	path = contrib/vectorscan
-	url = https://github.com/VectorCamp/vectorscan
-[submodule "contrib/c-ares"]
-	path = contrib/c-ares
-	url = https://github.com/ClickHouse/c-ares
+	url = https://github.com/VectorCamp/vectorscan.git
 [submodule "contrib/llvm-project"]
 	path = contrib/llvm-project
 	url = https://github.com/ClickHouse/llvm-project
@@ -335,6 +331,15 @@
 [submodule "contrib/liburing"]
 	path = contrib/liburing
 	url = https://github.com/axboe/liburing
+[submodule "contrib/libfiu"]
+	path = contrib/libfiu
+	url = https://github.com/ClickHouse/libfiu.git
 [submodule "contrib/isa-l"]
 	path = contrib/isa-l
 	url = https://github.com/ClickHouse/isa-l.git
+[submodule "contrib/c-ares"]
+	path = contrib/c-ares
+	url = https://github.com/c-ares/c-ares.git
+[submodule "contrib/incbin"]
+	path = contrib/incbin
+	url = https://github.com/graphitemaster/incbin.git
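Several submodules change URL in place here (capnproto, protobuf replaced by google-protobuf, c-ares re-pointed at upstream), and an already-initialized checkout keeps the old remotes until told otherwise. A sketch of refreshing a working copy after pulling this commit, using standard git commands and the submodule names from the hunks above:

    # Propagate the new URLs from .gitmodules into .git/config ...
    git submodule sync --recursive

    # ... then fetch the renamed/added submodules at their recorded commits.
    git submodule update --init --recursive \
        contrib/google-protobuf contrib/c-ares contrib/libfiu contrib/incbin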
.yamllint
@@ -6,8 +6,10 @@ rules:
     level: warning
     indent-sequences: consistent
   line-length:
-    # there are some bash -c "", so this is OK
-    max: 300
+    # there are:
+    # - bash -c "", so this is OK
+    # - yaml in tests
+    max: 1000
     level: warning
   comments:
     min-spaces-from-content: 1
354
CHANGELOG.md
354
CHANGELOG.md
@ -1,4 +1,6 @@
|
||||
### Table of Contents
|
||||
**[ClickHouse release v23.6, 2023-06-30](#236)**<br/>
|
||||
**[ClickHouse release v23.5, 2023-06-08](#235)**<br/>
|
||||
**[ClickHouse release v23.4, 2023-04-26](#234)**<br/>
|
||||
**[ClickHouse release v23.3 LTS, 2023-03-30](#233)**<br/>
|
||||
**[ClickHouse release v23.2, 2023-02-23](#232)**<br/>
|
||||
@ -7,6 +9,358 @@
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### <a id="236"></a> ClickHouse release 23.6, 2023-06-29
|
||||
|
||||
#### Backward Incompatible Change
|
||||
* Delete feature `do_not_evict_index_and_mark_files` in the fs cache. This feature was only making things worse. [#51253](https://github.com/ClickHouse/ClickHouse/pull/51253) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Remove ALTER support for experimental LIVE VIEW. [#51287](https://github.com/ClickHouse/ClickHouse/pull/51287) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Decrease the default values for `http_max_field_value_size` and `http_max_field_name_size` to 128 KiB. [#51163](https://github.com/ClickHouse/ClickHouse/pull/51163) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* CGroups metrics related to CPU are replaced with one metric, `CGroupMaxCPU` for better usability. The `Normalized` CPU usage metrics will be normalized to CGroups limits instead of the total number of CPUs when they are set. This closes [#50836](https://github.com/ClickHouse/ClickHouse/issues/50836). [#50835](https://github.com/ClickHouse/ClickHouse/pull/50835) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### New Feature
|
||||
* The function `transform` as well as `CASE` with value matching started to support all data types. This closes [#29730](https://github.com/ClickHouse/ClickHouse/issues/29730). This closes [#32387](https://github.com/ClickHouse/ClickHouse/issues/32387). This closes [#50827](https://github.com/ClickHouse/ClickHouse/issues/50827). This closes [#31336](https://github.com/ClickHouse/ClickHouse/issues/31336). This closes [#40493](https://github.com/ClickHouse/ClickHouse/issues/40493). [#51351](https://github.com/ClickHouse/ClickHouse/pull/51351) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Added option `--rename_files_after_processing <pattern>`. This closes [#34207](https://github.com/ClickHouse/ClickHouse/issues/34207). [#49626](https://github.com/ClickHouse/ClickHouse/pull/49626) ([alekseygolub](https://github.com/alekseygolub)).
|
||||
* Add support for `TRUNCATE` modifier in `INTO OUTFILE` clause. Suggest using `APPEND` or `TRUNCATE` for `INTO OUTFILE` when file exists. [#50950](https://github.com/ClickHouse/ClickHouse/pull/50950) ([alekar](https://github.com/alekar)).
|
||||
* Add table engine `Redis` and table function `redis`. It allows querying external Redis servers. [#50150](https://github.com/ClickHouse/ClickHouse/pull/50150) ([JackyWoo](https://github.com/JackyWoo)).
|
||||
* Allow to skip empty files in file/s3/url/hdfs table functions using settings `s3_skip_empty_files`, `hdfs_skip_empty_files`, `engine_file_skip_empty_files`, `engine_url_skip_empty_files`. [#50364](https://github.com/ClickHouse/ClickHouse/pull/50364) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add a new setting named `use_mysql_types_in_show_columns` to alter the `SHOW COLUMNS` SQL statement to display MySQL equivalent types when a client is connected via the MySQL compatibility port. [#49577](https://github.com/ClickHouse/ClickHouse/pull/49577) ([Thomas Panetti](https://github.com/tpanetti)).
|
||||
* Clickhouse-client can now be called with a connection string instead of "--host", "--port", "--user" etc. [#50689](https://github.com/ClickHouse/ClickHouse/pull/50689) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||
* Add setting `session_timezone`; it is used as the default timezone for a session when not explicitly specified. [#44149](https://github.com/ClickHouse/ClickHouse/pull/44149) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||
* Codec DEFLATE_QPL is now controlled via server setting "enable_deflate_qpl_codec" (default: false) instead of setting "allow_experimental_codecs". This marks DEFLATE_QPL non-experimental. [#50775](https://github.com/ClickHouse/ClickHouse/pull/50775) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
|
||||
#### Performance Improvement
|
||||
* Improved scheduling of merge selecting and cleanup tasks in `ReplicatedMergeTree`. The tasks will not be executed too frequently when there's nothing to merge or cleanup. Added settings `max_merge_selecting_sleep_ms`, `merge_selecting_sleep_slowdown_factor`, `max_cleanup_delay_period` and `cleanup_thread_preferred_points_per_iteration`. It should close [#31919](https://github.com/ClickHouse/ClickHouse/issues/31919). [#50107](https://github.com/ClickHouse/ClickHouse/pull/50107) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Make filter push down through cross join. [#50605](https://github.com/ClickHouse/ClickHouse/pull/50605) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Improve performance with enabled QueryProfiler using thread-local timer_id instead of global object. [#48778](https://github.com/ClickHouse/ClickHouse/pull/48778) ([Jiebin Sun](https://github.com/jiebinn)).
|
||||
* Rewrite CapnProto input/output format to improve its performance. Map column names and CapnProto fields case insensitive, fix reading/writing of nested structure fields. [#49752](https://github.com/ClickHouse/ClickHouse/pull/49752) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Optimize parquet write performance for parallel threads. [#50102](https://github.com/ClickHouse/ClickHouse/pull/50102) ([Hongbin Ma](https://github.com/binmahone)).
|
||||
* Disable `parallelize_output_from_storages` for processing MATERIALIZED VIEWs and storages with one block only. [#50214](https://github.com/ClickHouse/ClickHouse/pull/50214) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Merge PR [#46558](https://github.com/ClickHouse/ClickHouse/pull/46558). Avoid block permutation during sort if the block is already sorted. [#50697](https://github.com/ClickHouse/ClickHouse/pull/50697) ([Alexey Milovidov](https://github.com/alexey-milovidov), [Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Make multiple list requests to ZooKeeper in parallel to speed up reading from system.zookeeper table. [#51042](https://github.com/ClickHouse/ClickHouse/pull/51042) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Speedup initialization of DateTime lookup tables for time zones. This should reduce startup/connect time of clickhouse-client especially in debug build as it is rather heavy. [#51347](https://github.com/ClickHouse/ClickHouse/pull/51347) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Fix data lakes slowness because of synchronous head requests. (Related to Iceberg/Deltalake/Hudi being slow with a lot of files). [#50976](https://github.com/ClickHouse/ClickHouse/pull/50976) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Do not read all the columns from right GLOBAL JOIN table. [#50721](https://github.com/ClickHouse/ClickHouse/pull/50721) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
|
||||
#### Experimental Feature
|
||||
* Support parallel replicas with the analyzer. [#50441](https://github.com/ClickHouse/ClickHouse/pull/50441) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Add random sleep before large merges/mutations execution to split load more evenly between replicas in case of zero-copy replication. [#51282](https://github.com/ClickHouse/ClickHouse/pull/51282) ([alesapin](https://github.com/alesapin)).
|
||||
* Do not replicate `ALTER PARTITION` queries and mutations through `Replicated` database if it has only one shard and the underlying table is `ReplicatedMergeTree`. [#51049](https://github.com/ClickHouse/ClickHouse/pull/51049) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
|
||||
#### Improvement
|
||||
* Relax the thresholds for "too many parts" to be more modern. Return the backpressure during long-running insert queries. [#50856](https://github.com/ClickHouse/ClickHouse/pull/50856) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Allow to cast IPv6 to IPv4 address for CIDR ::ffff:0:0/96 (IPv4-mapped addresses). [#49759](https://github.com/ClickHouse/ClickHouse/pull/49759) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Update MongoDB protocol to support MongoDB 5.1 version and newer. Support for the versions with the old protocol (<3.6) is preserved. Closes [#45621](https://github.com/ClickHouse/ClickHouse/issues/45621), [#49879](https://github.com/ClickHouse/ClickHouse/issues/49879). [#50061](https://github.com/ClickHouse/ClickHouse/pull/50061) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Add setting `input_format_max_bytes_to_read_for_schema_inference` to limit the number of bytes to read in schema inference. Closes [#50577](https://github.com/ClickHouse/ClickHouse/issues/50577). [#50592](https://github.com/ClickHouse/ClickHouse/pull/50592) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Respect setting `input_format_null_as_default` in schema inference. [#50602](https://github.com/ClickHouse/ClickHouse/pull/50602) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Allow to skip trailing empty lines in CSV/TSV/CustomSeparated formats via settings `input_format_csv_skip_trailing_empty_lines`, `input_format_tsv_skip_trailing_empty_lines` and `input_format_custom_skip_trailing_empty_lines` (disabled by default). Closes [#49315](https://github.com/ClickHouse/ClickHouse/issues/49315). [#50635](https://github.com/ClickHouse/ClickHouse/pull/50635) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Functions "toDateOrDefault|OrNull" and "accuateCast[OrDefault|OrNull]" now correctly parse numeric arguments. [#50709](https://github.com/ClickHouse/ClickHouse/pull/50709) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||
* Support CSV with whitespace or `\t` field delimiters, and these delimiters are supported in Spark. [#50712](https://github.com/ClickHouse/ClickHouse/pull/50712) ([KevinyhZou](https://github.com/KevinyhZou)).
|
||||
* Settings `number_of_mutations_to_delay` and `number_of_mutations_to_throw` are enabled by default now with values 500 and 1000 respectively. [#50726](https://github.com/ClickHouse/ClickHouse/pull/50726) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* The dashboard correctly shows missing values. This closes [#50831](https://github.com/ClickHouse/ClickHouse/issues/50831). [#50832](https://github.com/ClickHouse/ClickHouse/pull/50832) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Added the possibility to use date and time arguments in the syslog timestamp format in functions `parseDateTimeBestEffort*` and `parseDateTime64BestEffort*`. [#50925](https://github.com/ClickHouse/ClickHouse/pull/50925) ([Victor Krasnov](https://github.com/sirvickr)).
|
||||
* Command line parameter "--password" in clickhouse-client can now be specified only once. [#50966](https://github.com/ClickHouse/ClickHouse/pull/50966) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||
* Use `hash_of_all_files` from `system.parts` to check identity of parts during on-cluster backups. [#50997](https://github.com/ClickHouse/ClickHouse/pull/50997) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* The system table zookeeper_connection connected_time identifies the time when the connection is established (standard format), and session_uptime_elapsed_seconds is added, which labels the duration of the established connection session (in seconds). [#51026](https://github.com/ClickHouse/ClickHouse/pull/51026) ([郭小龙](https://github.com/guoxiaolongzte)).
|
||||
* Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add total_bytes_to_read to the Progress packet in TCP protocol for better Progress bar. [#51158](https://github.com/ClickHouse/ClickHouse/pull/51158) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Better checking of data parts on disks with filesystem cache. [#51164](https://github.com/ClickHouse/ClickHouse/pull/51164) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix sometimes not correct current_elements_num in fs cache. [#51242](https://github.com/ClickHouse/ClickHouse/pull/51242) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
* Add embedded keeper-client to standalone keeper binary. [#50964](https://github.com/ClickHouse/ClickHouse/pull/50964) ([pufit](https://github.com/pufit)).
|
||||
* Actual LZ4 version is used now. [#50621](https://github.com/ClickHouse/ClickHouse/pull/50621) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* ClickHouse server will print the list of changed settings on fatal errors. This closes [#51137](https://github.com/ClickHouse/ClickHouse/issues/51137). [#51138](https://github.com/ClickHouse/ClickHouse/pull/51138) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Allow building ClickHouse with clang-17. [#51300](https://github.com/ClickHouse/ClickHouse/pull/51300) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* [SQLancer](https://github.com/sqlancer/sqlancer) check is considered stable as bugs that were triggered by it are fixed. Now failures of SQLancer check will be reported as failed check status. [#51340](https://github.com/ClickHouse/ClickHouse/pull/51340) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Split huge `RUN` in Dockerfile into smaller conditional. Install the necessary tools on demand in the same `RUN` layer, and remove them after that. Upgrade the OS only once at the beginning. Use a modern way to check the signed repository. Downgrade the base repo to ubuntu:20.04 to address the issues on older docker versions. Upgrade golang version to address golang vulnerabilities. [#51504](https://github.com/ClickHouse/ClickHouse/pull/51504) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Report loading status for executable dictionaries correctly [#48775](https://github.com/ClickHouse/ClickHouse/pull/48775) ([Anton Kozlov](https://github.com/tonickkozlov)).
|
||||
* Proper mutation of skip indices and projections [#50104](https://github.com/ClickHouse/ClickHouse/pull/50104) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Cleanup moving parts [#50489](https://github.com/ClickHouse/ClickHouse/pull/50489) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)).
|
||||
* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Revert recent grace hash join changes [#50699](https://github.com/ClickHouse/ClickHouse/pull/50699) ([vdimir](https://github.com/vdimir)).
|
||||
* Query Cache: Try to fix bad cast from `ColumnConst` to `ColumnVector<char8_t>` [#50704](https://github.com/ClickHouse/ClickHouse/pull/50704) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* SummingMergeTree support for DateTime64 [#50797](https://github.com/ClickHouse/ClickHouse/pull/50797) ([Jordi Villar](https://github.com/jrdi)).
|
||||
* Add compatibility setting for non-const timezones [#50834](https://github.com/ClickHouse/ClickHouse/pull/50834) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix hashing of LDAP params in the cache entries [#50865](https://github.com/ClickHouse/ClickHouse/pull/50865) ([Julian Maicher](https://github.com/jmaicher)).
|
||||
* Fallback to parsing big integer from String instead of exception in Parquet format [#50873](https://github.com/ClickHouse/ClickHouse/pull/50873) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix checking the lock file too often while writing a backup [#50889](https://github.com/ClickHouse/ClickHouse/pull/50889) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Do not apply projection if read-in-order was enabled. [#50923](https://github.com/ClickHouse/ClickHouse/pull/50923) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix race in the Azure blob storage iterator [#50936](https://github.com/ClickHouse/ClickHouse/pull/50936) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Fix erroneous `sort_description` propagation in `CreatingSets` [#50955](https://github.com/ClickHouse/ClickHouse/pull/50955) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix Iceberg v2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* MaterializedMySQL: Keep parentheses for empty table overrides [#50977](https://github.com/ClickHouse/ClickHouse/pull/50977) ([Val Doroshchuk](https://github.com/valbok)).
|
||||
* Fix crash in BackupCoordinationStageSync::setError() [#51012](https://github.com/ClickHouse/ClickHouse/pull/51012) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||
* Fix ineffective query cache for SELECTs with subqueries [#51132](https://github.com/ClickHouse/ClickHouse/pull/51132) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix Set index with constant nullable comparison. [#51205](https://github.com/ClickHouse/ClickHouse/pull/51205) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix a crash in s3 and s3Cluster functions [#51209](https://github.com/ClickHouse/ClickHouse/pull/51209) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix a crash with compiled expressions [#51231](https://github.com/ClickHouse/ClickHouse/pull/51231) ([LiuNeng](https://github.com/liuneng1994)).
|
||||
* Fix use-after-free in StorageURL when switching URLs [#51260](https://github.com/ClickHouse/ClickHouse/pull/51260) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Updated check for parameterized view [#51272](https://github.com/ClickHouse/ClickHouse/pull/51272) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Fix multiple writing of same file to backup [#51299](https://github.com/ClickHouse/ClickHouse/pull/51299) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix fuzzer failure in ActionsDAG [#51301](https://github.com/ClickHouse/ClickHouse/pull/51301) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Remove garbage from function `transform` [#51350](https://github.com/ClickHouse/ClickHouse/pull/51350) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
|
||||
### <a id="235"></a> ClickHouse release 23.5, 2023-06-08
|
||||
|
||||
#### Upgrade Notes
|
||||
* Compress marks and primary key by default. It significantly reduces the cold query time. Upgrade notes: the support for compressed marks and primary key has been added in version 22.9. If you turned on compressed marks or primary key or installed version 23.5 or newer, which has compressed marks or primary key on by default, you will not be able to downgrade to version 22.8 or earlier. You can also explicitly disable compressed marks or primary keys by specifying the `compress_marks` and `compress_primary_key` settings in the `<merge_tree>` section of the server configuration file. **Upgrade notes:** If you upgrade from versions prior to 22.9, you should either upgrade all replicas at once or disable the compression before upgrade, or upgrade through an intermediate version, where the compressed marks are supported but not enabled by default, such as 23.3. [#42587](https://github.com/ClickHouse/ClickHouse/pull/42587) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Make local object storage work consistently with s3 object storage, fix problem with append (closes [#48465](https://github.com/ClickHouse/ClickHouse/issues/48465)), make it configurable as independent storage. The change is backward incompatible because the cache on top of local object storage is not compatible to previous versions. [#48791](https://github.com/ClickHouse/ClickHouse/pull/48791) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* The experimental feature "in-memory data parts" is removed. The data format is still supported, but the settings are no-op, and compact or wide parts will be used instead. This closes [#45409](https://github.com/ClickHouse/ClickHouse/issues/45409). [#49429](https://github.com/ClickHouse/ClickHouse/pull/49429) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Changed default values of settings `parallelize_output_from_storages` and `input_format_parquet_preserve_order`. This allows ClickHouse to reorder rows when reading from files (e.g. CSV or Parquet), greatly improving performance in many cases. To restore the old behavior of preserving order, use `parallelize_output_from_storages = 0`, `input_format_parquet_preserve_order = 1`. [#49479](https://github.com/ClickHouse/ClickHouse/pull/49479) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Make projections production-ready. Add the `optimize_use_projections` setting to control whether the projections will be selected for SELECT queries. The setting `allow_experimental_projection_optimization` is obsolete and does nothing. [#49719](https://github.com/ClickHouse/ClickHouse/pull/49719) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Mark `joinGet` as non-deterministic (so as `dictGet`). It allows using them in mutations without an extra setting. [#49843](https://github.com/ClickHouse/ClickHouse/pull/49843) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Revert the "`groupArray` returns cannot be nullable" change (due to binary compatibility breakage for `groupArray`/`groupArrayLast`/`groupArraySample` over `Nullable` types, which likely will lead to `TOO_LARGE_ARRAY_SIZE` or `CANNOT_READ_ALL_DATA`). [#49971](https://github.com/ClickHouse/ClickHouse/pull/49971) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Setting `enable_memory_bound_merging_of_aggregation_results` is enabled by default. If you update from version prior to 22.12, we recommend to set this flag to `false` until update is finished. [#50319](https://github.com/ClickHouse/ClickHouse/pull/50319) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
|
||||
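
A minimal sketch of the relevant knobs (the table and column names are hypothetical): mark and primary key compression can also be disabled per table, and the pre-23.5 row ordering behavior can be restored per session:

```sql
-- Keep marks and primary key uncompressed so the table stays
-- readable by replicas older than 22.9 during a staged upgrade.
CREATE TABLE t_staged_upgrade
(
    id UInt64,
    payload String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS compress_marks = 0, compress_primary_key = 0;

-- Restore the old behavior of preserving row order when reading files.
SET parallelize_output_from_storages = 0, input_format_parquet_preserve_order = 1;
```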

#### New Feature
* Added the storage engine `AzureBlobStorage` and the `azureBlobStorage` table function. The supported set of features is very similar to the storage/table function S3. [#50604](https://github.com/ClickHouse/ClickHouse/pull/50604) ([alesapin](https://github.com/alesapin), [SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Added a native ClickHouse Keeper CLI client; it is available as `clickhouse keeper-client`. [#47414](https://github.com/ClickHouse/ClickHouse/pull/47414) ([pufit](https://github.com/pufit)).
* Add `urlCluster` table function. Refactor all *Cluster table functions to reduce code duplication. Make schema inference work for all possible *Cluster function signatures and for named collections. Closes [#38499](https://github.com/ClickHouse/ClickHouse/issues/38499). [#45427](https://github.com/ClickHouse/ClickHouse/pull/45427) ([attack204](https://github.com/attack204)), Pavel Kruglov.
* The query cache can now be used for production workloads. [#47977](https://github.com/ClickHouse/ClickHouse/pull/47977) ([Robert Schulze](https://github.com/rschu1ze)). The query cache now supports queries with the totals and extremes modifiers. [#48853](https://github.com/ClickHouse/ClickHouse/pull/48853) ([Robert Schulze](https://github.com/rschu1ze)). The `allow_experimental_query_cache` setting is kept as obsolete for backward compatibility; it was removed in [#47977](https://github.com/ClickHouse/ClickHouse/pull/47977). [#49934](https://github.com/ClickHouse/ClickHouse/pull/49934) ([Timur Solodovnikov](https://github.com/tsolodov)).
* Geographical data types (`Point`, `Ring`, `Polygon`, and `MultiPolygon`) are production-ready. [#50022](https://github.com/ClickHouse/ClickHouse/pull/50022) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add schema inference to the PostgreSQL, MySQL, MeiliSearch, and SQLite table engines. Closes [#49972](https://github.com/ClickHouse/ClickHouse/issues/49972). [#50000](https://github.com/ClickHouse/ClickHouse/pull/50000) ([Nikolay Degterinsky](https://github.com/evillique)).
* The password type in queries like `CREATE USER u IDENTIFIED BY 'p'` will be automatically set according to the setting `default_password_type` in the `config.xml` on the server. Closes [#42915](https://github.com/ClickHouse/ClickHouse/issues/42915). [#44674](https://github.com/ClickHouse/ClickHouse/pull/44674) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add the bcrypt password authentication type. Closes [#34599](https://github.com/ClickHouse/ClickHouse/issues/34599). [#44905](https://github.com/ClickHouse/ClickHouse/pull/44905) ([Nikolay Degterinsky](https://github.com/evillique)).
* Introduce the new keyword `INTO OUTFILE 'file.txt' APPEND` (see the sketch after this list). [#48880](https://github.com/ClickHouse/ClickHouse/pull/48880) ([alekar](https://github.com/alekar)).
* Added the `system.zookeeper_connection` table that shows information about Keeper connections. [#45245](https://github.com/ClickHouse/ClickHouse/pull/45245) ([mateng915](https://github.com/mateng0915)).
* Add new function `generateRandomStructure` that generates a random table structure. It can be used in combination with the table function `generateRandom`. [#47409](https://github.com/ClickHouse/ClickHouse/pull/47409) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow the use of `CASE` without an `ELSE` branch and extend `transform` to deal with more types. Also fix some issues that made `transform()` return incorrect results when decimal types were mixed with other numeric types. This closes #2655, #9596, and #38666. [#48300](https://github.com/ClickHouse/ClickHouse/pull/48300) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Added [server-side encryption using KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) with S3 tables, and the `header` setting with S3 disks. Closes [#48723](https://github.com/ClickHouse/ClickHouse/issues/48723). [#48724](https://github.com/ClickHouse/ClickHouse/pull/48724) ([Johann Gan](https://github.com/johanngan)).
* Add a MemoryTracker for the background tasks (merges and mutations). Introduce the `merges_mutations_memory_usage_soft_limit` and `merges_mutations_memory_usage_to_ram_ratio` settings that represent the soft memory limit for merges and mutations. If this limit is reached, ClickHouse won't schedule new merge or mutation tasks. Also, the `MergesMutationsMemoryTracking` metric is introduced to allow observing the current memory usage of background tasks. Resubmit of [#46089](https://github.com/ClickHouse/ClickHouse/issues/46089). Closes [#48774](https://github.com/ClickHouse/ClickHouse/issues/48774). [#48787](https://github.com/ClickHouse/ClickHouse/pull/48787) ([Dmitry Novik](https://github.com/novikd)).
* Function `dotProduct` now works for arrays. [#49050](https://github.com/ClickHouse/ClickHouse/pull/49050) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
* Support the `SHOW INDEX` statement to improve compatibility with MySQL. [#49158](https://github.com/ClickHouse/ClickHouse/pull/49158) ([Robert Schulze](https://github.com/rschu1ze)).
* Add support for the virtual columns `_file` and `_path` to the table function `url`. Improve the error message for the table function `url`. Resolves [#49231](https://github.com/ClickHouse/ClickHouse/issues/49231) and [#49232](https://github.com/ClickHouse/ClickHouse/issues/49232). [#49356](https://github.com/ClickHouse/ClickHouse/pull/49356) ([Ziyi Tan](https://github.com/Ziy1-Tan)).
* Add the `grants` field in the users.xml file, which allows specifying grants for users. [#49381](https://github.com/ClickHouse/ClickHouse/pull/49381) ([pufit](https://github.com/pufit)).
* Support FULL/RIGHT JOIN using the grace hash join algorithm. [#49483](https://github.com/ClickHouse/ClickHouse/pull/49483) ([lgbo](https://github.com/lgbo-ustc)).
* The `WITH FILL` modifier groups filling by sorting prefix. Controlled by the `use_with_fill_by_sorting_prefix` setting (enabled by default). Related to [#33203](https://github.com/ClickHouse/ClickHouse/issues/33203#issuecomment-1418736794). [#49503](https://github.com/ClickHouse/ClickHouse/pull/49503) ([Igor Nikonov](https://github.com/devcrafter)).
* `clickhouse-client` now accepts queries after `--multiquery` when `--query` (or `-q`) is absent. Example: `clickhouse-client --multiquery "select 1; select 2;"`. [#49870](https://github.com/ClickHouse/ClickHouse/pull/49870) ([Alexey Gerasimchuk](https://github.com/Demilivor)).
* Add a separate `handshake_timeout` for receiving the Hello packet from a replica. Closes [#48854](https://github.com/ClickHouse/ClickHouse/issues/48854). [#49948](https://github.com/ClickHouse/ClickHouse/pull/49948) ([Kruglov Pavel](https://github.com/Avogar)).
* Added a function `space` which repeats a space as many times as specified. [#50103](https://github.com/ClickHouse/ClickHouse/pull/50103) ([Robert Schulze](https://github.com/rschu1ze)).
* Added the `--input_format_csv_trim_whitespaces` option. [#50215](https://github.com/ClickHouse/ClickHouse/pull/50215) ([Alexey Gerasimchuk](https://github.com/Demilivor)).
* Allow the `dictGetAll` function for regexp tree dictionaries to return values from multiple matches as arrays. Closes [#50254](https://github.com/ClickHouse/ClickHouse/issues/50254). [#50255](https://github.com/ClickHouse/ClickHouse/pull/50255) ([Johann Gan](https://github.com/johanngan)).
* Added the `toLastDayOfWeek` function to round a date or a date with time up to the nearest Saturday or Sunday. [#50315](https://github.com/ClickHouse/ClickHouse/pull/50315) ([Victor Krasnov](https://github.com/sirvickr)).
* Ability to ignore a skip index by specifying `ignore_data_skipping_indices`. [#50329](https://github.com/ClickHouse/ClickHouse/pull/50329) ([Boris Kuschel](https://github.com/bkuschel)).
* Add the `system.user_processes` table and the `SHOW USER PROCESSES` query to show memory info and ProfileEvents at the user level. [#50492](https://github.com/ClickHouse/ClickHouse/pull/50492) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Add server and format settings `display_secrets_in_show_and_select` for displaying secrets of tables, databases, table functions, and dictionaries. Add the privilege `displaySecretsInShowAndSelect` controlling which users can view secrets. [#46528](https://github.com/ClickHouse/ClickHouse/pull/46528) ([Mike Kot](https://github.com/myrrc)).
* Allow setting up a ROW POLICY for all tables that belong to a DATABASE. [#47640](https://github.com/ClickHouse/ClickHouse/pull/47640) ([Ilya Golshtein](https://github.com/ilejn)).
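
A quick sketch of some of the new syntax and functions from this list (the file name and argument values are illustrative):

```sql
-- INTO OUTFILE ... APPEND: append to the file instead of failing if it exists.
SELECT number FROM system.numbers LIMIT 10
INTO OUTFILE 'numbers.tsv' APPEND;

-- New functions: a string of N spaces, rounding to the end of the week,
-- and a random table structure for use with generateRandom().
SELECT space(3);
SELECT toLastDayOfWeek(toDate('2023-06-08'));
SELECT generateRandomStructure(4);
```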

#### Performance Improvement
* Compress marks and primary key by default. This significantly reduces the cold query time. Support for compressed marks and primary key was added in version 22.9; if you turned them on, or installed version 23.5 or newer (which has them on by default), you will not be able to downgrade to version 22.8 or earlier. You can also explicitly disable compressed marks or primary keys via the `compress_marks` and `compress_primary_key` settings in the `<merge_tree>` section of the server configuration file. [#42587](https://github.com/ClickHouse/ClickHouse/pull/42587) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The new setting `s3_max_inflight_parts_for_one_file` limits the number of concurrently uploaded parts of a multipart upload within the scope of one file (see the sketch after this list). [#49961](https://github.com/ClickHouse/ClickHouse/pull/49961) ([Sema Checherinda](https://github.com/CheSema)).
* When reading from multiple files, reduce the number of parallel parsing threads per file. Resolves [#42192](https://github.com/ClickHouse/ClickHouse/issues/42192). [#46661](https://github.com/ClickHouse/ClickHouse/pull/46661) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Use an aggregate projection only if it reads fewer granules than normal reading. This should help when a query hits the primary key of the table but not the projection. Fixes [#49150](https://github.com/ClickHouse/ClickHouse/issues/49150). [#49417](https://github.com/ClickHouse/ClickHouse/pull/49417) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Do not store blocks in `ANY` hash join if nothing is inserted. [#48633](https://github.com/ClickHouse/ClickHouse/pull/48633) ([vdimir](https://github.com/vdimir)).
* Fix the aggregate combinator `-If` when JIT compiled, and enable JIT compilation for aggregate functions. Closes [#48120](https://github.com/ClickHouse/ClickHouse/issues/48120). [#49083](https://github.com/ClickHouse/ClickHouse/pull/49083) ([Igor Nikonov](https://github.com/devcrafter)).
* For reading from remote tables, use smaller tasks (instead of reading the whole data part) to make task stealing work: the task size is determined by the size of the columns to read; 1 MB buffers are always used for reading from S3; and the boundaries of cache segments are aligned to 1 MB, so they have a decent size even with small tasks (this should also prevent fragmentation). [#49287](https://github.com/ClickHouse/ClickHouse/pull/49287) ([Nikita Taranov](https://github.com/nickitat)).
* Introduced the settings `merge_max_block_size_bytes`, to limit the amount of memory used for background operations, and `vertical_merge_algorithm_min_bytes_to_activate`, to add another condition for activating vertical merges. [#49313](https://github.com/ClickHouse/ClickHouse/pull/49313) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* The default size of the read buffer for reading from the local filesystem was changed to a slightly better value. Also, two new settings are introduced: `max_read_buffer_size_local_fs` and `max_read_buffer_size_remote_fs`. [#49321](https://github.com/ClickHouse/ClickHouse/pull/49321) ([Nikita Taranov](https://github.com/nickitat)).
* Improve memory usage and speed of `SPARSE_HASHED`/`HASHED` dictionaries (e.g. `SPARSE_HASHED` now eats 2.6x less memory, and is ~2x faster). [#49380](https://github.com/ClickHouse/ClickHouse/pull/49380) ([Azat Khuzhin](https://github.com/azat)).
* Optimize the `system.query_log` and `system.query_thread_log` tables by applying `LowCardinality` when appropriate. Queries over these tables will be faster. [#49530](https://github.com/ClickHouse/ClickHouse/pull/49530) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better performance when reading local `Parquet` files (through parallel reading). [#49539](https://github.com/ClickHouse/ClickHouse/pull/49539) ([Michael Kolupaev](https://github.com/al13n321)).
* Improve the performance of `RIGHT/FULL JOIN` by up to 2 times in certain scenarios, especially when joining a small left table with a large right table. [#49585](https://github.com/ClickHouse/ClickHouse/pull/49585) ([lgbo](https://github.com/lgbo-ustc)).
* Improve the performance of BLAKE3 by 11% by enabling LTO for Rust. Now it is on par with C++. [#49600](https://github.com/ClickHouse/ClickHouse/pull/49600) ([Azat Khuzhin](https://github.com/azat)).
* Optimize the structure of `system.opentelemetry_span_log`. Use `LowCardinality` where appropriate. Although this table is generally stupid (it is using the Map data type even for common attributes), it will be slightly better. [#49647](https://github.com/ClickHouse/ClickHouse/pull/49647) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try to reserve the hash table's size in the `grace_hash` join. [#49816](https://github.com/ClickHouse/ClickHouse/pull/49816) ([lgbo](https://github.com/lgbo-ustc)).
* Parallel merge of `uniqExactIf` states. Closes [#49885](https://github.com/ClickHouse/ClickHouse/issues/49885). [#50285](https://github.com/ClickHouse/ClickHouse/pull/50285) ([flynn](https://github.com/ucasfl)).
* Keeper improvement: add the `CheckNotExists` request to Keeper, which allows improving the performance of Replicated tables. [#48897](https://github.com/ClickHouse/ClickHouse/pull/48897) ([Antonio Andelic](https://github.com/antonio2368)).
* Keeper performance improvements: avoid serializing the same request twice while processing, and cache deserialization results of large requests. Controlled by the new coordination setting `min_request_size_for_cache`. [#49004](https://github.com/ClickHouse/ClickHouse/pull/49004) ([Antonio Andelic](https://github.com/antonio2368)).
* Reduced the number of `List` ZooKeeper requests when selecting parts to merge and a lot of partitions do not have anything to merge. [#49637](https://github.com/ClickHouse/ClickHouse/pull/49637) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Rework locking in the FS cache [#44985](https://github.com/ClickHouse/ClickHouse/pull/44985) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable pure parallel replicas if the trivial count optimization is possible. [#50594](https://github.com/ClickHouse/ClickHouse/pull/50594) ([Raúl Marín](https://github.com/Algunenano)).
* Don't send a HEAD request for all keys in Iceberg schema inference, only for the keys that are used for reading data. [#50203](https://github.com/ClickHouse/ClickHouse/pull/50203) ([Kruglov Pavel](https://github.com/Avogar)).
* The setting `enable_memory_bound_merging_of_aggregation_results` is enabled by default. [#50319](https://github.com/ClickHouse/ClickHouse/pull/50319) ([Nikita Taranov](https://github.com/nickitat)).
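
The new knobs above are regular settings; a hedged sketch of tuning them per session (the values are illustrative, not recommendations):

```sql
SET s3_max_inflight_parts_for_one_file = 4;            -- cap concurrent multipart-upload parts per file
SET max_read_buffer_size_local_fs = 131072;            -- read buffer size for the local filesystem
SET max_read_buffer_size_remote_fs = 1048576;          -- read buffer size for remote filesystems
SET enable_memory_bound_merging_of_aggregation_results = 1;  -- now the default
```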

#### Experimental Feature
* The `DEFLATE_QPL` codec lowers the minimum required SIMD version to SSE 4.2 ([doc change in QPL](https://github.com/intel/qpl/commit/3f8f5cea27739f5261e8fd577dc233ffe88bf679)). Intel® QPL relies on a run-time kernel dispatcher and a cpuid check to choose the best available implementation (SSE/AVX2/AVX512). Restructured the CMake file for the QPL build in ClickHouse to align with the latest upstream QPL. [#49811](https://github.com/ClickHouse/ClickHouse/pull/49811) ([jasperzhu](https://github.com/jinjunzh)).
* Add initial support for JOINs with pure parallel replicas. [#49544](https://github.com/ClickHouse/ClickHouse/pull/49544) ([Raúl Marín](https://github.com/Algunenano)).
* More parallelism on the removal of `Outdated` parts with "zero-copy replication". [#49630](https://github.com/ClickHouse/ClickHouse/pull/49630) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Parallel replicas: 1) fixed an error `NOT_FOUND_COLUMN_IN_BLOCK` when using parallel replicas with non-replicated storage and the setting `parallel_replicas_for_non_replicated_merge_tree` disabled; 2) `allow_experimental_parallel_reading_from_replicas` now has 3 possible values: 0 - disabled, 1 - enabled, but silently disabled in case of failure (e.g. with FINAL or JOIN), 2 - enabled, throwing an exception in case of failure; 3) if the FINAL modifier is used in a SELECT query and parallel replicas are enabled, ClickHouse will try to disable them if `allow_experimental_parallel_reading_from_replicas` is set to 1 and throw an exception otherwise (see the sketch after this list). [#50195](https://github.com/ClickHouse/ClickHouse/pull/50195) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* When parallel replicas are enabled, they will always skip unavailable servers (the behavior is controlled by the setting `skip_unavailable_shards`, which is enabled by default and can only be disabled). This closes [#48565](https://github.com/ClickHouse/ClickHouse/issues/48565). [#50293](https://github.com/ClickHouse/ClickHouse/pull/50293) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
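
A sketch of how the parallel-replica settings mentioned above fit together (the table name is hypothetical, and a configured cluster is assumed; `2` requests an exception instead of a silent fallback on failure):

```sql
SET allow_experimental_parallel_reading_from_replicas = 2;
SET max_parallel_replicas = 3;
SET parallel_replicas_for_non_replicated_merge_tree = 1;  -- also use them for non-replicated MergeTree
SELECT count() FROM big_table;  -- unavailable replicas are skipped (skip_unavailable_shards)
```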

#### Improvement
* The `BACKUP` command will not decrypt data from encrypted disks while making a backup. Instead, the data will be stored in the backup in encrypted form. Such backups can be restored only to an encrypted disk with the same (or extended) list of encryption keys. [#48896](https://github.com/ClickHouse/ClickHouse/pull/48896) ([Vitaly Baranov](https://github.com/vitlibar)).
* Added the possibility to use temporary tables in the FROM part of `ATTACH PARTITION FROM` and `REPLACE PARTITION FROM`. [#49436](https://github.com/ClickHouse/ClickHouse/pull/49436) ([Roman Vasin](https://github.com/rvasin)).
* Added the setting `async_insert` for `MergeTree` tables. It has the same meaning as the query-level setting `async_insert` and enables asynchronous inserts for a specific table. Note: it doesn't take effect for insert queries from `clickhouse-client`; use the query-level setting in that case. [#49122](https://github.com/ClickHouse/ClickHouse/pull/49122) ([Anton Popov](https://github.com/CurtizJ)).
* Add support for size suffixes in quota creation statement parameters. [#49087](https://github.com/ClickHouse/ClickHouse/pull/49087) ([Eridanus](https://github.com/Eridanus117)).
* Extend `first_value` and `last_value` to accept NULL. [#46467](https://github.com/ClickHouse/ClickHouse/pull/46467) ([lgbo](https://github.com/lgbo-ustc)).
* Add the aliases `str_to_map` and `mapFromString` for `extractKeyValuePairs`. Closes [#47185](https://github.com/ClickHouse/ClickHouse/issues/47185). [#49466](https://github.com/ClickHouse/ClickHouse/pull/49466) ([flynn](https://github.com/ucasfl)).
* Add support for cgroup version 2 for asynchronous metrics about memory usage and availability. This closes [#37983](https://github.com/ClickHouse/ClickHouse/issues/37983). [#45999](https://github.com/ClickHouse/ClickHouse/pull/45999) ([sichenzhao](https://github.com/sichenzhao)).
* Cluster table functions should always skip unavailable shards. Closes [#46314](https://github.com/ClickHouse/ClickHouse/issues/46314). [#46765](https://github.com/ClickHouse/ClickHouse/pull/46765) ([zk_kiger](https://github.com/zk-kiger)).
* Allow a CSV file to contain empty columns in its header. [#47496](https://github.com/ClickHouse/ClickHouse/pull/47496) ([你不要过来啊](https://github.com/iiiuwioajdks)).
* Add the Google Cloud Storage S3-compatible table function `gcs`. Like the `oss` and `cosn` functions, it is just an alias over the `s3` table function, and it does not bring any new features. [#47815](https://github.com/ClickHouse/ClickHouse/pull/47815) ([Kuba Kaflik](https://github.com/jkaflik)).
* Add the ability to use strict part sizes for S3 (compatibility with CloudFlare R2 S3 storage). [#48492](https://github.com/ClickHouse/ClickHouse/pull/48492) ([Azat Khuzhin](https://github.com/azat)).
* Added new columns with info about `Replicated` database replicas to `system.clusters`: `database_shard_name`, `database_replica_name`, `is_active`. Added an optional `FROM SHARD` clause to the `SYSTEM DROP DATABASE REPLICA` query. [#48548](https://github.com/ClickHouse/ClickHouse/pull/48548) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a new column `zookeeper_name` in `system.replicas` to indicate on which (auxiliary) ZooKeeper cluster the replicated table's metadata is stored. [#48549](https://github.com/ClickHouse/ClickHouse/pull/48549) ([cangyin](https://github.com/cangyin)).
* The `IN` operator now supports comparison of `Date` and `Date32`. Closes [#48736](https://github.com/ClickHouse/ClickHouse/issues/48736). [#48806](https://github.com/ClickHouse/ClickHouse/pull/48806) ([flynn](https://github.com/ucasfl)).
* Support for erasure codes in `HDFS`. Authors: @M1eyu2018, @tomscut. [#48833](https://github.com/ClickHouse/ClickHouse/pull/48833) ([M1eyu](https://github.com/M1eyu2018)).
* Implement `SYSTEM DROP REPLICA` from auxiliary ZooKeeper clusters; may close [#48931](https://github.com/ClickHouse/ClickHouse/issues/48931). [#48932](https://github.com/ClickHouse/ClickHouse/pull/48932) ([wangxiaobo](https://github.com/wzb5212)).
* Add the Array data type to MongoDB. Closes [#48598](https://github.com/ClickHouse/ClickHouse/issues/48598). [#48983](https://github.com/ClickHouse/ClickHouse/pull/48983) ([Nikolay Degterinsky](https://github.com/evillique)).
* Support storing `Interval` data types in tables. [#49085](https://github.com/ClickHouse/ClickHouse/pull/49085) ([larryluogit](https://github.com/larryluogit)).
* Allow using the `ntile` window function without an explicit window frame definition: `ntile(3) OVER (ORDER BY a)` (see the sketch after this list). Closes [#46763](https://github.com/ClickHouse/ClickHouse/issues/46763). [#49093](https://github.com/ClickHouse/ClickHouse/pull/49093) ([vdimir](https://github.com/vdimir)).
* Added settings (`number_of_mutations_to_delay`, `number_of_mutations_to_throw`) to delay or throw `ALTER` queries that create mutations (`ALTER UPDATE`, `ALTER DELETE`, `ALTER MODIFY COLUMN`, ...) when the table already has a lot of unfinished mutations. [#49117](https://github.com/ClickHouse/ClickHouse/pull/49117) ([Anton Popov](https://github.com/CurtizJ)).
* Catch the exception from `create_directories` in the filesystem cache. [#49203](https://github.com/ClickHouse/ClickHouse/pull/49203) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Copy embedded examples to a new field `example` in `system.functions` to supplement the field `description`. [#49222](https://github.com/ClickHouse/ClickHouse/pull/49222) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Enable connection options for the MongoDB dictionary. Example: `<source><mongodb><host>localhost</host><port>27017</port><user></user><password></password><db>test</db><collection>dictionary_source</collection><options>ssl=true</options></mongodb></source>`. [#49225](https://github.com/ClickHouse/ClickHouse/pull/49225) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Added an alias `asymptotic` for the `asymp` computational method of `kolmogorovSmirnovTest`. Improved documentation. [#49286](https://github.com/ClickHouse/ClickHouse/pull/49286) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* The aggregation functions `groupBitAnd`/`groupBitOr`/`groupBitXor` now work on signed integer data. This makes them consistent with the behavior of the scalar functions `bitAnd`/`bitOr`/`bitXor`. [#49292](https://github.com/ClickHouse/ClickHouse/pull/49292) ([exmy](https://github.com/exmy)).
* Split function documentation into more fine-grained fields. [#49300](https://github.com/ClickHouse/ClickHouse/pull/49300) ([Robert Schulze](https://github.com/rschu1ze)).
* Use multiple threads shared between all tables within a server to load outdated data parts. The size of the pool and its queue are controlled by the `max_outdated_parts_loading_thread_pool_size` and `outdated_part_loading_thread_pool_queue_size` settings. [#49317](https://github.com/ClickHouse/ClickHouse/pull/49317) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Don't overestimate the size of processed data for `LowCardinality` columns when they share dictionaries between blocks. This closes [#49322](https://github.com/ClickHouse/ClickHouse/issues/49322). See also [#48745](https://github.com/ClickHouse/ClickHouse/issues/48745). [#49323](https://github.com/ClickHouse/ClickHouse/pull/49323) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The Parquet writer now uses a reasonable row group size when invoked through `OUTFILE`. [#49325](https://github.com/ClickHouse/ClickHouse/pull/49325) ([Michael Kolupaev](https://github.com/al13n321)).
* Allow restricted keywords like `ARRAY` as an alias if the alias is quoted. Closes [#49324](https://github.com/ClickHouse/ClickHouse/issues/49324). [#49360](https://github.com/ClickHouse/ClickHouse/pull/49360) ([Nikolay Degterinsky](https://github.com/evillique)).
* Data part loading and deletion jobs were moved to shared server-wide pools instead of per-table pools. Pool sizes are controlled via the settings `max_active_parts_loading_thread_pool_size`, `max_outdated_parts_loading_thread_pool_size` and `max_parts_cleaning_thread_pool_size` in the top-level config. The table-level settings `max_part_loading_threads` and `max_part_removal_threads` became obsolete. [#49474](https://github.com/ClickHouse/ClickHouse/pull/49474) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow `?password=pass` in the URL of the Play UI. The password is replaced in the browser history. [#49505](https://github.com/ClickHouse/ClickHouse/pull/49505) ([Mike Kot](https://github.com/myrrc)).
* Allow reading zero-size objects from remote filesystems (empty files are not backed up, so we might end up with zero blobs in the metadata file). Closes [#49480](https://github.com/ClickHouse/ClickHouse/issues/49480). [#49519](https://github.com/ClickHouse/ClickHouse/pull/49519) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Attach the thread MemoryTracker to `total_memory_tracker` after the `ThreadGroup` is detached. [#49527](https://github.com/ClickHouse/ClickHouse/pull/49527) ([Dmitry Novik](https://github.com/novikd)).
* Fix parameterized views when a query parameter is used multiple times in the query. [#49556](https://github.com/ClickHouse/ClickHouse/pull/49556) ([Azat Khuzhin](https://github.com/azat)).
* Release memory allocated for the last sent ProfileEvents snapshot in the context of a query. Follow-up to [#47564](https://github.com/ClickHouse/ClickHouse/issues/47564). [#49561](https://github.com/ClickHouse/ClickHouse/pull/49561) ([Dmitry Novik](https://github.com/novikd)).
* The function `makeDate` now provides a MySQL-compatible overload (year and day-of-the-year arguments; see the sketch after this list). [#49603](https://github.com/ClickHouse/ClickHouse/pull/49603) ([Robert Schulze](https://github.com/rschu1ze)).
* Support the `dictionary` table function for `RegExpTreeDictionary`. [#49666](https://github.com/ClickHouse/ClickHouse/pull/49666) ([Han Fei](https://github.com/hanfei1991)).
* Added a weighted fair IO scheduling policy. Added a dynamic resource manager, which allows the IO scheduling hierarchy to be updated at runtime without server restarts. [#49671](https://github.com/ClickHouse/ClickHouse/pull/49671) ([Sergei Trifonov](https://github.com/serxa)).
* Add a compose request after a multipart upload to GCS. This enables the use of the copy operation on objects uploaded with multipart uploads. It's recommended to set `s3_strict_upload_part_size` to some value, because compose requests can fail on objects created with parts of different sizes. [#49693](https://github.com/ClickHouse/ClickHouse/pull/49693) ([Antonio Andelic](https://github.com/antonio2368)).
* For the `extractKeyValuePairs` function: improve the "best-effort" parsing logic to accept `key_value_delimiter` as a valid part of the value. This also simplifies branching and might even speed things up a bit. [#49760](https://github.com/ClickHouse/ClickHouse/pull/49760) ([Arthur Passos](https://github.com/arthurpassos)).
* Add an `initial_query_id` field to `system.processors_profile_log`. [#49777](https://github.com/ClickHouse/ClickHouse/pull/49777) ([helifu](https://github.com/helifu)).
* System log tables can now have custom sorting keys. [#49778](https://github.com/ClickHouse/ClickHouse/pull/49778) ([helifu](https://github.com/helifu)).
* A new field `partitions` in `system.query_log` indicates which partitions participated in the calculation. [#49779](https://github.com/ClickHouse/ClickHouse/pull/49779) ([helifu](https://github.com/helifu)).
* Added the `enable_the_endpoint_id_with_zookeeper_name_prefix` setting for `ReplicatedMergeTree` (disabled by default). When enabled, it adds the ZooKeeper cluster name to the table's interserver communication endpoint. It avoids `Duplicate interserver IO endpoint` errors when having replicated tables with the same path but different auxiliary ZooKeepers. [#49780](https://github.com/ClickHouse/ClickHouse/pull/49780) ([helifu](https://github.com/helifu)).
* Add query parameters to `clickhouse-local`. Closes [#46561](https://github.com/ClickHouse/ClickHouse/issues/46561). [#49785](https://github.com/ClickHouse/ClickHouse/pull/49785) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow loading dictionaries and functions from YAML by default. In previous versions, it required editing `dictionaries_config` or `user_defined_executable_functions_config` in the configuration file, as they expected `*.xml` files. [#49812](https://github.com/ClickHouse/ClickHouse/pull/49812) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The Kafka table engine now allows using alias columns. [#49824](https://github.com/ClickHouse/ClickHouse/pull/49824) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Add a setting to limit the maximum number of pairs produced by `extractKeyValuePairs`, as a safeguard against using way too much memory. [#49836](https://github.com/ClickHouse/ClickHouse/pull/49836) ([Arthur Passos](https://github.com/arthurpassos)).
* Add support for the (unusual) case where the arguments in the `IN` operator are single-element tuples. [#49844](https://github.com/ClickHouse/ClickHouse/pull/49844) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* The `bitHammingDistance` function now supports the `String` and `FixedString` data types. Closes [#48827](https://github.com/ClickHouse/ClickHouse/issues/48827). [#49858](https://github.com/ClickHouse/ClickHouse/pull/49858) ([flynn](https://github.com/ucasfl)).
* Fix timeout resetting errors in the client on OS X. [#49863](https://github.com/ClickHouse/ClickHouse/pull/49863) ([alekar](https://github.com/alekar)).
* Add support for big integers, such as UInt128, Int128, UInt256, and Int256, in the function `bitCount`. This enables Hamming distance over large bit masks for AI applications. [#49867](https://github.com/ClickHouse/ClickHouse/pull/49867) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fingerprints are used instead of key IDs in encrypted disks. This simplifies the configuration of encrypted disks. [#49882](https://github.com/ClickHouse/ClickHouse/pull/49882) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add the UUID data type to PostgreSQL. Closes [#49739](https://github.com/ClickHouse/ClickHouse/issues/49739). [#49894](https://github.com/ClickHouse/ClickHouse/pull/49894) ([Nikolay Degterinsky](https://github.com/evillique)).
* The function `toUnixTimestamp` now accepts `Date` and `Date32` arguments. [#49989](https://github.com/ClickHouse/ClickHouse/pull/49989) ([Victor Krasnov](https://github.com/sirvickr)).
* Charge only server memory for dictionaries. [#49995](https://github.com/ClickHouse/ClickHouse/pull/49995) ([Azat Khuzhin](https://github.com/azat)).
* The server will allow using the `SQL_*` settings such as `SQL_AUTO_IS_NULL` as no-ops for MySQL compatibility. This closes [#49927](https://github.com/ClickHouse/ClickHouse/issues/49927). [#50013](https://github.com/ClickHouse/ClickHouse/pull/50013) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Preserve the initial_query_id for ON CLUSTER queries, which is useful for introspection (under `distributed_ddl_entry_format_version=5`). [#50015](https://github.com/ClickHouse/ClickHouse/pull/50015) ([Azat Khuzhin](https://github.com/azat)).
* Preserve backward compatibility for renamed settings by using aliases (`allow_experimental_projection_optimization` for `optimize_use_projections`, `allow_experimental_lightweight_delete` for `enable_lightweight_delete`). [#50044](https://github.com/ClickHouse/ClickHouse/pull/50044) ([Azat Khuzhin](https://github.com/azat)).
* Support passing the FQDN through the setting `my_hostname` to register a cluster node in Keeper. Add the setting `invisible` to support multiple compute groups: a compute group, as a cluster, is invisible to other compute groups. [#50186](https://github.com/ClickHouse/ClickHouse/pull/50186) ([Yangkuan Liu](https://github.com/LiuYangkuan)).
* Fix PostgreSQL reading all the data even when `LIMIT n` is specified. [#50187](https://github.com/ClickHouse/ClickHouse/pull/50187) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add new profile events for queries with subqueries (`QueriesWithSubqueries`/`SelectQueriesWithSubqueries`/`InsertQueriesWithSubqueries`). [#50204](https://github.com/ClickHouse/ClickHouse/pull/50204) ([Azat Khuzhin](https://github.com/azat)).
* Add the `roles` field in the users.xml file, which allows specifying roles with grants via a config file. [#50278](https://github.com/ClickHouse/ClickHouse/pull/50278) ([pufit](https://github.com/pufit)).
* Report `CGroupCpuCfsPeriod` and `CGroupCpuCfsQuota` in AsynchronousMetrics, and respect cgroup v2 memory limits during server startup. [#50379](https://github.com/ClickHouse/ClickHouse/pull/50379) ([alekar](https://github.com/alekar)).
* Add a signal handler for SIGQUIT to work the same way as SIGINT. Closes [#50298](https://github.com/ClickHouse/ClickHouse/issues/50298). [#50435](https://github.com/ClickHouse/ClickHouse/pull/50435) ([Nikolay Degterinsky](https://github.com/evillique)).
* If JSON parsing fails due to the large size of the object, output the last position to allow debugging. [#50474](https://github.com/ClickHouse/ClickHouse/pull/50474) ([Valentin Alexeev](https://github.com/valentinalexeev)).
* Support decimals with non-fixed size. Closes [#49130](https://github.com/ClickHouse/ClickHouse/issues/49130). [#50586](https://github.com/ClickHouse/ClickHouse/pull/50586) ([Kruglov Pavel](https://github.com/Avogar)).
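
A few of the improvements above in action (a sketch; values and results are illustrative):

```sql
-- ntile() no longer requires an explicit window frame definition.
SELECT number, ntile(3) OVER (ORDER BY number) FROM numbers(9);

-- toUnixTimestamp() now accepts Date and Date32.
SELECT toUnixTimestamp(toDate('2023-06-08'));

-- makeDate() gained a MySQL-compatible (year, day-of-year) overload.
SELECT makeDate(2023, 159);  -- 2023-06-08
```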

#### Build/Testing/Packaging Improvement
* New and improved `keeper-bench`. Everything can be customized from a YAML/XML file: request generators, where each type of request generator can have a specific set of fields; multi requests, generated just by placing requests under a `multi` key; a `weight` field for each request or subrequest in a multi request to control the distribution; trees that need to be set up for a test run; hosts, with all timeouts customizable and control over how many sessions to generate for each host; and integers defined with `min_value` and `max_value` fields, which act as random number generators. [#48547](https://github.com/ClickHouse/ClickHouse/pull/48547) ([Antonio Andelic](https://github.com/antonio2368)).
* `io_uring` is not supported on macOS; don't choose it when running tests locally, to avoid occasional failures. [#49250](https://github.com/ClickHouse/ClickHouse/pull/49250) ([Frank Chen](https://github.com/FrankChen021)).
* Support named fault injection for testing. [#49361](https://github.com/ClickHouse/ClickHouse/pull/49361) ([Han Fei](https://github.com/hanfei1991)).
* Allow running ClickHouse in an OS where the `prctl` (process control) syscall is not available, such as AWS Lambda. [#49538](https://github.com/ClickHouse/ClickHouse/pull/49538) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed the issue of a build conflict between contrib/isa-l and isa-l in QPL [#49296](https://github.com/ClickHouse/ClickHouse/issues/49296). [#49584](https://github.com/ClickHouse/ClickHouse/pull/49584) ([jasperzhu](https://github.com/jinjunzh)).
* Utilities are now only built if explicitly requested (`-DENABLE_UTILS=1`) instead of by default; this reduces link times in typical development builds. [#49620](https://github.com/ClickHouse/ClickHouse/pull/49620) ([Robert Schulze](https://github.com/rschu1ze)).
* Pull the build description of idxd-config into a separate CMake file to avoid accidental removal in the future. [#49651](https://github.com/ClickHouse/ClickHouse/pull/49651) ([jasperzhu](https://github.com/jinjunzh)).
* Add a CI check with the analyzer enabled in master. Follow-up to [#49562](https://github.com/ClickHouse/ClickHouse/issues/49562). [#49668](https://github.com/ClickHouse/ClickHouse/pull/49668) ([Dmitry Novik](https://github.com/novikd)).
* Switch to LLVM/clang 16. [#49678](https://github.com/ClickHouse/ClickHouse/pull/49678) ([Azat Khuzhin](https://github.com/azat)).
* Allow building ClickHouse with clang-17. [#49851](https://github.com/ClickHouse/ClickHouse/pull/49851) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#50410](https://github.com/ClickHouse/ClickHouse/pull/50410) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* ClickHouse is now easier to integrate into other CMake projects. [#49991](https://github.com/ClickHouse/ClickHouse/pull/49991) ([Amos Bird](https://github.com/amosbird)). (Which is strongly discouraged - Alexey Milovidov).
* Fix strange additional QEMU logging after [#47151](https://github.com/ClickHouse/ClickHouse/issues/47151), see https://s3.amazonaws.com/clickhouse-test-reports/50078/a4743996ee4f3583884d07bcd6501df0cfdaa346/stateless_tests__release__databasereplicated__[3_4].html. [#50442](https://github.com/ClickHouse/ClickHouse/pull/50442) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* ClickHouse can work on Linux RISC-V 6.1.22. This closes [#50456](https://github.com/ClickHouse/ClickHouse/issues/50456). [#50457](https://github.com/ClickHouse/ClickHouse/pull/50457) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump internal protobuf to v3.18 (fixes the bogus CVE-2022-1941). [#50400](https://github.com/ClickHouse/ClickHouse/pull/50400) ([Robert Schulze](https://github.com/rschu1ze)).
* Bump internal libxml2 to v2.10.4 (fixes the bogus CVE-2023-28484 and the bogus CVE-2023-29469). [#50402](https://github.com/ClickHouse/ClickHouse/pull/50402) ([Robert Schulze](https://github.com/rschu1ze)).
* Bump c-ares to v1.19.1 (bogus CVE-2023-32067, bogus CVE-2023-31130, bogus CVE-2023-31147). [#50403](https://github.com/ClickHouse/ClickHouse/pull/50403) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix the bogus CVE-2022-2469 in libgsasl. [#50404](https://github.com/ClickHouse/ClickHouse/pull/50404) ([Robert Schulze](https://github.com/rschu1ze)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* ActionsDAG: fix wrong optimization [#47584](https://github.com/ClickHouse/ClickHouse/pull/47584) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Correctly handle concurrent snapshots in Keeper [#48466](https://github.com/ClickHouse/ClickHouse/pull/48466) ([Antonio Andelic](https://github.com/antonio2368)).
* MergeTreeMarksLoader holds DataPart instead of DataPartStorage [#48515](https://github.com/ClickHouse/ClickHouse/pull/48515) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Sequence state fix [#48603](https://github.com/ClickHouse/ClickHouse/pull/48603) ([Ilya Golshtein](https://github.com/ilejn)).
* Backup/Restore concurrency check on previous fails [#48726](https://github.com/ClickHouse/ClickHouse/pull/48726) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix: attaching a table with a non-existent ZK path does not increase the ReadonlyReplica metric [#48954](https://github.com/ClickHouse/ClickHouse/pull/48954) ([wangxiaobo](https://github.com/wzb5212)).
* Fix possible `terminate called for uncaught exception` in some places [#49112](https://github.com/ClickHouse/ClickHouse/pull/49112) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `key not found` error for queries with multiple StorageJoin [#49137](https://github.com/ClickHouse/ClickHouse/pull/49137) ([vdimir](https://github.com/vdimir)).
* Fix wrong query result when using a nullable primary key [#49172](https://github.com/ClickHouse/ClickHouse/pull/49172) ([Duc Canh Le](https://github.com/canhld94)).
* Fix `reinterpretAs*()` on big-endian machines [#49198](https://github.com/ClickHouse/ClickHouse/pull/49198) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* (Experimental zero-copy replication) Lock zero-copy parts more atomically [#49211](https://github.com/ClickHouse/ClickHouse/pull/49211) ([alesapin](https://github.com/alesapin)).
* Fix race on Outdated parts loading [#49223](https://github.com/ClickHouse/ClickHouse/pull/49223) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix wrong answer when all key values are NULL and the query uses GROUP BY with ROLLUP [#49282](https://github.com/ClickHouse/ClickHouse/pull/49282) ([Shuai li](https://github.com/loneylee)).
* Fix calculating load_factor for HASHED dictionaries with SHARDS [#49319](https://github.com/ClickHouse/ClickHouse/pull/49319) ([Azat Khuzhin](https://github.com/azat)).
* Disallow configuring compression CODECs for alias columns [#49363](https://github.com/ClickHouse/ClickHouse/pull/49363) ([Timur Solodovnikov](https://github.com/tsolodov)).
* Fix bug in removal of an existing part directory [#49365](https://github.com/ClickHouse/ClickHouse/pull/49365) ([alesapin](https://github.com/alesapin)).
* Properly fix GCS when HMAC is used [#49390](https://github.com/ClickHouse/ClickHouse/pull/49390) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix fuzz bug when a subquery set is not built when reading from `remote()` [#49425](https://github.com/ClickHouse/ClickHouse/pull/49425) ([Alexander Gololobov](https://github.com/davenger)).
* Invert `shutdown_wait_unfinished_queries` [#49427](https://github.com/ClickHouse/ClickHouse/pull/49427) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* (Experimental zero-copy replication) Fix another zero-copy bug [#49473](https://github.com/ClickHouse/ClickHouse/pull/49473) ([alesapin](https://github.com/alesapin)).
* Fix postgres database setting [#49481](https://github.com/ClickHouse/ClickHouse/pull/49481) ([Mal Curtis](https://github.com/snikch)).
* Correctly handle `s3Cluster` arguments [#49490](https://github.com/ClickHouse/ClickHouse/pull/49490) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix bug in TraceCollector destructor [#49508](https://github.com/ClickHouse/ClickHouse/pull/49508) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix AsynchronousReadIndirectBufferFromRemoteFS breaking on short seeks [#49525](https://github.com/ClickHouse/ClickHouse/pull/49525) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix dictionaries loading order [#49560](https://github.com/ClickHouse/ClickHouse/pull/49560) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Forbid changing the data type of an Object('json') column [#49563](https://github.com/ClickHouse/ClickHouse/pull/49563) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix stress test (Logical error: Expected 7134 >= 11030) [#49623](https://github.com/ClickHouse/ClickHouse/pull/49623) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix bug in DISTINCT [#49628](https://github.com/ClickHouse/ClickHouse/pull/49628) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix: DISTINCT in order with zero values in non-sorted columns [#49636](https://github.com/ClickHouse/ClickHouse/pull/49636) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix off-by-one error in big integers found by UBSan with fuzzer [#49645](https://github.com/ClickHouse/ClickHouse/pull/49645) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix reading from sparse columns after restart [#49660](https://github.com/ClickHouse/ClickHouse/pull/49660) ([Anton Popov](https://github.com/CurtizJ)).
* Fix assert in SpanHolder::finish() with fibers [#49673](https://github.com/ClickHouse/ClickHouse/pull/49673) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix short circuit functions and mutations with sparse arguments [#49716](https://github.com/ClickHouse/ClickHouse/pull/49716) ([Anton Popov](https://github.com/CurtizJ)).
* Fix writing appended files to incremental backups [#49725](https://github.com/ClickHouse/ClickHouse/pull/49725) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix the "There is no physical column _row_exists in table" error occurring during a lightweight delete mutation on a table with an Object column [#49737](https://github.com/ClickHouse/ClickHouse/pull/49737) ([Alexander Gololobov](https://github.com/davenger)).
* Fix MSan issue in `randomStringUTF8` (uneven number) [#49750](https://github.com/ClickHouse/ClickHouse/pull/49750) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix aggregate function kolmogorovSmirnovTest [#49768](https://github.com/ClickHouse/ClickHouse/pull/49768) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
* Fix settings aliases in the native protocol [#49776](https://github.com/ClickHouse/ClickHouse/pull/49776) ([Azat Khuzhin](https://github.com/azat)).
* Fix `arrayMap` with an array of tuples with a single argument [#49789](https://github.com/ClickHouse/ClickHouse/pull/49789) ([Anton Popov](https://github.com/CurtizJ)).
* Fix per-query IO/BACKUP throttling settings [#49797](https://github.com/ClickHouse/ClickHouse/pull/49797) ([Azat Khuzhin](https://github.com/azat)).
* Fix setting NULL in profile definition [#49831](https://github.com/ClickHouse/ClickHouse/pull/49831) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a bug with projections and the `aggregate_functions_null_for_empty` setting (for `query_plan_optimize_projection`) [#49873](https://github.com/ClickHouse/ClickHouse/pull/49873) ([Amos Bird](https://github.com/amosbird)).
* Fix processing of a pending batch for Distributed async INSERT after restart [#49884](https://github.com/ClickHouse/ClickHouse/pull/49884) ([Azat Khuzhin](https://github.com/azat)).
* Fix assertion in CacheMetadata::doCleanup [#49914](https://github.com/ClickHouse/ClickHouse/pull/49914) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `is_prefix` in OptimizeRegularExpression [#49919](https://github.com/ClickHouse/ClickHouse/pull/49919) ([Han Fei](https://github.com/hanfei1991)).
* Fix metrics `WriteBufferFromS3Bytes`, `WriteBufferFromS3Microseconds` and `WriteBufferFromS3RequestsErrors` [#49930](https://github.com/ClickHouse/ClickHouse/pull/49930) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Fix IPv6 encoding in protobuf [#49933](https://github.com/ClickHouse/ClickHouse/pull/49933) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix possible `Logical error` on bad Nullable parsing for text formats [#49960](https://github.com/ClickHouse/ClickHouse/pull/49960) ([Kruglov Pavel](https://github.com/Avogar)).
* Add the setting `output_format_parquet_compliant_nested_types` to produce more compatible Parquet files [#50001](https://github.com/ClickHouse/ClickHouse/pull/50001) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix logical error in stress test "Not enough space to add ..." [#50021](https://github.com/ClickHouse/ClickHouse/pull/50021) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Avoid deadlock when starting a table in the attach thread of `ReplicatedMergeTree` [#50026](https://github.com/ClickHouse/ClickHouse/pull/50026) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix assert in SpanHolder::finish() with fibers, attempt 2 [#50034](https://github.com/ClickHouse/ClickHouse/pull/50034) ([Kruglov Pavel](https://github.com/Avogar)).
* Add proper escaping for DDL OpenTelemetry context serialization [#50045](https://github.com/ClickHouse/ClickHouse/pull/50045) ([Azat Khuzhin](https://github.com/azat)).
* Fix reporting of broken projection parts [#50052](https://github.com/ClickHouse/ClickHouse/pull/50052) ([Amos Bird](https://github.com/amosbird)).
* Fix JIT compilation of not-equals comparison with NaN [#50056](https://github.com/ClickHouse/ClickHouse/pull/50056) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix crash in case of a Replicated database without arguments [#50058](https://github.com/ClickHouse/ClickHouse/pull/50058) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash with `multiIf` and a constant condition and nullable arguments [#50123](https://github.com/ClickHouse/ClickHouse/pull/50123) ([Anton Popov](https://github.com/CurtizJ)).
* Fix invalid index analysis for date-related keys [#50153](https://github.com/ClickHouse/ClickHouse/pull/50153) ([Amos Bird](https://github.com/amosbird)).
* Do not allow `MODIFY ORDER BY` when there are no ORDER BY columns [#50154](https://github.com/ClickHouse/ClickHouse/pull/50154) ([Han Fei](https://github.com/hanfei1991)).
* Fix broken index analysis when a binary operator contains a NULL constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)).
* clickhouse-client: disallow the use of `--query` and `--queries-file` at the same time [#50210](https://github.com/ClickHouse/ClickHouse/pull/50210) ([Alexey Gerasimchuk](https://github.com/Demilivor)).
* Fix UB for INTO OUTFILE extensions (APPEND / AND STDOUT) and WATCH EVENTS [#50216](https://github.com/ClickHouse/ClickHouse/pull/50216) ([Azat Khuzhin](https://github.com/azat)).
* Fix skipping of spaces at the end of a row in the CustomSeparatedIgnoreSpaces format [#50224](https://github.com/ClickHouse/ClickHouse/pull/50224) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix Iceberg metadata parsing [#50232](https://github.com/ClickHouse/ClickHouse/pull/50232) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix nested distributed SELECT in WITH clause [#50234](https://github.com/ClickHouse/ClickHouse/pull/50234) ([Azat Khuzhin](https://github.com/azat)).
* Fix MSan issue in keyed SipHash [#50245](https://github.com/ClickHouse/ClickHouse/pull/50245) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix bugs in Poco sockets in non-blocking mode; use true non-blocking sockets [#50252](https://github.com/ClickHouse/ClickHouse/pull/50252) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix checksum calculation for backup entries [#50264](https://github.com/ClickHouse/ClickHouse/pull/50264) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix comparison functions with NaN [#50287](https://github.com/ClickHouse/ClickHouse/pull/50287) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix JIT aggregation with a nullable key [#50291](https://github.com/ClickHouse/ClickHouse/pull/50291) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix clickhouse-local crashing when writing empty Arrow or Parquet output [#50328](https://github.com/ClickHouse/ClickHouse/pull/50328) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix crash when `Pool::Entry::disconnect()` is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
* Improved part fetching by holding the directory lock longer [#50339](https://github.com/ClickHouse/ClickHouse/pull/50339) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix `bitShift*` functions with both constant arguments [#50343](https://github.com/ClickHouse/ClickHouse/pull/50343) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix Keeper deadlock on exception when preprocessing requests [#50387](https://github.com/ClickHouse/ClickHouse/pull/50387) ([frinkr](https://github.com/frinkr)).
* Fix hashing of const integer values [#50421](https://github.com/ClickHouse/ClickHouse/pull/50421) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix `merge_tree_min_rows_for_seek`/`merge_tree_min_bytes_for_seek` for data skipping indexes [#50432](https://github.com/ClickHouse/ClickHouse/pull/50432) ([Azat Khuzhin](https://github.com/azat)).
* Limit the number of in-flight tasks for loading outdated parts [#50450](https://github.com/ClickHouse/ClickHouse/pull/50450) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Keeper fix: apply the uncommitted state after snapshot install [#50483](https://github.com/ClickHouse/ClickHouse/pull/50483) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix incorrect constant folding [#50536](https://github.com/ClickHouse/ClickHouse/pull/50536) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix logical error in stress test (Not enough space to add ...) [#50583](https://github.com/ClickHouse/ClickHouse/pull/50583) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix converting Null to LowCardinality(Nullable) in the `values` table function [#50637](https://github.com/ClickHouse/ClickHouse/pull/50637) ([Kruglov Pavel](https://github.com/Avogar)).
* Revert invalid RegExpTreeDictionary optimization [#50642](https://github.com/ClickHouse/ClickHouse/pull/50642) ([Johann Gan](https://github.com/johanngan)).

### <a id="234"></a> ClickHouse release 23.4, 2023-04-26

#### Backward Incompatible Change

@ -57,7 +57,7 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
     # set CPU time limit to 1000 seconds
     set (RLIMIT_CPU 1000)

-    # gcc10/gcc10/clang -fsanitize=memory is too heavy
+    # -fsanitize=memory is too heavy
     if (SANITIZE STREQUAL "memory")
         set (RLIMIT_DATA 10000000000) # 10G
     endif()
@ -87,7 +87,6 @@ if (ENABLE_FUZZING)
     set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF)
     set (ENABLE_LIBRARIES 0)
     set (ENABLE_SSL 1)
-    set (USE_UNWIND ON)
     set (ENABLE_EMBEDDED_COMPILER 0)
     set (ENABLE_EXAMPLES 0)
     set (ENABLE_UTILS 0)
@ -102,6 +101,17 @@ if (ENABLE_FUZZING)
     set (ENABLE_PROTOBUF 1)
 endif()

+option (ENABLE_WOBOQ_CODEBROWSER "Build for woboq codebrowser" OFF)
+
+if (ENABLE_WOBOQ_CODEBROWSER)
+    set (ENABLE_EMBEDDED_COMPILER 0)
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-poison-system-directories")
+    # woboq codebrowser uses clang tooling, and they could add default system
+    # clang includes, and later clang will warn for those added by itself
+    # includes.
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-poison-system-directories")
+endif()
+
 # Global libraries
 # See:
 # - default_libs.cmake
@ -259,8 +269,8 @@ endif ()
 option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT})

 if (ENABLE_BUILD_PATH_MAPPING)
-    set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
-    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
+    set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
+    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
 endif ()

 option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
@ -280,7 +290,7 @@ set (CMAKE_C_STANDARD 11)
 set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
 set (CMAKE_C_STANDARD_REQUIRED ON)

-# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
+# Compiler-specific coverage flags e.g. -fcoverage-mapping
 option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)

 if (COMPILER_CLANG)
@ -333,22 +343,15 @@ if (COMPILER_CLANG)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")

-    if (NOT ENABLE_TESTS AND NOT SANITIZE)
+    if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
         # https://clang.llvm.org/docs/ThinLTO.html
-        # Applies to clang only.
+        # Applies to clang and linux only.
+        # Disabled when building with tests or sanitizers.
         option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
     endif()

     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrict-vtable-pointers")

-    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
-        # Set new experimental pass manager, it's a performance, build time and binary size win.
-        # Can be removed after https://reviews.llvm.org/D66490 merged and released to at least two versions of clang.
-        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexperimental-new-pass-manager")
-        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fexperimental-new-pass-manager")
-    endif ()
-
     # We cannot afford to use LTO when compiling unit tests, and it's not enough
     # to only supply -fno-lto at the final linking stage. So we disable it
     # completely.
@ -395,6 +398,8 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
     set(ENABLE_GWP_ASAN OFF)
 endif ()

+option (ENABLE_FIU "Enable Fiu" ON)
+
 option(WERROR "Enable -Werror compiler option" ON)

 if (WERROR)
@ -522,6 +527,26 @@ include (cmake/print_flags.cmake)

 if (ENABLE_RUST)
     add_subdirectory (rust)
+
+    # With LTO Rust adds few symbols with global visiblity, the most common is
+    # rust_eh_personality. And this leads to linking errors because multiple
+    # Rust libraries contains the same symbol.
+    #
+    # If it was shared library, that we could use version script for linker to
+    # hide this symbols, but libraries are static.
+    #
+    # we could in theory compile everything to one library but this will be a
|
||||
# mess
|
||||
#
|
||||
# But this should be OK since CI has lots of other builds that are done
|
||||
# without LTO and it will find multiple definitions if there will be any.
|
||||
#
|
||||
# More information about this behaviour in Rust can be found here
|
||||
# - https://github.com/rust-lang/rust/issues/44322
|
||||
# - https://alanwu.space/post/symbol-hygiene/
|
||||
if (ENABLE_THINLTO)
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--allow-multiple-definition")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
add_subdirectory (base)
|
||||
@ -542,7 +567,7 @@ if (NATIVE_BUILD_TARGETS
|
||||
)
|
||||
message (STATUS "Building native targets...")
|
||||
|
||||
set (NATIVE_BUILD_DIR "${CMAKE_BINARY_DIR}/native")
|
||||
set (NATIVE_BUILD_DIR "${PROJECT_BINARY_DIR}/native")
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
|
||||
@ -556,7 +581,7 @@ if (NATIVE_BUILD_TARGETS
|
||||
# Avoid overriding .cargo/config.toml with native toolchain.
|
||||
"-DENABLE_RUST=OFF"
|
||||
"-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
|
||||
${CMAKE_SOURCE_DIR}
|
||||
${PROJECT_SOURCE_DIR}
|
||||
WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
|
||||
COMMAND_ECHO STDOUT)
|
||||
|
||||
|
28
README.md
28
README.md
@ -16,16 +16,32 @@ curl https://clickhouse.com/ | sh
|
||||
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
||||
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
|
||||
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
||||
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
|
||||
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
|
||||
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlighting and navigation.
|
||||
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
|
||||
* [Static Analysis (SonarCloud)](https://sonarcloud.io/project/issues?resolved=false&id=ClickHouse_ClickHouse) proposes C++ quality improvements.
|
||||
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
||||
|
||||
## Upcoming Events
|
||||
* [**ClickHouse Spring Meetup in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/292517734) - April 26 - It's spring, and it's time to meet again in the city! Talks include: "Building a domain specific query language on top of Clickhouse", "A Galaxy of Information", "Our Journey to ClickHouse Cloud from Redshift", and a ClickHouse update!
|
||||
* [**v23.4 Release Webinar**](https://clickhouse.com/company/events/v23-4-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-04) - April 26 - 23.4 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
|
||||
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16 - Save the date! ClickHouse is coming back to Berlin. We’re excited to announce an upcoming ClickHouse Meetup that you won’t want to miss. Join us as we gather together to discuss the latest in the world of ClickHouse and share user stories.
|
||||
|
||||
* [**v23.7 Release Webinar**](https://clickhouse.com/company/events/v23-7-community-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-07) - Jul 27 - 23.7 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
|
||||
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
|
||||
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
|
||||
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
|
||||
* [**ClickHouse Meetup in Singapore**](https://www.meetup.com/clickhouse-singapore-meetup-group/events/294428050/) - Jul 27
|
||||
* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Sep 12
|
||||
|
||||
Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
|
||||
|
||||
## Recent Recordings
|
||||
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
|
||||
* **Recording available**: [**v23.3 Release Webinar**](https://www.youtube.com/watch?v=ISaGUjvBNao) UNDROP TABLE, server settings introspection, nested dynamic disks, MySQL compatibility, parseDate Time, Lightweight Deletes, Parallel Replicas, integrations updates, and so much more! Watch it now!
|
||||
* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
|
||||
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
|
||||
|
||||
|
||||
## Interested in joining ClickHouse and making it your full-time job?
|
||||
|
||||
We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting-edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker and a doer - we’ll definitely click!
|
||||
|
||||
Check out our **current openings** here: https://clickhouse.com/company/careers
|
||||
|
||||
Cant find what you are looking for, but want to let us know you are interested in joining ClickHouse? Email careers@clickhouse.com!
|
||||
|
@ -13,9 +13,11 @@ The following versions of ClickHouse server are currently being supported with s
|
||||
|
||||
| Version | Supported |
|
||||
|:-|:-|
|
||||
| 23.6 | ✔️ |
|
||||
| 23.5 | ✔️ |
|
||||
| 23.4 | ✔️ |
|
||||
| 23.3 | ✔️ |
|
||||
| 23.2 | ✔️ |
|
||||
| 23.2 | ❌ |
|
||||
| 23.1 | ❌ |
|
||||
| 22.12 | ❌ |
|
||||
| 22.11 | ❌ |
|
||||
|
@ -2,21 +2,23 @@
|
||||
|
||||
#include <base/strong_typedef.h>
|
||||
#include <base/extended_types.h>
|
||||
#include <Common/formatIPv6.h>
|
||||
#include <Common/memcmpSmall.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
using IPv4 = StrongTypedef<UInt32, struct IPv4Tag>;
|
||||
struct IPv4 : StrongTypedef<UInt32, struct IPv4Tag>
|
||||
{
|
||||
using StrongTypedef::StrongTypedef;
|
||||
using StrongTypedef::operator=;
|
||||
constexpr explicit IPv4(UInt64 value): StrongTypedef(static_cast<UnderlyingType>(value)) {}
|
||||
};
|
||||
|
||||
struct IPv6 : StrongTypedef<UInt128, struct IPv6Tag>
|
||||
{
|
||||
constexpr IPv6() = default;
|
||||
constexpr explicit IPv6(const UInt128 & x) : StrongTypedef(x) {}
|
||||
constexpr explicit IPv6(UInt128 && x) : StrongTypedef(std::move(x)) {}
|
||||
|
||||
IPv6 & operator=(const UInt128 & rhs) { StrongTypedef::operator=(rhs); return *this; }
|
||||
IPv6 & operator=(UInt128 && rhs) { StrongTypedef::operator=(std::move(rhs)); return *this; }
|
||||
using StrongTypedef::StrongTypedef;
|
||||
using StrongTypedef::operator=;
|
||||
|
||||
bool operator<(const IPv6 & rhs) const
|
||||
{
|
||||
@ -54,12 +56,22 @@ namespace DB
|
||||
|
||||
namespace std
|
||||
{
|
||||
/// For historical reasons we hash IPv6 as a FixedString(16)
|
||||
template <>
|
||||
struct hash<DB::IPv6>
|
||||
{
|
||||
size_t operator()(const DB::IPv6 & x) const
|
||||
{
|
||||
return std::hash<DB::IPv6::UnderlyingType>()(x.toUnderType());
|
||||
return std::hash<std::string_view>{}(std::string_view(reinterpret_cast<const char*>(&x.toUnderType()), IPV6_BINARY_LENGTH));
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct hash<DB::IPv4>
|
||||
{
|
||||
size_t operator()(const DB::IPv4 & x) const
|
||||
{
|
||||
return std::hash<DB::IPv4::UnderlyingType>()(x.toUnderType());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include <cassert>
|
||||
#include <stdexcept> // for std::logic_error
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
#include <vector>
|
||||
#include <functional>
|
||||
#include <iosfwd>
|
||||
@ -326,5 +327,16 @@ namespace ZeroTraits
|
||||
inline void set(StringRef & x) { x.size = 0; }
|
||||
}
|
||||
|
||||
namespace PackedZeroTraits
|
||||
{
|
||||
template <typename Second, template <typename, typename> class PackedPairNoInit>
|
||||
inline bool check(const PackedPairNoInit<StringRef, Second> p)
|
||||
{ return 0 == p.key.size; }
|
||||
|
||||
template <typename Second, template <typename, typename> class PackedPairNoInit>
|
||||
inline void set(PackedPairNoInit<StringRef, Second> & p)
|
||||
{ p.key.size = 0; }
|
||||
}
|
||||
|
||||
|
||||
std::ostream & operator<<(std::ostream & os, const StringRef & str);
|
||||
|
@ -7,7 +7,13 @@
|
||||
|
||||
/** Returns value `from` converted to type `To` while retaining bit representation.
|
||||
* `To` and `From` must satisfy `CopyConstructible`.
|
||||
*
|
||||
* In contrast to std::bit_cast can cast types of different width.
|
||||
*
|
||||
* Note: for signed types of narrower size, the casted result is zero-extended
|
||||
* instead of sign-extended as with regular static_cast.
|
||||
* For example, -1 Int8 (represented as 0xFF) bit_casted to UInt64
|
||||
* gives 255 (represented as 0x00000000000000FF) instead of 0xFFFFFFFFFFFFFFFF
|
||||
*/
|
||||
template <typename To, typename From>
|
||||
std::decay_t<To> bit_cast(const From & from)
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <array>
|
||||
|
||||
#if defined(__SSE2__)
|
||||
#include <emmintrin.h>
|
||||
@ -447,7 +448,7 @@ inline char * find_last_not_symbols_or_null(char * begin, char * end)
|
||||
/// See https://github.com/boostorg/algorithm/issues/63
|
||||
/// And https://bugs.llvm.org/show_bug.cgi?id=41141
|
||||
template <char... symbols, typename To>
|
||||
inline void splitInto(To & to, const std::string & what, bool token_compress = false)
|
||||
inline To & splitInto(To & to, std::string_view what, bool token_compress = false)
|
||||
{
|
||||
const char * pos = what.data();
|
||||
const char * end = pos + what.size();
|
||||
@ -463,4 +464,6 @@ inline void splitInto(To & to, const std::string & what, bool token_compress = f
|
||||
else
|
||||
pos = delimiter_or_end;
|
||||
}
|
||||
|
||||
return to;
|
||||
}
|
||||
|
@ -28,14 +28,28 @@ uint64_t getMemoryAmountOrZero()
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
// Try to lookup at the Cgroup limit
|
||||
std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
|
||||
if (cgroup_limit.is_open())
|
||||
|
||||
// CGroups v2
|
||||
std::ifstream cgroupv2_limit("/sys/fs/cgroup/memory.max");
|
||||
if (cgroupv2_limit.is_open())
|
||||
{
|
||||
uint64_t memory_limit = 0; // in case of read error
|
||||
cgroup_limit >> memory_limit;
|
||||
uint64_t memory_limit = 0;
|
||||
cgroupv2_limit >> memory_limit;
|
||||
if (memory_limit > 0 && memory_limit < memory_amount)
|
||||
memory_amount = memory_limit;
|
||||
}
|
||||
else
|
||||
{
|
||||
// CGroups v1
|
||||
std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
|
||||
if (cgroup_limit.is_open())
|
||||
{
|
||||
uint64_t memory_limit = 0; // in case of read error
|
||||
cgroup_limit >> memory_limit;
|
||||
if (memory_limit > 0 && memory_limit < memory_amount)
|
||||
memory_amount = memory_limit;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return memory_amount;
|
||||
|
@ -15,25 +15,34 @@
|
||||
|
||||
|
||||
static thread_local uint64_t current_tid = 0;
|
||||
|
||||
static void setCurrentThreadId()
|
||||
{
|
||||
#if defined(OS_ANDROID)
|
||||
current_tid = gettid();
|
||||
#elif defined(OS_LINUX)
|
||||
current_tid = static_cast<uint64_t>(syscall(SYS_gettid)); /// This call is always successful. - man gettid
|
||||
#elif defined(OS_FREEBSD)
|
||||
current_tid = pthread_getthreadid_np();
|
||||
#elif defined(OS_SUNOS)
|
||||
// On Solaris-derived systems, this returns the ID of the LWP, analogous
|
||||
// to a thread.
|
||||
current_tid = static_cast<uint64_t>(pthread_self());
|
||||
#else
|
||||
if (0 != pthread_threadid_np(nullptr, ¤t_tid))
|
||||
throw std::logic_error("pthread_threadid_np returned error");
|
||||
#endif
|
||||
}
|
||||
|
||||
uint64_t getThreadId()
|
||||
{
|
||||
if (!current_tid)
|
||||
{
|
||||
#if defined(OS_ANDROID)
|
||||
current_tid = gettid();
|
||||
#elif defined(OS_LINUX)
|
||||
current_tid = static_cast<uint64_t>(syscall(SYS_gettid)); /// This call is always successful. - man gettid
|
||||
#elif defined(OS_FREEBSD)
|
||||
current_tid = pthread_getthreadid_np();
|
||||
#elif defined(OS_SUNOS)
|
||||
// On Solaris-derived systems, this returns the ID of the LWP, analogous
|
||||
// to a thread.
|
||||
current_tid = static_cast<uint64_t>(pthread_self());
|
||||
#else
|
||||
if (0 != pthread_threadid_np(nullptr, ¤t_tid))
|
||||
throw std::logic_error("pthread_threadid_np returned error");
|
||||
#endif
|
||||
}
|
||||
setCurrentThreadId();
|
||||
|
||||
return current_tid;
|
||||
}
|
||||
|
||||
void updateCurrentThreadIdAfterFork()
|
||||
{
|
||||
setCurrentThreadId();
|
||||
}
|
||||
|
@ -3,3 +3,5 @@
|
||||
|
||||
/// Obtain thread id from OS. The value is cached in thread local variable.
|
||||
uint64_t getThreadId();
|
||||
|
||||
void updateCurrentThreadIdAfterFork();
|
||||
|
426
base/base/hex.h
426
base/base/hex.h
@ -4,212 +4,288 @@
|
||||
#include <cstring>
|
||||
#include "types.h"
|
||||
|
||||
/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
|
||||
namespace CityHash_v1_0_2 { struct uint128; }
|
||||
|
||||
constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
|
||||
constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
|
||||
namespace wide
|
||||
{
|
||||
template <size_t Bits, typename Signed>
|
||||
class integer;
|
||||
}
|
||||
|
||||
namespace impl
|
||||
{
|
||||
/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
|
||||
constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
|
||||
constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
|
||||
|
||||
/// Maps 0..255 to 00..FF or 00..ff correspondingly.
|
||||
constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
|
||||
"000102030405060708090A0B0C0D0E0F"
|
||||
"101112131415161718191A1B1C1D1E1F"
|
||||
"202122232425262728292A2B2C2D2E2F"
|
||||
"303132333435363738393A3B3C3D3E3F"
|
||||
"404142434445464748494A4B4C4D4E4F"
|
||||
"505152535455565758595A5B5C5D5E5F"
|
||||
"606162636465666768696A6B6C6D6E6F"
|
||||
"707172737475767778797A7B7C7D7E7F"
|
||||
"808182838485868788898A8B8C8D8E8F"
|
||||
"909192939495969798999A9B9C9D9E9F"
|
||||
"A0A1A2A3A4A5A6A7A8A9AAABACADAEAF"
|
||||
"B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF"
|
||||
"C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF"
|
||||
"D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF"
|
||||
"E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF"
|
||||
"F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF";
|
||||
|
||||
constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
|
||||
"000102030405060708090a0b0c0d0e0f"
|
||||
"101112131415161718191a1b1c1d1e1f"
|
||||
"202122232425262728292a2b2c2d2e2f"
|
||||
"303132333435363738393a3b3c3d3e3f"
|
||||
"404142434445464748494a4b4c4d4e4f"
|
||||
"505152535455565758595a5b5c5d5e5f"
|
||||
"606162636465666768696a6b6c6d6e6f"
|
||||
"707172737475767778797a7b7c7d7e7f"
|
||||
"808182838485868788898a8b8c8d8e8f"
|
||||
"909192939495969798999a9b9c9d9e9f"
|
||||
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
|
||||
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
|
||||
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
|
||||
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
|
||||
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
|
||||
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
|
||||
|
||||
/// Maps 0..255 to 00000000..11111111 correspondingly.
|
||||
constexpr inline std::string_view bin_byte_to_char_table = //
|
||||
"0000000000000001000000100000001100000100000001010000011000000111"
|
||||
"0000100000001001000010100000101100001100000011010000111000001111"
|
||||
"0001000000010001000100100001001100010100000101010001011000010111"
|
||||
"0001100000011001000110100001101100011100000111010001111000011111"
|
||||
"0010000000100001001000100010001100100100001001010010011000100111"
|
||||
"0010100000101001001010100010101100101100001011010010111000101111"
|
||||
"0011000000110001001100100011001100110100001101010011011000110111"
|
||||
"0011100000111001001110100011101100111100001111010011111000111111"
|
||||
"0100000001000001010000100100001101000100010001010100011001000111"
|
||||
"0100100001001001010010100100101101001100010011010100111001001111"
|
||||
"0101000001010001010100100101001101010100010101010101011001010111"
|
||||
"0101100001011001010110100101101101011100010111010101111001011111"
|
||||
"0110000001100001011000100110001101100100011001010110011001100111"
|
||||
"0110100001101001011010100110101101101100011011010110111001101111"
|
||||
"0111000001110001011100100111001101110100011101010111011001110111"
|
||||
"0111100001111001011110100111101101111100011111010111111001111111"
|
||||
"1000000010000001100000101000001110000100100001011000011010000111"
|
||||
"1000100010001001100010101000101110001100100011011000111010001111"
|
||||
"1001000010010001100100101001001110010100100101011001011010010111"
|
||||
"1001100010011001100110101001101110011100100111011001111010011111"
|
||||
"1010000010100001101000101010001110100100101001011010011010100111"
|
||||
"1010100010101001101010101010101110101100101011011010111010101111"
|
||||
"1011000010110001101100101011001110110100101101011011011010110111"
|
||||
"1011100010111001101110101011101110111100101111011011111010111111"
|
||||
"1100000011000001110000101100001111000100110001011100011011000111"
|
||||
"1100100011001001110010101100101111001100110011011100111011001111"
|
||||
"1101000011010001110100101101001111010100110101011101011011010111"
|
||||
"1101100011011001110110101101101111011100110111011101111011011111"
|
||||
"1110000011100001111000101110001111100100111001011110011011100111"
|
||||
"1110100011101001111010101110101111101100111011011110111011101111"
|
||||
"1111000011110001111100101111001111110100111101011111011011110111"
|
||||
"1111100011111001111110101111101111111100111111011111111011111111";
|
||||
|
||||
/// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to implementation specific value.
|
||||
constexpr inline std::string_view hex_char_to_digit_table
|
||||
= {"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" //0-9
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //A-Z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //a-z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
|
||||
256};
|
||||
|
||||
/// Converts a hex digit '0'..'f' or '0'..'F' to its value 0..15.
|
||||
constexpr UInt8 unhexDigit(char c)
|
||||
{
|
||||
return hex_char_to_digit_table[static_cast<UInt8>(c)];
|
||||
}
|
||||
|
||||
/// Converts an unsigned integer in the native endian to hexadecimal representation and back. Used as a base class for HexConversion<T>.
|
||||
template <typename TUInt, typename = void>
|
||||
struct HexConversionUInt
|
||||
{
|
||||
static const constexpr size_t num_hex_digits = sizeof(TUInt) * 2;
|
||||
|
||||
static void hex(TUInt uint_, char * out, std::string_view table)
|
||||
{
|
||||
union
|
||||
{
|
||||
TUInt value;
|
||||
UInt8 uint8[sizeof(TUInt)];
|
||||
};
|
||||
|
||||
value = uint_;
|
||||
|
||||
for (size_t i = 0; i < sizeof(TUInt); ++i)
|
||||
{
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
|
||||
else
|
||||
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[i]) * 2], 2);
|
||||
}
|
||||
}
|
||||
|
||||
static TUInt unhex(const char * data)
|
||||
{
|
||||
TUInt res;
|
||||
if constexpr (sizeof(TUInt) == 1)
|
||||
{
|
||||
res = static_cast<UInt8>(unhexDigit(data[0])) * 0x10 + static_cast<UInt8>(unhexDigit(data[1]));
|
||||
}
|
||||
else if constexpr (sizeof(TUInt) == 2)
|
||||
{
|
||||
res = static_cast<UInt16>(unhexDigit(data[0])) * 0x1000 + static_cast<UInt16>(unhexDigit(data[1])) * 0x100
|
||||
+ static_cast<UInt16>(unhexDigit(data[2])) * 0x10 + static_cast<UInt16>(unhexDigit(data[3]));
|
||||
}
|
||||
else if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
|
||||
{
|
||||
res = 0;
|
||||
for (size_t i = 0; i < sizeof(TUInt) * 2; ++i, ++data)
|
||||
{
|
||||
res <<= 4;
|
||||
res += unhexDigit(*data);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
res = 0;
|
||||
for (size_t i = 0; i < sizeof(TUInt) / 8; ++i, data += 16)
|
||||
{
|
||||
res <<= 64;
|
||||
res += HexConversionUInt<UInt64>::unhex(data);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
/// Helper template class to convert a value of any supported type to hexadecimal representation and back.
|
||||
template <typename T, typename SFINAE = void>
|
||||
struct HexConversion;
|
||||
|
||||
template <typename TUInt>
|
||||
struct HexConversion<TUInt, std::enable_if_t<std::is_integral_v<TUInt>>> : public HexConversionUInt<TUInt> {};
|
||||
|
||||
template <size_t Bits, typename Signed>
|
||||
struct HexConversion<wide::integer<Bits, Signed>> : public HexConversionUInt<wide::integer<Bits, Signed>> {};
|
||||
|
||||
template <typename CityHashUInt128> /// Partial specialization here allows not to include <city.h> in this header.
|
||||
struct HexConversion<CityHashUInt128, std::enable_if_t<std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>>>
|
||||
{
|
||||
static const constexpr size_t num_hex_digits = 32;
|
||||
|
||||
static void hex(const CityHashUInt128 & uint_, char * out, std::string_view table)
|
||||
{
|
||||
HexConversion<UInt64>::hex(uint_.high64, out, table);
|
||||
HexConversion<UInt64>::hex(uint_.low64, out + 16, table);
|
||||
}
|
||||
|
||||
static CityHashUInt128 unhex(const char * data)
|
||||
{
|
||||
CityHashUInt128 res;
|
||||
res.high64 = HexConversion<UInt64>::unhex(data);
|
||||
res.low64 = HexConversion<UInt64>::unhex(data + 16);
|
||||
return res;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Produces a hexadecimal representation of an integer value with leading zeros (for checksums).
|
||||
/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
|
||||
/// It can be used with signed types as well, however they are written as corresponding unsigned numbers
|
||||
/// using two's complement (i.e. for example "-1" is written as "0xFF", not as "-0x01").
|
||||
template <typename T>
|
||||
void writeHexUIntUppercase(const T & value, char * out)
|
||||
{
|
||||
impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_uppercase_table);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void writeHexUIntLowercase(const T & value, char * out)
|
||||
{
|
||||
impl::HexConversion<T>::hex(value, out, impl::hex_byte_to_char_lowercase_table);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
std::string getHexUIntUppercase(const T & value)
|
||||
{
|
||||
std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
|
||||
writeHexUIntUppercase(value, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
std::string getHexUIntLowercase(const T & value)
|
||||
{
|
||||
std::string res(impl::HexConversion<T>::num_hex_digits, '\0');
|
||||
writeHexUIntLowercase(value, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
constexpr char hexDigitUppercase(unsigned char c)
|
||||
{
|
||||
return hex_digit_to_char_uppercase_table[c];
|
||||
return impl::hex_digit_to_char_uppercase_table[c];
|
||||
}
|
||||
|
||||
constexpr char hexDigitLowercase(unsigned char c)
|
||||
{
|
||||
return hex_digit_to_char_lowercase_table[c];
|
||||
return impl::hex_digit_to_char_lowercase_table[c];
|
||||
}
|
||||
|
||||
/// Maps 0..255 to 00..FF or 00..ff correspondingly
|
||||
|
||||
constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
|
||||
"000102030405060708090A0B0C0D0E0F"
|
||||
"101112131415161718191A1B1C1D1E1F"
|
||||
"202122232425262728292A2B2C2D2E2F"
|
||||
"303132333435363738393A3B3C3D3E3F"
|
||||
"404142434445464748494A4B4C4D4E4F"
|
||||
"505152535455565758595A5B5C5D5E5F"
|
||||
"606162636465666768696A6B6C6D6E6F"
|
||||
"707172737475767778797A7B7C7D7E7F"
|
||||
"808182838485868788898A8B8C8D8E8F"
|
||||
"909192939495969798999A9B9C9D9E9F"
|
||||
"A0A1A2A3A4A5A6A7A8A9AAABACADAEAF"
|
||||
"B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF"
|
||||
"C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF"
|
||||
"D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF"
|
||||
"E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF"
|
||||
"F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF";
|
||||
|
||||
constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
|
||||
"000102030405060708090a0b0c0d0e0f"
|
||||
"101112131415161718191a1b1c1d1e1f"
|
||||
"202122232425262728292a2b2c2d2e2f"
|
||||
"303132333435363738393a3b3c3d3e3f"
|
||||
"404142434445464748494a4b4c4d4e4f"
|
||||
"505152535455565758595a5b5c5d5e5f"
|
||||
"606162636465666768696a6b6c6d6e6f"
|
||||
"707172737475767778797a7b7c7d7e7f"
|
||||
"808182838485868788898a8b8c8d8e8f"
|
||||
"909192939495969798999a9b9c9d9e9f"
|
||||
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
|
||||
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
|
||||
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
|
||||
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
|
||||
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
|
||||
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
|
||||
|
||||
inline void writeHexByteUppercase(UInt8 byte, void * out)
|
||||
{
|
||||
memcpy(out, &hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
memcpy(out, &impl::hex_byte_to_char_uppercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
}
|
||||
|
||||
inline void writeHexByteLowercase(UInt8 byte, void * out)
|
||||
{
|
||||
memcpy(out, &hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
memcpy(out, &impl::hex_byte_to_char_lowercase_table[static_cast<size_t>(byte) * 2], 2);
|
||||
}
|
||||
|
||||
constexpr inline std::string_view bin_byte_to_char_table = //
|
||||
"0000000000000001000000100000001100000100000001010000011000000111"
|
||||
"0000100000001001000010100000101100001100000011010000111000001111"
|
||||
"0001000000010001000100100001001100010100000101010001011000010111"
|
||||
"0001100000011001000110100001101100011100000111010001111000011111"
|
||||
"0010000000100001001000100010001100100100001001010010011000100111"
|
||||
"0010100000101001001010100010101100101100001011010010111000101111"
|
||||
"0011000000110001001100100011001100110100001101010011011000110111"
|
||||
"0011100000111001001110100011101100111100001111010011111000111111"
|
||||
"0100000001000001010000100100001101000100010001010100011001000111"
|
||||
"0100100001001001010010100100101101001100010011010100111001001111"
|
||||
"0101000001010001010100100101001101010100010101010101011001010111"
|
||||
"0101100001011001010110100101101101011100010111010101111001011111"
|
||||
"0110000001100001011000100110001101100100011001010110011001100111"
|
||||
"0110100001101001011010100110101101101100011011010110111001101111"
|
||||
"0111000001110001011100100111001101110100011101010111011001110111"
|
||||
"0111100001111001011110100111101101111100011111010111111001111111"
|
||||
"1000000010000001100000101000001110000100100001011000011010000111"
|
||||
"1000100010001001100010101000101110001100100011011000111010001111"
|
||||
"1001000010010001100100101001001110010100100101011001011010010111"
|
||||
"1001100010011001100110101001101110011100100111011001111010011111"
|
||||
"1010000010100001101000101010001110100100101001011010011010100111"
|
||||
"1010100010101001101010101010101110101100101011011010111010101111"
|
||||
"1011000010110001101100101011001110110100101101011011011010110111"
|
||||
"1011100010111001101110101011101110111100101111011011111010111111"
|
||||
"1100000011000001110000101100001111000100110001011100011011000111"
|
||||
"1100100011001001110010101100101111001100110011011100111011001111"
|
||||
"1101000011010001110100101101001111010100110101011101011011010111"
|
||||
"1101100011011001110110101101101111011100110111011101111011011111"
|
||||
"1110000011100001111000101110001111100100111001011110011011100111"
|
||||
"1110100011101001111010101110101111101100111011011110111011101111"
|
||||
"1111000011110001111100101111001111110100111101011111011011110111"
|
||||
"1111100011111001111110101111101111111100111111011111111011111111";
|
||||
|
||||
inline void writeBinByte(UInt8 byte, void * out)
|
||||
/// Converts a hex representation with leading zeros back to an integer value.
|
||||
/// The function supports native integer types, wide::integer, CityHash_v1_0_2::uint128.
|
||||
template <typename T>
|
||||
constexpr T unhexUInt(const char * data)
|
||||
{
|
||||
memcpy(out, &bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
|
||||
return impl::HexConversion<T>::unhex(data);
|
||||
}
|
||||
|
||||
/// Produces hex representation of an unsigned int with leading zeros (for checksums)
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntImpl(TUInt uint_, char * out, std::string_view table)
|
||||
{
|
||||
union
|
||||
{
|
||||
TUInt value;
|
||||
UInt8 uint8[sizeof(TUInt)];
|
||||
};
|
||||
|
||||
value = uint_;
|
||||
|
||||
for (size_t i = 0; i < sizeof(TUInt); ++i)
|
||||
{
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
|
||||
else
|
||||
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[i]) * 2], 2);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntUppercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntLowercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntUppercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntUppercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntLowercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntLowercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
/// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to implementation specific value.
|
||||
|
||||
constexpr inline std::string_view hex_char_to_digit_table
|
||||
= {"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" //0-9
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //A-Z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //a-z
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
|
||||
256};
|
||||
|
||||
/// Converts a hexadecimal digit '0'..'f' or '0'..'F' to UInt8.
|
||||
constexpr UInt8 unhex(char c)
|
||||
{
|
||||
return hex_char_to_digit_table[static_cast<UInt8>(c)];
|
||||
return impl::unhexDigit(c);
|
||||
}
|
||||
|
||||
/// Converts two hexadecimal digits to UInt8.
|
||||
constexpr UInt8 unhex2(const char * data)
|
||||
{
|
||||
return static_cast<UInt8>(unhex(data[0])) * 0x10 + static_cast<UInt8>(unhex(data[1]));
|
||||
return unhexUInt<UInt8>(data);
|
||||
}
|
||||
|
||||
/// Converts four hexadecimal digits to UInt16.
|
||||
constexpr UInt16 unhex4(const char * data)
|
||||
{
|
||||
return static_cast<UInt16>(unhex(data[0])) * 0x1000 + static_cast<UInt16>(unhex(data[1])) * 0x100
|
||||
+ static_cast<UInt16>(unhex(data[2])) * 0x10 + static_cast<UInt16>(unhex(data[3]));
|
||||
return unhexUInt<UInt16>(data);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
constexpr TUInt unhexUInt(const char * data)
|
||||
/// Produces a binary representation of a single byte.
|
||||
inline void writeBinByte(UInt8 byte, void * out)
|
||||
{
|
||||
TUInt res = 0;
|
||||
if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
|
||||
{
|
||||
for (size_t i = 0; i < sizeof(TUInt) * 2; ++i, ++data)
|
||||
{
|
||||
res <<= 4;
|
||||
res += unhex(*data);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (size_t i = 0; i < sizeof(TUInt) / 8; ++i, data += 16)
|
||||
{
|
||||
res <<= 64;
|
||||
res += unhexUInt<UInt64>(data);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
memcpy(out, &impl::bin_byte_to_char_table[static_cast<size_t>(byte) * 8], 8);
|
||||
}
|
||||
|
@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio)
|
||||
assert(min > 0 && ratio >= 0 && ratio <= 1);
|
||||
return min * std::pow(max / min, ratio);
|
||||
}
|
||||
|
||||
constexpr double interpolateLinear(double min, double max, double ratio)
|
||||
{
|
||||
return std::lerp(min, max, ratio);
|
||||
}
|
||||
|
9
base/base/move_extend.h
Normal file
9
base/base/move_extend.h
Normal file
@ -0,0 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
/// Extend @p to by moving elements from @p from to @p to end
|
||||
/// @return @p to iterator to first of moved elements.
|
||||
template <class To, class From>
|
||||
typename To::iterator moveExtend(To & to, From && from)
|
||||
{
|
||||
return to.insert(to.end(), std::make_move_iterator(from.begin()), std::make_move_iterator(from.end()));
|
||||
}
|
@ -27,6 +27,8 @@ using FromDoubleIntermediateType = long double;
|
||||
using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;
|
||||
#endif
|
||||
|
||||
namespace CityHash_v1_0_2 { struct uint128; }
|
||||
|
||||
namespace wide
|
||||
{
|
||||
|
||||
@ -281,6 +283,17 @@ struct integer<Bits, Signed>::_impl
|
||||
}
|
||||
}
|
||||
|
||||
template <typename CityHashUInt128 = CityHash_v1_0_2::uint128>
|
||||
constexpr static void wide_integer_from_cityhash_uint128(integer<Bits, Signed> & self, const CityHashUInt128 & value) noexcept
|
||||
{
|
||||
static_assert(sizeof(item_count) >= 2);
|
||||
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
wide_integer_from_tuple_like(self, std::make_pair(value.low64, value.high64));
|
||||
else
|
||||
wide_integer_from_tuple_like(self, std::make_pair(value.high64, value.low64));
|
||||
}
|
||||
|
||||
/**
|
||||
* N.B. t is constructed from double, so max(t) = max(double) ~ 2^310
|
||||
* the recursive call happens when t / 2^64 > 2^64, so there won't be more than 5 of them.
|
||||
@ -314,7 +327,14 @@ struct integer<Bits, Signed>::_impl
|
||||
|
||||
const T alpha = t / static_cast<T>(max_int);
|
||||
|
||||
if (alpha <= static_cast<T>(max_int))
|
||||
/** Here we have to use strict comparison.
|
||||
* The max_int is 2^64 - 1.
|
||||
* When casted to floating point type, it will be rounded to the closest representable number,
|
||||
* which is 2^64.
|
||||
* But 2^64 is not representable in uint64_t,
|
||||
* so the maximum representable number will be strictly less.
|
||||
*/
|
||||
if (alpha < static_cast<T>(max_int))
|
||||
self = static_cast<uint64_t>(alpha);
|
||||
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
|
||||
set_multiplier<double>(self, static_cast<double>(alpha));
|
||||
@ -1029,6 +1049,8 @@ constexpr integer<Bits, Signed>::integer(T rhs) noexcept
|
||||
_impl::wide_integer_from_wide_integer(*this, rhs);
|
||||
else if constexpr (IsTupleLike<T>::value)
|
||||
_impl::wide_integer_from_tuple_like(*this, rhs);
|
||||
else if constexpr (std::is_same_v<std::remove_cvref_t<T>, CityHash_v1_0_2::uint128>)
|
||||
_impl::wide_integer_from_cityhash_uint128(*this, rhs);
|
||||
else
|
||||
_impl::wide_integer_from_builtin(*this, rhs);
|
||||
}
|
||||
@ -1044,6 +1066,8 @@ constexpr integer<Bits, Signed>::integer(std::initializer_list<T> il) noexcept
|
||||
_impl::wide_integer_from_wide_integer(*this, *il.begin());
|
||||
else if constexpr (IsTupleLike<T>::value)
|
||||
_impl::wide_integer_from_tuple_like(*this, *il.begin());
|
||||
else if constexpr (std::is_same_v<std::remove_cvref_t<T>, CityHash_v1_0_2::uint128>)
|
||||
_impl::wide_integer_from_cityhash_uint128(*this, *il.begin());
|
||||
else
|
||||
_impl::wide_integer_from_builtin(*this, *il.begin());
|
||||
}
|
||||
@ -1081,6 +1105,8 @@ constexpr integer<Bits, Signed> & integer<Bits, Signed>::operator=(T rhs) noexce
|
||||
{
|
||||
if constexpr (IsTupleLike<T>::value)
|
||||
_impl::wide_integer_from_tuple_like(*this, rhs);
|
||||
else if constexpr (std::is_same_v<std::remove_cvref_t<T>, CityHash_v1_0_2::uint128>)
|
||||
_impl::wide_integer_from_cityhash_uint128(*this, rhs);
|
||||
else
|
||||
_impl::wide_integer_from_builtin(*this, rhs);
|
||||
return *this;
|
||||
|
@ -53,7 +53,7 @@ float logf(float x)
|
||||
tmp = ix - OFF;
|
||||
i = (tmp >> (23 - LOGF_TABLE_BITS)) % N;
|
||||
k = (int32_t)tmp >> 23; /* arithmetic shift */
|
||||
iz = ix - (tmp & 0x1ff << 23);
|
||||
iz = ix - (tmp & 0xff800000);
|
||||
invc = T[i].invc;
|
||||
logc = T[i].logc;
|
||||
z = (double_t)asfloat(iz);
|
||||
|
@ -87,7 +87,6 @@ set (SRCS
|
||||
src/LoggingRegistry.cpp
|
||||
src/LogStream.cpp
|
||||
src/MD5Engine.cpp
|
||||
src/MemoryPool.cpp
|
||||
src/MemoryStream.cpp
|
||||
src/Message.cpp
|
||||
src/Mutex.cpp
|
||||
|
@ -117,6 +117,9 @@ public:
|
||||
void readRaw(char * buffer, std::streamsize length);
|
||||
/// Reads length bytes of raw data into buffer.
|
||||
|
||||
void readCString(std::string& value);
|
||||
/// Reads zero-terminated C-string into value.
|
||||
|
||||
void readBOM();
|
||||
/// Reads a byte-order mark from the stream and configures
|
||||
/// the reader for the encountered byte order.
|
||||
|
@ -56,6 +56,8 @@ public:
|
||||
LITTLE_ENDIAN_BYTE_ORDER = 3 /// little-endian byte-order
|
||||
};
|
||||
|
||||
static const std::streamsize DEFAULT_MAX_CSTR_LENGTH { 1024 };
|
||||
|
||||
BinaryWriter(std::ostream & ostr, StreamByteOrder byteOrder = NATIVE_BYTE_ORDER);
|
||||
/// Creates the BinaryWriter.
|
||||
|
||||
@ -131,6 +133,9 @@ public:
|
||||
void writeRaw(const char * buffer, std::streamsize length);
|
||||
/// Writes length raw bytes from the given buffer to the stream.
|
||||
|
||||
void writeCString(const char* cString, std::streamsize maxLength = DEFAULT_MAX_CSTR_LENGTH);
|
||||
/// Writes zero-terminated C-string.
|
||||
|
||||
void writeBOM();
|
||||
/// Writes a byte-order mark to the stream. A byte order mark is
|
||||
/// a 16-bit integer with a value of 0xFEFF, written in host byte-order.
|
||||
|
@ -53,7 +53,7 @@
|
||||
|
||||
|
||||
// Define if no <locale> header is available (such as on WinCE)
|
||||
// #define POCO_NO_LOCALE
|
||||
#define POCO_NO_LOCALE
|
||||
|
||||
|
||||
// Define to desired default thread stack size
|
||||
|
@ -1,116 +0,0 @@
|
||||
//
|
||||
// MemoryPool.h
|
||||
//
|
||||
// Library: Foundation
|
||||
// Package: Core
|
||||
// Module: MemoryPool
|
||||
//
|
||||
// Definition of the MemoryPool class.
|
||||
//
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
#ifndef Foundation_MemoryPool_INCLUDED
|
||||
#define Foundation_MemoryPool_INCLUDED
|
||||
|
||||
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
#include "Poco/Foundation.h"
|
||||
#include "Poco/Mutex.h"
|
||||
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
|
||||
|
||||
class Foundation_API MemoryPool
|
||||
/// A simple pool for fixed-size memory blocks.
|
||||
///
|
||||
/// The main purpose of this class is to speed-up
|
||||
/// memory allocations, as well as to reduce memory
|
||||
/// fragmentation in situations where the same blocks
|
||||
/// are allocated all over again, such as in server
|
||||
/// applications.
|
||||
///
|
||||
/// All allocated blocks are retained for future use.
|
||||
/// A limit on the number of blocks can be specified.
|
||||
/// Blocks can be preallocated.
|
||||
{
|
||||
public:
|
||||
MemoryPool(std::size_t blockSize, int preAlloc = 0, int maxAlloc = 0);
|
||||
/// Creates a MemoryPool for blocks with the given blockSize.
|
||||
/// The number of blocks given in preAlloc are preallocated.
|
||||
|
||||
~MemoryPool();
|
||||
|
||||
void * get();
|
||||
/// Returns a memory block. If there are no more blocks
|
||||
/// in the pool, a new block will be allocated.
|
||||
///
|
||||
/// If maxAlloc blocks are already allocated, an
|
||||
/// OutOfMemoryException is thrown.
|
||||
|
||||
void release(void * ptr);
|
||||
/// Releases a memory block and returns it to the pool.
|
||||
|
||||
std::size_t blockSize() const;
|
||||
/// Returns the block size.
|
||||
|
||||
int allocated() const;
|
||||
/// Returns the number of allocated blocks.
|
||||
|
||||
int available() const;
|
||||
/// Returns the number of available blocks in the pool.
|
||||
|
||||
private:
|
||||
MemoryPool();
|
||||
MemoryPool(const MemoryPool &);
|
||||
MemoryPool & operator=(const MemoryPool &);
|
||||
|
||||
void clear();
|
||||
|
||||
enum
|
||||
{
|
||||
BLOCK_RESERVE = 128
|
||||
};
|
||||
|
||||
typedef std::vector<char *> BlockVec;
|
||||
|
||||
std::size_t _blockSize;
|
||||
int _maxAlloc;
|
||||
int _allocated;
|
||||
BlockVec _blocks;
|
||||
FastMutex _mutex;
|
||||
};
|
||||
|
||||
|
||||
//
|
||||
// inlines
|
||||
//
|
||||
inline std::size_t MemoryPool::blockSize() const
|
||||
{
|
||||
return _blockSize;
|
||||
}
|
||||
|
||||
|
||||
inline int MemoryPool::allocated() const
|
||||
{
|
||||
return _allocated;
|
||||
}
|
||||
|
||||
|
||||
inline int MemoryPool::available() const
|
||||
{
|
||||
return (int)_blocks.size();
|
||||
}
|
||||
|
||||
|
||||
} // namespace Poco
|
||||
|
||||
|
||||
#endif // Foundation_MemoryPool_INCLUDED
|
@ -30,9 +30,6 @@
|
||||
#include <cctype>
|
||||
#include <cmath>
|
||||
#include <limits>
|
||||
#if !defined(POCO_NO_LOCALE)
|
||||
# include <locale>
|
||||
#endif
|
||||
|
||||
|
||||
// binary numbers are supported, thus 64 (bits) + 1 (string terminating zero)
|
||||
@ -53,11 +50,7 @@ inline char decimalSeparator()
|
||||
/// Returns decimal separator from global locale or
|
||||
/// default '.' for platforms where locale is unavailable.
|
||||
{
|
||||
#if !defined(POCO_NO_LOCALE)
|
||||
return std::use_facet<std::numpunct<char>>(std::locale()).decimal_point();
|
||||
#else
|
||||
return '.';
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -65,11 +58,7 @@ inline char thousandSeparator()
|
||||
/// Returns thousand separator from global locale or
|
||||
/// default ',' for platforms where locale is unavailable.
|
||||
{
|
||||
#if !defined(POCO_NO_LOCALE)
|
||||
return std::use_facet<std::numpunct<char>>(std::locale()).thousands_sep();
|
||||
#else
|
||||
return ',';
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@ -57,7 +57,7 @@ public:
|
||||
URI();
|
||||
/// Creates an empty URI.
|
||||
|
||||
explicit URI(const std::string & uri);
|
||||
explicit URI(const std::string & uri, bool disable_url_encoding = false);
|
||||
/// Parses an URI from the given string. Throws a
|
||||
/// SyntaxException if the uri is not valid.
|
||||
|
||||
@ -350,6 +350,10 @@ protected:
|
||||
static const std::string ILLEGAL;
|
||||
|
||||
private:
|
||||
void encodePath(std::string & encodedStr) const;
|
||||
void decodePath(const std::string & encodedStr);
|
||||
|
||||
|
||||
std::string _scheme;
|
||||
std::string _userInfo;
|
||||
std::string _host;
|
||||
@ -357,6 +361,8 @@ private:
|
||||
std::string _path;
|
||||
std::string _query;
|
||||
std::string _fragment;
|
||||
|
||||
bool _disable_url_encoding = false;
|
||||
};
|
||||
|
||||
|
||||
|
@ -274,6 +274,31 @@ void BinaryReader::readRaw(char* buffer, std::streamsize length)
|
||||
}
|
||||
|
||||
|
||||
void BinaryReader::readCString(std::string& value)
|
||||
{
|
||||
value.clear();
|
||||
if (!_istr.good())
|
||||
{
|
||||
return;
|
||||
}
|
||||
value.reserve(256);
|
||||
while (true)
|
||||
{
|
||||
char c;
|
||||
_istr.get(c);
|
||||
if (!_istr.good())
|
||||
{
|
||||
break;
|
||||
}
|
||||
if (c == '\0')
|
||||
{
|
||||
break;
|
||||
}
|
||||
value += c;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void BinaryReader::readBOM()
|
||||
{
|
||||
UInt16 bom;
|
||||
|
@ -271,7 +271,7 @@ BinaryWriter& BinaryWriter::operator << (const std::string& value)
|
||||
BinaryWriter& BinaryWriter::operator << (const char* value)
|
||||
{
|
||||
poco_check_ptr (value);
|
||||
|
||||
|
||||
if (_pTextConverter)
|
||||
{
|
||||
std::string converted;
|
||||
@ -332,6 +332,15 @@ void BinaryWriter::writeRaw(const char* buffer, std::streamsize length)
|
||||
}
|
||||
|
||||
|
||||
void BinaryWriter::writeCString(const char* cString, std::streamsize maxLength)
|
||||
{
|
||||
const std::size_t len = ::strnlen(cString, maxLength);
|
||||
writeRaw(cString, len);
|
||||
static const char zero = '\0';
|
||||
_ostr.write(&zero, sizeof(zero));
|
||||
}
|
||||
|
||||
|
||||
void BinaryWriter::writeBOM()
|
||||
{
|
||||
UInt16 value = 0xFEFF;
|
||||
|
@ -16,9 +16,6 @@
|
||||
#include "Poco/Exception.h"
|
||||
#include "Poco/Ascii.h"
|
||||
#include <sstream>
|
||||
#if !defined(POCO_NO_LOCALE)
|
||||
#include <locale>
|
||||
#endif
|
||||
#include <cstddef>
|
||||
|
||||
|
||||
@ -147,9 +144,6 @@ namespace
|
||||
void formatOne(std::string& result, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt, std::vector<Any>::const_iterator& itVal)
|
||||
{
|
||||
std::ostringstream str;
|
||||
#if !defined(POCO_NO_LOCALE)
|
||||
str.imbue(std::locale::classic());
|
||||
#endif
|
||||
try
|
||||
{
|
||||
parseFlags(str, itFmt, endFmt);
|
||||
|
@ -1,105 +0,0 @@
|
||||
//
|
||||
// MemoryPool.cpp
|
||||
//
|
||||
// Library: Foundation
|
||||
// Package: Core
|
||||
// Module: MemoryPool
|
||||
//
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
#include "Poco/MemoryPool.h"
|
||||
#include "Poco/Exception.h"
|
||||
|
||||
|
||||
namespace Poco {
|
||||
|
||||
|
||||
MemoryPool::MemoryPool(std::size_t blockSize, int preAlloc, int maxAlloc):
|
||||
_blockSize(blockSize),
|
||||
_maxAlloc(maxAlloc),
|
||||
_allocated(preAlloc)
|
||||
{
|
||||
poco_assert (maxAlloc == 0 || maxAlloc >= preAlloc);
|
||||
poco_assert (preAlloc >= 0 && maxAlloc >= 0);
|
||||
|
||||
int r = BLOCK_RESERVE;
|
||||
if (preAlloc > r)
|
||||
r = preAlloc;
|
||||
if (maxAlloc > 0 && maxAlloc < r)
|
||||
r = maxAlloc;
|
||||
_blocks.reserve(r);
|
||||
|
||||
try
|
||||
{
|
||||
for (int i = 0; i < preAlloc; ++i)
|
||||
{
|
||||
_blocks.push_back(new char[_blockSize]);
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
clear();
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
MemoryPool::~MemoryPool()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
|
||||
void MemoryPool::clear()
|
||||
{
|
||||
for (BlockVec::iterator it = _blocks.begin(); it != _blocks.end(); ++it)
|
||||
{
|
||||
delete [] *it;
|
||||
}
|
||||
_blocks.clear();
|
||||
}
|
||||
|
||||
|
||||
void* MemoryPool::get()
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
if (_blocks.empty())
|
||||
{
|
||||
if (_maxAlloc == 0 || _allocated < _maxAlloc)
|
||||
{
|
||||
++_allocated;
|
||||
return new char[_blockSize];
|
||||
}
|
||||
else throw OutOfMemoryException("MemoryPool exhausted");
|
||||
}
|
||||
else
|
||||
{
|
||||
char* ptr = _blocks.back();
|
||||
_blocks.pop_back();
|
||||
return ptr;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MemoryPool::release(void* ptr)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
try
|
||||
{
|
||||
_blocks.push_back(reinterpret_cast<char*>(ptr));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
delete [] reinterpret_cast<char*>(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} // namespace Poco
|
@ -15,9 +15,6 @@
|
||||
#include "Poco/NumberFormatter.h"
|
||||
#include "Poco/MemoryStream.h"
|
||||
#include <iomanip>
|
||||
#if !defined(POCO_NO_LOCALE)
|
||||
#include <locale>
|
||||
#endif
|
||||
#include <cstdio>
|
||||
|
||||
|
||||
|
@ -19,9 +19,6 @@
|
||||
#include <cstdio>
|
||||
#include <cctype>
|
||||
#include <stdlib.h>
|
||||
#if !defined(POCO_NO_LOCALE)
|
||||
#include <locale>
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(POCO_LONG_IS_64_BIT)
|
||||
|
@ -36,8 +36,8 @@ URI::URI():
|
||||
}
|
||||
|
||||
|
||||
URI::URI(const std::string& uri):
|
||||
_port(0)
|
||||
URI::URI(const std::string& uri, bool decode_and_encode_path):
|
||||
_port(0), _disable_url_encoding(decode_and_encode_path)
|
||||
{
|
||||
parse(uri);
|
||||
}
|
||||
@ -107,7 +107,8 @@ URI::URI(const URI& uri):
|
||||
_port(uri._port),
|
||||
_path(uri._path),
|
||||
_query(uri._query),
|
||||
_fragment(uri._fragment)
|
||||
_fragment(uri._fragment),
|
||||
_disable_url_encoding(uri._disable_url_encoding)
|
||||
{
|
||||
}
|
||||
|
||||
@ -119,7 +120,8 @@ URI::URI(const URI& baseURI, const std::string& relativeURI):
|
||||
_port(baseURI._port),
|
||||
_path(baseURI._path),
|
||||
_query(baseURI._query),
|
||||
_fragment(baseURI._fragment)
|
||||
_fragment(baseURI._fragment),
|
||||
_disable_url_encoding(baseURI._disable_url_encoding)
|
||||
{
|
||||
resolve(relativeURI);
|
||||
}
|
||||
@ -151,6 +153,7 @@ URI& URI::operator = (const URI& uri)
|
||||
_path = uri._path;
|
||||
_query = uri._query;
|
||||
_fragment = uri._fragment;
|
||||
_disable_url_encoding = uri._disable_url_encoding;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
@ -181,6 +184,7 @@ void URI::swap(URI& uri)
|
||||
std::swap(_path, uri._path);
|
||||
std::swap(_query, uri._query);
|
||||
std::swap(_fragment, uri._fragment);
|
||||
std::swap(_disable_url_encoding, uri._disable_url_encoding);
|
||||
}
|
||||
|
||||
|
||||
@ -201,7 +205,7 @@ std::string URI::toString() const
|
||||
std::string uri;
|
||||
if (isRelative())
|
||||
{
|
||||
encode(_path, RESERVED_PATH, uri);
|
||||
encodePath(uri);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -217,7 +221,7 @@ std::string URI::toString() const
|
||||
{
|
||||
if (!auth.empty() && _path[0] != '/')
|
||||
uri += '/';
|
||||
encode(_path, RESERVED_PATH, uri);
|
||||
encodePath(uri);
|
||||
}
|
||||
else if (!_query.empty() || !_fragment.empty())
|
||||
{
|
||||
@ -313,7 +317,7 @@ void URI::setAuthority(const std::string& authority)
|
||||
void URI::setPath(const std::string& path)
|
||||
{
|
||||
_path.clear();
|
||||
decode(path, _path);
|
||||
decodePath(path);
|
||||
}
|
||||
|
||||
|
||||
@@ -418,7 +422,7 @@ void URI::setPathEtc(const std::string& pathEtc)
std::string URI::getPathEtc() const
{
    std::string pathEtc;
-    encode(_path, RESERVED_PATH, pathEtc);
+    encodePath(pathEtc);
    if (!_query.empty())
    {
        pathEtc += '?';
@@ -436,7 +440,7 @@ std::string URI::getPathEtc() const
std::string URI::getPathAndQuery() const
{
    std::string pathAndQuery;
-    encode(_path, RESERVED_PATH, pathAndQuery);
+    encodePath(pathAndQuery);
    if (!_query.empty())
    {
        pathAndQuery += '?';
@@ -681,6 +685,21 @@ void URI::decode(const std::string& str, std::string& decodedStr, bool plusAsSpace)
    }
}

+void URI::encodePath(std::string & encodedStr) const
+{
+    if (_disable_url_encoding)
+        encodedStr = _path;
+    else
+        encode(_path, RESERVED_PATH, encodedStr);
+}
+
+void URI::decodePath(const std::string & encodedStr)
+{
+    if (_disable_url_encoding)
+        _path = encodedStr;
+    else
+        decode(encodedStr, _path);
+}

bool URI::isWellKnownPort() const
{
@@ -820,7 +839,7 @@ void URI::parsePath(std::string::const_iterator& it, const std::string::const_iterator& end)
{
    std::string path;
    while (it != end && *it != '?' && *it != '#') path += *it++;
-    decode(path, _path);
+    decodePath(path);
}
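Every path round-trip in URI now goes through encodePath()/decodePath() above. A minimal sketch of the observable difference (behaviour inferred from this diff; note that, as wired here, a true second argument sets _disable_url_encoding and therefore keeps the path verbatim):

#include "Poco/URI.h"
#include <iostream>

int main()
{
    // Default: the path is percent-decoded on parse and re-encoded by toString().
    Poco::URI encoded("http://example.com/a%2Fb");

    // With decode_and_encode_path = true, encoding is disabled and the
    // path is preserved byte-for-byte in both directions.
    Poco::URI verbatim("http://example.com/a%2Fb", true);

    std::cout << encoded.toString() << '\n' << verbatim.toString() << '\n';
    return 0;
}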
@@ -13,3 +13,4 @@ target_compile_options (_poco_mongodb

target_include_directories (_poco_mongodb SYSTEM PUBLIC "include")
target_link_libraries (_poco_mongodb PUBLIC Poco::Net)
@@ -33,7 +33,7 @@ namespace MongoDB
    /// This class represents a BSON Array.
    {
    public:
-        typedef SharedPtr<Array> Ptr;
+        using Ptr = SharedPtr<Array>;

        Array();
        /// Creates an empty Array.
@@ -41,8 +41,31 @@ namespace MongoDB
        virtual ~Array();
        /// Destroys the Array.

+        // Document template functions available for backward compatibility
+        using Document::add;
+        using Document::get;
+
        template <typename T>
-        T get(int pos) const
+        Document & add(T value)
+        /// Creates an element with the name from the current pos and value and
+        /// adds it to the array document.
+        ///
+        /// The active document is returned to allow chaining of the add methods.
+        {
+            return Document::add<T>(Poco::NumberFormatter::format(size()), value);
+        }
+
+        Document & add(const char * value)
+        /// Creates an element with a name from the current pos and value and
+        /// adds it to the array document.
+        ///
+        /// The active document is returned to allow chaining of the add methods.
+        {
+            return Document::add(Poco::NumberFormatter::format(size()), value);
+        }
+
+        template <typename T>
+        T get(std::size_t pos) const
        /// Returns the element at the given index and tries to convert
        /// it to the template type. If the element is not found, a
        /// Poco::NotFoundException will be thrown. If the element cannot be
@@ -52,7 +75,7 @@ namespace MongoDB
        }

        template <typename T>
-        T get(int pos, const T & deflt) const
+        T get(std::size_t pos, const T & deflt) const
        /// Returns the element at the given index and tries to convert
        /// it to the template type. If the element is not found, or
        /// has the wrong type, the deflt argument will be returned.
@@ -60,12 +83,12 @@ namespace MongoDB
            return Document::get<T>(Poco::NumberFormatter::format(pos), deflt);
        }

-        Element::Ptr get(int pos) const;
+        Element::Ptr get(std::size_t pos) const;
        /// Returns the element at the given index.
        /// An empty element will be returned if the element is not found.

        template <typename T>
-        bool isType(int pos) const
+        bool isType(std::size_t pos) const
        /// Returns true if the type of the element equals the TypeId of ElementTrait,
        /// otherwise false.
        {
@@ -74,6 +97,9 @@ namespace MongoDB

        std::string toString(int indent = 0) const;
        /// Returns a string representation of the Array.

+    private:
+        friend void BSONReader::read<Array::Ptr>(Array::Ptr & to);
    };
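With the positional overloads above, array elements are named after the current size(), so indices stay dense. A minimal sketch of the new API (hypothetical standalone usage, not part of this commit):

#include "Poco/MongoDB/Array.h"
#include <iostream>

int main()
{
    Poco::MongoDB::Array::Ptr arr = new Poco::MongoDB::Array();

    arr->add("first");           // stored under the name "0"
    arr->add(Poco::Int32(42));   // stored under the name "1"

    std::cout << arr->get<std::string>(0) << '\n';
    std::cout << arr->get<Poco::Int32>(1) << '\n';
    return 0;
}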
@@ -40,7 +40,7 @@ namespace MongoDB
    /// A Binary stores its data in a Poco::Buffer<unsigned char>.
    {
    public:
-        typedef SharedPtr<Binary> Ptr;
+        using Ptr = SharedPtr<Binary>;

        Binary();
        /// Creates an empty Binary with subtype 0.
@@ -18,6 +18,7 @@
#define MongoDB_Connection_INCLUDED


+#include "Poco/MongoDB/OpMsgMessage.h"
#include "Poco/MongoDB/RequestMessage.h"
#include "Poco/MongoDB/ResponseMessage.h"
#include "Poco/Mutex.h"
@@ -39,7 +40,7 @@ namespace MongoDB
    /// for more information on the wire protocol.
    {
    public:
-        typedef Poco::SharedPtr<Connection> Ptr;
+        using Ptr = Poco::SharedPtr<Connection>;

        class MongoDB_API SocketFactory
        {
@@ -90,7 +91,7 @@ namespace MongoDB

        Poco::Net::SocketAddress address() const;
        /// Returns the address of the MongoDB server.


        const std::string & uri() const;
        /// Returns the uri on which the connection was made.
@@ -145,6 +146,21 @@ namespace MongoDB
        /// Use this when a response is expected: only a "query" or "getmore"
        /// request will return a response.

+        void sendRequest(OpMsgMessage & request, OpMsgMessage & response);
+        /// Sends a request to the MongoDB server and receives the response
+        /// using newer wire protocol with OP_MSG.
+
+        void sendRequest(OpMsgMessage & request);
+        /// Sends an unacknowledged request to the MongoDB server using newer
+        /// wire protocol with OP_MSG.
+        /// No response is sent by the server.
+
+        void readResponse(OpMsgMessage & response);
+        /// Reads additional response data when previous message's flag moreToCome
+        /// indicates that server will send more data.
+        /// NOTE: See comments in OpMsgCursor code.
+

    protected:
        void connect();
@@ -164,7 +180,7 @@ namespace MongoDB
    }
    inline const std::string & Connection::uri() const
    {
        return _uri;
    }
@@ -40,6 +40,9 @@ namespace MongoDB
        Cursor(const std::string & fullCollectionName, QueryRequest::Flags flags = QueryRequest::QUERY_DEFAULT);
        /// Creates a Cursor for the given database and collection ("database.collection"), using the specified flags.

+        Cursor(const Document & aggregationResponse);
+        /// Creates a Cursor for the given aggregation query response.
+
        virtual ~Cursor();
        /// Destroys the Cursor.
@@ -26,6 +26,8 @@
#include "Poco/MongoDB/QueryRequest.h"
#include "Poco/MongoDB/UpdateRequest.h"

+#include "Poco/MongoDB/OpMsgCursor.h"
+#include "Poco/MongoDB/OpMsgMessage.h"

namespace Poco
{
@@ -45,6 +47,9 @@ namespace MongoDB
        virtual ~Database();
        /// Destroys the Database.

+        const std::string & name() const;
+        /// Database name
+
        bool authenticate(
            Connection & connection,
            const std::string & username,
@@ -62,34 +67,49 @@ namespace MongoDB
        /// May throw a Poco::ProtocolException if authentication fails for a reason other than
        /// invalid credentials.

+        Document::Ptr queryBuildInfo(Connection & connection) const;
+        /// Queries server build info (all wire protocols)
+
+        Document::Ptr queryServerHello(Connection & connection) const;
+        /// Queries hello response from server (all wire protocols)
+
        Int64 count(Connection & connection, const std::string & collectionName) const;
-        /// Sends a count request for the given collection to MongoDB.
+        /// Sends a count request for the given collection to MongoDB. (old wire protocol)
        ///
        /// If the command fails, -1 is returned.

        Poco::SharedPtr<Poco::MongoDB::QueryRequest> createCommand() const;
-        /// Creates a QueryRequest for a command.
+        /// Creates a QueryRequest for a command. (old wire protocol)

        Poco::SharedPtr<Poco::MongoDB::QueryRequest> createCountRequest(const std::string & collectionName) const;
        /// Creates a QueryRequest to count the given collection.
-        /// The collectionname must not contain the database name.
+        /// The collectionname must not contain the database name. (old wire protocol)

        Poco::SharedPtr<Poco::MongoDB::DeleteRequest> createDeleteRequest(const std::string & collectionName) const;
        /// Creates a DeleteRequest to delete documents in the given collection.
-        /// The collectionname must not contain the database name.
+        /// The collectionname must not contain the database name. (old wire protocol)

        Poco::SharedPtr<Poco::MongoDB::InsertRequest> createInsertRequest(const std::string & collectionName) const;
        /// Creates an InsertRequest to insert new documents in the given collection.
-        /// The collectionname must not contain the database name.
+        /// The collectionname must not contain the database name. (old wire protocol)

        Poco::SharedPtr<Poco::MongoDB::QueryRequest> createQueryRequest(const std::string & collectionName) const;
-        /// Creates a QueryRequest.
+        /// Creates a QueryRequest. (old wire protocol)
        /// The collectionname must not contain the database name.

        Poco::SharedPtr<Poco::MongoDB::UpdateRequest> createUpdateRequest(const std::string & collectionName) const;
-        /// Creates an UpdateRequest.
+        /// Creates an UpdateRequest. (old wire protocol)
        /// The collectionname must not contain the database name.

+        Poco::SharedPtr<Poco::MongoDB::OpMsgMessage> createOpMsgMessage(const std::string & collectionName) const;
+        /// Creates OpMsgMessage. (new wire protocol)
+
+        Poco::SharedPtr<Poco::MongoDB::OpMsgMessage> createOpMsgMessage() const;
+        /// Creates OpMsgMessage for database commands that do not require collection as an argument. (new wire protocol)
+
+        Poco::SharedPtr<Poco::MongoDB::OpMsgCursor> createOpMsgCursor(const std::string & collectionName) const;
+        /// Creates OpMsgCursor. (new wire protocol)
+
        Poco::MongoDB::Document::Ptr ensureIndex(
            Connection & connection,
            const std::string & collection,
@@ -100,14 +120,16 @@ namespace MongoDB
            int version = 0,
            int ttl = 0);
        /// Creates an index. The document returned is the result of a getLastError call.
-        /// For more info look at the ensureIndex information on the MongoDB website.
+        /// For more info look at the ensureIndex information on the MongoDB website. (old wire protocol)

        Document::Ptr getLastErrorDoc(Connection & connection) const;
        /// Sends the getLastError command to the database and returns the error document.
+        /// (old wire protocol)

        std::string getLastError(Connection & connection) const;
        /// Sends the getLastError command to the database and returns the err element
        /// from the error document. When err is null, an empty string is returned.
+        /// (old wire protocol)

        static const std::string AUTH_MONGODB_CR;
        /// Default authentication mechanism prior to MongoDB 3.0.
@@ -115,6 +137,27 @@ namespace MongoDB
        static const std::string AUTH_SCRAM_SHA1;
        /// Default authentication mechanism for MongoDB 3.0.

+        enum WireVersion
+        /// Wire version as reported by the command hello.
+        /// See details in MongoDB github, repository specifications.
+        /// @see queryServerHello
+        {
+            VER_26 = 1,
+            VER_26_2 = 2,
+            VER_30 = 3,
+            VER_32 = 4,
+            VER_34 = 5,
+            VER_36 = 6, ///< First wire version that supports OP_MSG
+            VER_40 = 7,
+            VER_42 = 8,
+            VER_44 = 9,
+            VER_50 = 13,
+            VER_51 = 14, ///< First wire version that supports only OP_MSG
+            VER_52 = 15,
+            VER_53 = 16,
+            VER_60 = 17
+        };
+
    protected:
        bool authCR(Connection & connection, const std::string & username, const std::string & password);
        bool authSCRAM(Connection & connection, const std::string & username, const std::string & password);
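The WireVersion values above pair naturally with the new queryServerHello(). A minimal sketch of deciding whether the server speaks OP_MSG (hypothetical helper, assuming an established connection; getInteger is the existing Poco::MongoDB::Document accessor):

#include "Poco/MongoDB/Connection.h"
#include "Poco/MongoDB/Database.h"

// Sketch: true when the server supports OP_MSG (wire version >= VER_36).
bool supportsOpMsg(Poco::MongoDB::Connection & conn)
{
    Poco::MongoDB::Database db("admin");
    Poco::MongoDB::Document::Ptr hello = db.queryServerHello(conn);
    return hello->getInteger("maxWireVersion") >= Poco::MongoDB::Database::VER_36;
}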
@@ -127,6 +170,12 @@ namespace MongoDB
    //
    // inlines
    //
+    inline const std::string & Database::name() const
+    {
+        return _dbname;
+    }
+
+
    inline Poco::SharedPtr<Poco::MongoDB::QueryRequest> Database::createCommand() const
    {
        Poco::SharedPtr<Poco::MongoDB::QueryRequest> cmd = createQueryRequest("$cmd");
@@ -158,6 +207,24 @@ namespace MongoDB
        return new Poco::MongoDB::UpdateRequest(_dbname + '.' + collectionName);
    }

+    // -- New wire protocol commands
+
+    inline Poco::SharedPtr<Poco::MongoDB::OpMsgMessage> Database::createOpMsgMessage(const std::string & collectionName) const
+    {
+        return new Poco::MongoDB::OpMsgMessage(_dbname, collectionName);
+    }
+
+    inline Poco::SharedPtr<Poco::MongoDB::OpMsgMessage> Database::createOpMsgMessage() const
+    {
+        // Collection name for database commands is not needed.
+        return createOpMsgMessage("");
+    }
+
+    inline Poco::SharedPtr<Poco::MongoDB::OpMsgCursor> Database::createOpMsgCursor(const std::string & collectionName) const
+    {
+        return new Poco::MongoDB::OpMsgCursor(_dbname, collectionName);
+    }
+

}
} // namespace Poco::MongoDB
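The factory inlines above are the intended entry point for the new protocol. A minimal sketch of an insert through them (hypothetical database/collection names; assumes an established connection to a server that supports OP_MSG):

#include "Poco/MongoDB/Connection.h"
#include "Poco/MongoDB/Database.h"

// Sketch: insert one document over the new wire protocol.
void insertOne(Poco::MongoDB::Connection & conn)
{
    Poco::MongoDB::Database db("test");

    auto request = db.createOpMsgMessage("people");
    request->setCommandName(Poco::MongoDB::OpMsgMessage::CMD_INSERT);

    Poco::MongoDB::Document::Ptr doc = new Poco::MongoDB::Document();
    doc->add("name", std::string("Alice")).add("age", Poco::Int32(33));
    request->documents().push_back(doc);  // sent as a payload type 1 section

    Poco::MongoDB::OpMsgMessage response;
    conn.sendRequest(*request, response);
    // response.responseOk() tells whether the server acknowledged the insert.
}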
@@ -31,6 +31,7 @@ namespace Poco
namespace MongoDB
{

+    class Array;

    class ElementFindByName
    {
@@ -48,8 +49,8 @@ namespace MongoDB
    /// Represents a MongoDB (BSON) document.
    {
    public:
-        typedef SharedPtr<Document> Ptr;
-        typedef std::vector<Document::Ptr> Vector;
+        using Ptr = SharedPtr<Document>;
+        using Vector = std::vector<Document::Ptr>;

        Document();
        /// Creates an empty Document.
@@ -86,6 +87,10 @@ namespace MongoDB
        /// Unlike the other add methods, this method returns
        /// a reference to the new document.

+        Array & addNewArray(const std::string & name);
+        /// Create a new array and add it to this document.
+        /// Method returns a reference to the new array.
+
        void clear();
        /// Removes all elements from the document.
@@ -95,7 +100,7 @@ namespace MongoDB
        bool empty() const;
        /// Returns true if the document doesn't contain any documents.

-        bool exists(const std::string & name);
+        bool exists(const std::string & name) const;
        /// Returns true if the document has an element with the given name.

        template <typename T>
@@ -158,6 +163,9 @@ namespace MongoDB
        /// return an Int64. When the element is not found, a
        /// Poco::NotFoundException will be thrown.

+        bool remove(const std::string & name);
+        /// Removes an element from the document.
+
        template <typename T>
        bool isType(const std::string & name) const
        /// Returns true when the type of the element equals the TypeId of ElementTrait.
@@ -227,12 +235,23 @@ namespace MongoDB
    }


-    inline bool Document::exists(const std::string & name)
+    inline bool Document::exists(const std::string & name) const
    {
        return std::find_if(_elements.begin(), _elements.end(), ElementFindByName(name)) != _elements.end();
    }


+    inline bool Document::remove(const std::string & name)
+    {
+        auto it = std::find_if(_elements.begin(), _elements.end(), ElementFindByName(name));
+        if (it == _elements.end())
+            return false;
+
+        _elements.erase(it);
+        return true;
+    }
+
+
    inline std::size_t Document::size() const
    {
        return _elements.size();
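The new remove() above reports whether anything was actually erased, which composes nicely with the now-const exists(). A minimal sketch (standalone usage, not part of this commit):

#include "Poco/MongoDB/Document.h"
#include <cassert>

int main()
{
    Poco::MongoDB::Document doc;
    doc.add("status", std::string("ok"));

    assert(doc.exists("status"));   // usable on const documents too, per the diff
    assert(doc.remove("status"));   // true: element found and erased
    assert(!doc.remove("status"));  // false: already gone
    return 0;
}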
@@ -45,7 +45,7 @@ namespace MongoDB
    /// Represents an Element of a Document or an Array.
    {
    public:
-        typedef Poco::SharedPtr<Element> Ptr;
+        using Ptr = Poco::SharedPtr<Element>;

        explicit Element(const std::string & name);
        /// Creates the Element with the given name.
@@ -80,7 +80,7 @@ namespace MongoDB
    }


-    typedef std::list<Element::Ptr> ElementSet;
+    using ElementSet = std::list<Element::Ptr>;


    template <typename T>
@@ -266,7 +266,7 @@ namespace MongoDB
    }


-    typedef Nullable<unsigned char> NullValue;
+    using NullValue = Nullable<unsigned char>;


    // BSON Null Value
@@ -35,7 +35,7 @@ namespace MongoDB
    /// Represents JavaScript type in BSON.
    {
    public:
-        typedef SharedPtr<JavaScriptCode> Ptr;
+        using Ptr = SharedPtr<JavaScriptCode>;

        JavaScriptCode();
        /// Creates an empty JavaScriptCode object.
@@ -28,6 +28,9 @@ namespace MongoDB
{


+    class Message; // Required to disambiguate friend declaration in MessageHeader.
+
+
    class MongoDB_API MessageHeader
    /// Represents the message header which is always prepended to a
    /// MongoDB request or response message.
@@ -37,14 +40,18 @@ namespace MongoDB

        enum OpCode
        {
+            // Opcodes deprecated in MongoDB 5.0
            OP_REPLY = 1,
-            OP_MSG = 1000,
            OP_UPDATE = 2001,
            OP_INSERT = 2002,
            OP_QUERY = 2004,
            OP_GET_MORE = 2005,
            OP_DELETE = 2006,
-            OP_KILL_CURSORS = 2007
+            OP_KILL_CURSORS = 2007,
+
+            /// Opcodes supported in MongoDB 5.1 and later
+            OP_COMPRESSED = 2012,
+            OP_MSG = 2013
        };

        explicit MessageHeader(OpCode);
@@ -33,6 +33,13 @@
//


+#if defined(_WIN32) && defined(POCO_DLL)
+#    if defined(MongoDB_EXPORTS)
+#        define MongoDB_API __declspec(dllexport)
+#    else
+#        define MongoDB_API __declspec(dllimport)
+#    endif
+#endif


#if !defined(MongoDB_API)
@@ -47,6 +54,11 @@
//
// Automatically link MongoDB library.
//
+#if defined(_MSC_VER)
+#    if !defined(POCO_NO_AUTOMATIC_LIBS) && !defined(MongoDB_EXPORTS)
+#        pragma comment(lib, "PocoMongoDB" POCO_LIB_SUFFIX)
+#    endif
+#endif


#endif // MongoDBMongoDB_INCLUDED
@@ -44,7 +44,7 @@ namespace MongoDB
    /// as its value.
    {
    public:
-        typedef SharedPtr<ObjectId> Ptr;
+        using Ptr = SharedPtr<ObjectId>;

        explicit ObjectId(const std::string & id);
        /// Creates an ObjectId from a string.
96  base/poco/MongoDB/include/Poco/MongoDB/OpMsgCursor.h  Normal file
@@ -0,0 +1,96 @@
//
// OpMsgCursor.h
//
// Library: MongoDB
// Package: MongoDB
// Module:  OpMsgCursor
//
// Definition of the OpMsgCursor class.
//
// Copyright (c) 2012, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef MongoDB_OpMsgCursor_INCLUDED
#define MongoDB_OpMsgCursor_INCLUDED


#include "Poco/MongoDB/Connection.h"
#include "Poco/MongoDB/MongoDB.h"
#include "Poco/MongoDB/OpMsgMessage.h"

namespace Poco
{
namespace MongoDB
{


    class MongoDB_API OpMsgCursor : public Document
    /// OpMsgCursor is a helper class for querying multiple documents using OpMsgMessage.
    {
    public:
        OpMsgCursor(const std::string & dbname, const std::string & collectionName);
        /// Creates an OpMsgCursor for the given database and collection.

        virtual ~OpMsgCursor();
        /// Destroys the OpMsgCursor.

        void setEmptyFirstBatch(bool empty);
        /// An empty first batch is used to get an error response faster with little server processing

        bool emptyFirstBatch() const;

        void setBatchSize(Int32 batchSize);
        /// Set non-default batch size

        Int32 batchSize() const;
        /// Current batch size (zero or negative number indicates default batch size)

        Int64 cursorID() const;

        OpMsgMessage & next(Connection & connection);
        /// Tries to get the next documents. As long as the response message has a
        /// cursor ID, next can be called to retrieve the next bunch of documents.
        ///
        /// The cursor must be killed (see kill()) when not all documents are needed.

        OpMsgMessage & query();
        /// Returns the associated query.

        void kill(Connection & connection);
        /// Kills the cursor and resets it so that it can be reused.

    private:
        OpMsgMessage _query;
        OpMsgMessage _response;

        bool _emptyFirstBatch{false};
        Int32 _batchSize{-1};
        /// Batch size used in the cursor. Zero or negative value means that default shall be used.

        Int64 _cursorID{0};
    };


    //
    // inlines
    //
    inline OpMsgMessage & OpMsgCursor::query()
    {
        return _query;
    }

    inline Int64 OpMsgCursor::cursorID() const
    {
        return _cursorID;
    }


}
} // namespace Poco::MongoDB


#endif // MongoDB_OpMsgCursor_INCLUDED
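The header above is new in this commit. A minimal sketch of the intended call pattern: issue next() until the server reports cursor ID 0, or kill() the cursor early (hypothetical names; assumes a connected server that supports OP_MSG):

#include "Poco/MongoDB/Connection.h"
#include "Poco/MongoDB/Database.h"
#include "Poco/MongoDB/OpMsgCursor.h"

// Sketch: drain a find cursor batch by batch.
void dumpCollection(Poco::MongoDB::Connection & conn)
{
    Poco::MongoDB::Database db("test");
    auto cursor = db.createOpMsgCursor("people");
    cursor->query().setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND);

    for (;;)
    {
        Poco::MongoDB::OpMsgMessage & reply = cursor->next(conn);
        for (const auto & doc : reply.documents())
        {
            // ... process *doc ...
        }
        if (cursor->cursorID() == 0)
            break;  // server exhausted the cursor
        // If stopping early instead, call cursor->kill(conn).
    }
}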
163  base/poco/MongoDB/include/Poco/MongoDB/OpMsgMessage.h  Normal file
@@ -0,0 +1,163 @@
//
// OpMsgMessage.h
//
// Library: MongoDB
// Package: MongoDB
// Module:  OpMsgMessage
//
// Definition of the OpMsgMessage class.
//
// Copyright (c) 2022, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef MongoDB_OpMsgMessage_INCLUDED
#define MongoDB_OpMsgMessage_INCLUDED


#include "Poco/MongoDB/Document.h"
#include "Poco/MongoDB/Message.h"
#include "Poco/MongoDB/MongoDB.h"

#include <string>

namespace Poco
{
namespace MongoDB
{


    class MongoDB_API OpMsgMessage : public Message
    /// This class represents a request/response (OP_MSG) to send requests and receive responses to/from MongoDB.
    {
    public:
        // Constants for most often used MongoDB commands that can be sent using OP_MSG
        // For complete list see: https://www.mongodb.com/docs/manual/reference/command/

        // Query and write
        static const std::string CMD_INSERT;
        static const std::string CMD_DELETE;
        static const std::string CMD_UPDATE;
        static const std::string CMD_FIND;
        static const std::string CMD_FIND_AND_MODIFY;
        static const std::string CMD_GET_MORE;

        // Aggregation
        static const std::string CMD_AGGREGATE;
        static const std::string CMD_COUNT;
        static const std::string CMD_DISTINCT;
        static const std::string CMD_MAP_REDUCE;

        // Replication and administration
        static const std::string CMD_HELLO;
        static const std::string CMD_REPL_SET_GET_STATUS;
        static const std::string CMD_REPL_SET_GET_CONFIG;

        static const std::string CMD_CREATE;
        static const std::string CMD_CREATE_INDEXES;
        static const std::string CMD_DROP;
        static const std::string CMD_DROP_DATABASE;
        static const std::string CMD_KILL_CURSORS;
        static const std::string CMD_LIST_DATABASES;
        static const std::string CMD_LIST_INDEXES;

        // Diagnostic
        static const std::string CMD_BUILD_INFO;
        static const std::string CMD_COLL_STATS;
        static const std::string CMD_DB_STATS;
        static const std::string CMD_HOST_INFO;


        enum Flags : UInt32
        {
            MSG_FLAGS_DEFAULT = 0,

            MSG_CHECKSUM_PRESENT = (1 << 0),

            MSG_MORE_TO_COME = (1 << 1),
            /// Sender will send another message and is not prepared for overlapping messages

            MSG_EXHAUST_ALLOWED = (1 << 16)
            /// Client is prepared for multiple replies (using the moreToCome bit) to this request
        };

        OpMsgMessage();
        /// Creates an OpMsgMessage for response.

        OpMsgMessage(const std::string & databaseName, const std::string & collectionName, UInt32 flags = MSG_FLAGS_DEFAULT);
        /// Creates an OpMsgMessage for requests.

        virtual ~OpMsgMessage();

        const std::string & databaseName() const;

        const std::string & collectionName() const;

        void setCommandName(const std::string & command);
        /// Sets the command name and clears the command document

        void setCursor(Poco::Int64 cursorID, Poco::Int32 batchSize = -1);
        /// Sets the command "getMore" for the cursor id with batch size (if it is not negative).

        const std::string & commandName() const;
        /// Current command name.

        void setAcknowledgedRequest(bool ack);
        /// Set false to create a request that does not return a response.
        /// It has effect only for commands that write or delete documents.
        /// Default is true (request returns acknowledge response).

        bool acknowledgedRequest() const;

        UInt32 flags() const;

        Document & body();
        /// Access to body document.
        /// Additional query arguments shall be added after setting the command name.

        const Document & body() const;

        Document::Vector & documents();
        /// Documents prepared for request or retrieved in response.

        const Document::Vector & documents() const;
        /// Documents prepared for request or retrieved in response.

        bool responseOk() const;
        /// Reads "ok" status from the response message.

        void clear();
        /// Clears the message.

        void send(std::ostream & ostr);
        /// Writes the request to stream.

        void read(std::istream & istr);
        /// Reads the response from the stream.

    private:
        enum PayloadType : UInt8
        {
            PAYLOAD_TYPE_0 = 0,
            PAYLOAD_TYPE_1 = 1
        };

        std::string _databaseName;
        std::string _collectionName;
        UInt32 _flags{MSG_FLAGS_DEFAULT};
        std::string _commandName;
        bool _acknowledged{true};

        Document _body;
        Document::Vector _documents;
    };


}
} // namespace Poco::MongoDB


#endif // MongoDB_OpMsgMessage_INCLUDED
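Per the class above, setCommandName() seeds the body with the command and "$db" fields, and further arguments are appended afterwards. A minimal sketch of building a "find" request body (hypothetical helper and field values; addNewDocument is the existing Poco::MongoDB::Document API used elsewhere in this commit):

#include "Poco/MongoDB/OpMsgMessage.h"

// Sketch: fill a find-by-name request for an already constructed message.
void fillFindByName(Poco::MongoDB::OpMsgMessage & request, const std::string & name)
{
    // Adds {find: <collection>, $db: <database>} and clears any previous body.
    request.setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND);

    // Additional query arguments go into the body after the command name.
    request.body().addNewDocument("filter").add("name", name);
    request.body().add("limit", Poco::Int32(10));
}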
@@ -94,7 +94,23 @@ namespace MongoDB

        operator Connection::Ptr() { return _connection; }

+#if defined(POCO_ENABLE_CPP11)
+        // Disable copy to prevent unwanted release of resources: C++11 way
+        PooledConnection(const PooledConnection &) = delete;
+        PooledConnection & operator=(const PooledConnection &) = delete;
+
+        // Enable move semantics
+        PooledConnection(PooledConnection && other) = default;
+        PooledConnection & operator=(PooledConnection &&) = default;
+#endif

    private:
+#if !defined(POCO_ENABLE_CPP11)
+        // Disable copy to prevent unwanted release of resources: pre C++11 way
+        PooledConnection(const PooledConnection &);
+        PooledConnection & operator=(const PooledConnection &);
+#endif
+
        Poco::ObjectPool<Connection, Connection::Ptr> & _pool;
        Connection::Ptr _connection;
    };
@@ -33,7 +33,7 @@ namespace MongoDB
    /// Represents a regular expression in BSON format.
    {
    public:
-        typedef SharedPtr<RegularExpression> Ptr;
+        using Ptr = SharedPtr<RegularExpression>;

        RegularExpression();
        /// Creates an empty RegularExpression.
@@ -38,6 +38,9 @@ namespace MongoDB
        ResponseMessage();
        /// Creates an empty ResponseMessage.

+        ResponseMessage(const Int64 & cursorID);
+        /// Creates a ResponseMessage for an existing cursor ID.
+
        virtual ~ResponseMessage();
        /// Destroys the ResponseMessage.
@@ -20,7 +20,7 @@ namespace Poco {
namespace MongoDB {


Array::Array():
    Document()
{
}
@@ -31,7 +31,7 @@ Array::~Array()
}


-Element::Ptr Array::get(int pos) const
+Element::Ptr Array::get(std::size_t pos) const
{
    std::string name = Poco::NumberFormatter::format(pos);
    return Document::get(name);
@@ -319,4 +319,30 @@ void Connection::sendRequest(RequestMessage& request, ResponseMessage& response)
}


+void Connection::sendRequest(OpMsgMessage& request, OpMsgMessage& response)
+{
+    Poco::Net::SocketOutputStream sos(_socket);
+    request.send(sos);
+
+    response.clear();
+    readResponse(response);
+}
+
+
+void Connection::sendRequest(OpMsgMessage& request)
+{
+    request.setAcknowledgedRequest(false);
+    Poco::Net::SocketOutputStream sos(_socket);
+    request.send(sos);
+}
+
+
+void Connection::readResponse(OpMsgMessage& response)
+{
+    Poco::Net::SocketInputStream sis(_socket);
+    response.read(sis);
+}
+
+
} } // Poco::MongoDB
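A minimal sketch of a full OP_MSG round trip through the implementations above (hypothetical host/port; a "hello" against the admin database works without a collection):

#include "Poco/MongoDB/Connection.h"
#include "Poco/MongoDB/OpMsgMessage.h"
#include <iostream>

int main()
{
    Poco::MongoDB::Connection conn("localhost", 27017);

    Poco::MongoDB::OpMsgMessage request("admin", "");
    request.setCommandName(Poco::MongoDB::OpMsgMessage::CMD_HELLO);

    Poco::MongoDB::OpMsgMessage response;
    conn.sendRequest(request, response);  // send, clear, then readResponse, as above

    std::cout << (response.responseOk() ? "ok" : "failed") << std::endl;
    return 0;
}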
@@ -33,6 +33,12 @@ Cursor::Cursor(const std::string& fullCollectionName, QueryRequest::Flags flags)
}


+Cursor::Cursor(const Document& aggregationResponse) :
+    _query(aggregationResponse.get<Poco::MongoDB::Document::Ptr>("cursor")->get<std::string>("ns")),
+    _response(aggregationResponse.get<Poco::MongoDB::Document::Ptr>("cursor")->get<Int64>("id"))
+{
+}
+
Cursor::~Cursor()
{
    try
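The new constructor above pulls "cursor.ns" and "cursor.id" out of an aggregation reply, so the old-wire-protocol cursor machinery can continue a server-side aggregation. A minimal sketch (hypothetical helper; assumes "response" already holds the first reply to an "aggregate" command):

#include "Poco/MongoDB/Connection.h"
#include "Poco/MongoDB/Cursor.h"
#include "Poco/MongoDB/ResponseMessage.h"

// Sketch: drain the remaining batches of an aggregation.
void drainAggregation(Poco::MongoDB::Connection & conn, Poco::MongoDB::ResponseMessage & response)
{
    Poco::MongoDB::Cursor cursor(*response.documents()[0]);
    while (true)
    {
        Poco::MongoDB::ResponseMessage & batch = cursor.next(conn);
        // ... consume batch.documents() ...
        if (batch.cursorID() == 0)
            break;
    }
}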
@@ -334,6 +334,50 @@ bool Database::authSCRAM(Connection& connection, const std::string& username, const std::string& password)
}


+Document::Ptr Database::queryBuildInfo(Connection& connection) const
+{
+    // build info can be issued on "config" system database
+    Poco::SharedPtr<Poco::MongoDB::QueryRequest> request = createCommand();
+    request->selector().add("buildInfo", 1);
+
+    Poco::MongoDB::ResponseMessage response;
+    connection.sendRequest(*request, response);
+
+    Document::Ptr buildInfo;
+    if (response.documents().size() > 0)
+    {
+        buildInfo = response.documents()[0];
+    }
+    else
+    {
+        throw Poco::ProtocolException("Didn't get a response from the buildinfo command");
+    }
+    return buildInfo;
+}
+
+
+Document::Ptr Database::queryServerHello(Connection& connection) const
+{
+    // hello can be issued on "config" system database
+    Poco::SharedPtr<Poco::MongoDB::QueryRequest> request = createCommand();
+    request->selector().add("hello", 1);
+
+    Poco::MongoDB::ResponseMessage response;
+    connection.sendRequest(*request, response);
+
+    Document::Ptr hello;
+    if (response.documents().size() > 0)
+    {
+        hello = response.documents()[0];
+    }
+    else
+    {
+        throw Poco::ProtocolException("Didn't get a response from the hello command");
+    }
+    return hello;
+}
+
+
Int64 Database::count(Connection& connection, const std::string& collectionName) const
{
    Poco::SharedPtr<Poco::MongoDB::QueryRequest> countRequest = createCountRequest(collectionName);
@@ -390,7 +434,7 @@ Document::Ptr Database::getLastErrorDoc(Connection& connection) const
{
    Document::Ptr errorDoc;

-    Poco::SharedPtr<Poco::MongoDB::QueryRequest> request = createQueryRequest("$cmd");
+    Poco::SharedPtr<Poco::MongoDB::QueryRequest> request = createCommand();
    request->setNumberToReturn(1);
    request->selector().add("getLastError", 1);

@@ -420,7 +464,7 @@ std::string Database::getLastError(Connection& connection) const

Poco::SharedPtr<Poco::MongoDB::QueryRequest> Database::createCountRequest(const std::string& collectionName) const
{
-    Poco::SharedPtr<Poco::MongoDB::QueryRequest> request = createQueryRequest("$cmd");
+    Poco::SharedPtr<Poco::MongoDB::QueryRequest> request = createCommand();
    request->setNumberToReturn(1);
    request->selector().add("count", collectionName);
    return request;
@@ -20,8 +20,8 @@ namespace MongoDB {


DeleteRequest::DeleteRequest(const std::string& collectionName, DeleteRequest::Flags flags):
    RequestMessage(MessageHeader::OP_DELETE),
    _flags(flags),
    _fullCollectionName(collectionName),
    _selector()
{
@@ -35,6 +35,14 @@ Document::~Document()
}


+Array& Document::addNewArray(const std::string& name)
+{
+    Array::Ptr newArray = new Array();
+    add(name, newArray);
+    return *newArray;
+}
+
+
Element::Ptr Document::get(const std::string& name) const
{
    Element::Ptr element;
@@ -84,7 +92,7 @@ void Document::read(BinaryReader& reader)
    while (type != '\0')
    {
        Element::Ptr element;

        std::string name = BSONReader(reader).readCString();

        switch (type)
@@ -198,7 +206,7 @@ void Document::write(BinaryWriter& writer)
    else
    {
        std::stringstream sstream;
-        Poco::BinaryWriter tempWriter(sstream);
+        Poco::BinaryWriter tempWriter(sstream, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER);
        for (ElementSet::iterator it = _elements.begin(); it != _elements.end(); ++it)
        {
            tempWriter << static_cast<unsigned char>((*it)->type());
@@ -207,7 +215,7 @@ void Document::write(BinaryWriter& writer)
            element->write(tempWriter);
        }
        tempWriter.flush();


        Poco::Int32 len = static_cast<Poco::Int32>(5 + sstream.tellp()); /* 5 = sizeof(len) + 0-byte */
        writer << len;
        writer.writeRaw(sstream.str());
@@ -24,7 +24,7 @@ Element::Element(const std::string& name) : _name(name)
}


Element::~Element()
{
}
@@ -21,7 +21,7 @@ namespace MongoDB {


GetMoreRequest::GetMoreRequest(const std::string& collectionName, Int64 cursorID):
    RequestMessage(MessageHeader::OP_GET_MORE),
    _fullCollectionName(collectionName),
    _numberToReturn(100),
    _cursorID(cursorID)
@@ -20,7 +20,7 @@ namespace MongoDB {


InsertRequest::InsertRequest(const std::string& collectionName, Flags flags):
    RequestMessage(MessageHeader::OP_INSERT),
    _flags(flags),
    _fullCollectionName(collectionName)
{
@@ -37,7 +37,7 @@ void KillCursorsRequest::buildRequest(BinaryWriter& writer)
    for (std::vector<Int64>::iterator it = _cursors.begin(); it != _cursors.end(); ++it)
    {
        writer << *it;
    }
}
@@ -19,7 +19,7 @@ namespace Poco {
namespace MongoDB {


Message::Message(MessageHeader::OpCode opcode):
    _header(opcode)
{
}
@@ -20,10 +20,10 @@ namespace Poco {
namespace MongoDB {


MessageHeader::MessageHeader(OpCode opCode):
    _messageLength(0),
    _requestID(0),
    _responseTo(0),
    _opCode(opCode)
{
}
@@ -42,7 +42,7 @@ void MessageHeader::read(BinaryReader& reader)

    Int32 opCode;
    reader >> opCode;
-    _opCode = (OpCode) opCode;
+    _opCode = static_cast<OpCode>(opCode);

    if (!reader.good())
    {
@@ -56,7 +56,7 @@ void MessageHeader::write(BinaryWriter& writer)
    writer << _messageLength;
    writer << _requestID;
    writer << _responseTo;
-    writer << (Int32) _opCode;
+    writer << static_cast<Int32>(_opCode);
}
@@ -32,7 +32,7 @@ ObjectId::ObjectId(const std::string& id)
    poco_assert_dbg(id.size() == 24);

    const char* p = id.c_str();
    for (std::size_t i = 0; i < 12; ++i)
    {
        _id[i] = fromHex(p);
        p += 2;
187  base/poco/MongoDB/src/OpMsgCursor.cpp  Normal file
@@ -0,0 +1,187 @@
//
// OpMsgCursor.cpp
//
// Library: MongoDB
// Package: MongoDB
// Module:  OpMsgCursor
//
// Copyright (c) 2022, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include "Poco/MongoDB/OpMsgCursor.h"
#include "Poco/MongoDB/Array.h"

//
// NOTE:
//
// MongoDB specification indicates that the flag MSG_EXHAUST_ALLOWED shall be
// used in the request when the receiver is ready to receive multiple messages
// without sending additional requests in between. Sender (MongoDB) indicates
// that more messages follow with flag MSG_MORE_TO_COME.
//
// It seems that this does not work properly. MSG_MORE_TO_COME is set and reading
// next messages sometimes works, however often the data is missing in the response
// or the message header contains a wrong message length and reading blocks.
// Opcode in the header is correct.
//
// Using MSG_EXHAUST_ALLOWED is therefore currently disabled.
//
// The related JIRA ticket seems to be:
//
// https://jira.mongodb.org/browse/SERVER-57297
//
// https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst
//

#define MONGODB_EXHAUST_ALLOWED_WORKS false

namespace Poco {
namespace MongoDB {


static const std::string keyCursor {"cursor"};
static const std::string keyFirstBatch {"firstBatch"};
static const std::string keyNextBatch {"nextBatch"};

static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);


OpMsgCursor::OpMsgCursor(const std::string& db, const std::string& collection):
#if MONGODB_EXHAUST_ALLOWED_WORKS
    _query(db, collection, OpMsgMessage::MSG_EXHAUST_ALLOWED)
#else
    _query(db, collection)
#endif
{
}

OpMsgCursor::~OpMsgCursor()
{
    try
    {
        poco_assert_dbg(_cursorID == 0);
    }
    catch (...)
    {
    }
}


void OpMsgCursor::setEmptyFirstBatch(bool empty)
{
    _emptyFirstBatch = empty;
}


bool OpMsgCursor::emptyFirstBatch() const
{
    return _emptyFirstBatch;
}


void OpMsgCursor::setBatchSize(Int32 batchSize)
{
    _batchSize = batchSize;
}


Int32 OpMsgCursor::batchSize() const
{
    return _batchSize;
}


OpMsgMessage& OpMsgCursor::next(Connection& connection)
{
    if (_cursorID == 0)
    {
        _response.clear();

        if (_emptyFirstBatch || _batchSize > 0)
        {
            Int32 bsize = _emptyFirstBatch ? 0 : _batchSize;
            if (_query.commandName() == OpMsgMessage::CMD_FIND)
            {
                _query.body().add("batchSize", bsize);
            }
            else if (_query.commandName() == OpMsgMessage::CMD_AGGREGATE)
            {
                auto& cursorDoc = _query.body().addNewDocument("cursor");
                cursorDoc.add("batchSize", bsize);
            }
        }

        connection.sendRequest(_query, _response);

        const auto& rdoc = _response.body();
        _cursorID = cursorIdFromResponse(rdoc);
    }
    else
    {
#if MONGODB_EXHAUST_ALLOWED_WORKS
        std::cout << "Response flags: " << _response.flags() << std::endl;
        if (_response.flags() & OpMsgMessage::MSG_MORE_TO_COME)
        {
            std::cout << "More to come. Reading more response: " << std::endl;
            _response.clear();
            connection.readResponse(_response);
        }
        else
#endif
        {
            _response.clear();
            _query.setCursor(_cursorID, _batchSize);
            connection.sendRequest(_query, _response);
        }
    }

    const auto& rdoc = _response.body();
    _cursorID = cursorIdFromResponse(rdoc);

    return _response;
}


void OpMsgCursor::kill(Connection& connection)
{
    _response.clear();
    if (_cursorID != 0)
    {
        _query.setCommandName(OpMsgMessage::CMD_KILL_CURSORS);

        MongoDB::Array::Ptr cursors = new MongoDB::Array();
        cursors->add<Poco::Int64>(_cursorID);
        _query.body().add("cursors", cursors);

        connection.sendRequest(_query, _response);

        const auto killed = _response.body().get<MongoDB::Array::Ptr>("cursorsKilled", nullptr);
        if (!killed || killed->size() != 1 || killed->get<Poco::Int64>(0, -1) != _cursorID)
        {
            throw Poco::ProtocolException("Cursor not killed as expected: " + std::to_string(_cursorID));
        }

        _cursorID = 0;
        _query.clear();
        _response.clear();
    }
}


Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc)
{
    Poco::Int64 id {0};
    auto cursorDoc = doc.get<Document::Ptr>(keyCursor, nullptr);
    if (cursorDoc)
    {
        id = cursorDoc->get<Poco::Int64>("id", 0);
    }
    return id;
}


} } // Namespace Poco::MongoDB
412  base/poco/MongoDB/src/OpMsgMessage.cpp  Normal file
@@ -0,0 +1,412 @@
//
// OpMsgMessage.cpp
//
// Library: MongoDB
// Package: MongoDB
// Module:  OpMsgMessage
//
// Copyright (c) 2022, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//

#include "Poco/MongoDB/OpMsgMessage.h"
#include "Poco/MongoDB/MessageHeader.h"
#include "Poco/MongoDB/Array.h"
#include "Poco/StreamCopier.h"
#include "Poco/Logger.h"

#define POCO_MONGODB_DUMP false

namespace Poco {
namespace MongoDB {

// Query and write
const std::string OpMsgMessage::CMD_INSERT { "insert" };
const std::string OpMsgMessage::CMD_DELETE { "delete" };
const std::string OpMsgMessage::CMD_UPDATE { "update" };
const std::string OpMsgMessage::CMD_FIND { "find" };
const std::string OpMsgMessage::CMD_FIND_AND_MODIFY { "findAndModify" };
const std::string OpMsgMessage::CMD_GET_MORE { "getMore" };

// Aggregation
const std::string OpMsgMessage::CMD_AGGREGATE { "aggregate" };
const std::string OpMsgMessage::CMD_COUNT { "count" };
const std::string OpMsgMessage::CMD_DISTINCT { "distinct" };
const std::string OpMsgMessage::CMD_MAP_REDUCE { "mapReduce" };

// Replication and administration
const std::string OpMsgMessage::CMD_HELLO { "hello" };
const std::string OpMsgMessage::CMD_REPL_SET_GET_STATUS { "replSetGetStatus" };
const std::string OpMsgMessage::CMD_REPL_SET_GET_CONFIG { "replSetGetConfig" };

const std::string OpMsgMessage::CMD_CREATE { "create" };
const std::string OpMsgMessage::CMD_CREATE_INDEXES { "createIndexes" };
const std::string OpMsgMessage::CMD_DROP { "drop" };
const std::string OpMsgMessage::CMD_DROP_DATABASE { "dropDatabase" };
const std::string OpMsgMessage::CMD_KILL_CURSORS { "killCursors" };
const std::string OpMsgMessage::CMD_LIST_DATABASES { "listDatabases" };
const std::string OpMsgMessage::CMD_LIST_INDEXES { "listIndexes" };

// Diagnostic
const std::string OpMsgMessage::CMD_BUILD_INFO { "buildInfo" };
const std::string OpMsgMessage::CMD_COLL_STATS { "collStats" };
const std::string OpMsgMessage::CMD_DB_STATS { "dbStats" };
const std::string OpMsgMessage::CMD_HOST_INFO { "hostInfo" };


static const std::string& commandIdentifier(const std::string& command);
/// Commands have different names for the payload that is sent in a separate section


static const std::string keyCursor {"cursor"};
static const std::string keyFirstBatch {"firstBatch"};
static const std::string keyNextBatch {"nextBatch"};


OpMsgMessage::OpMsgMessage() :
    Message(MessageHeader::OP_MSG)
{
}


OpMsgMessage::OpMsgMessage(const std::string& databaseName, const std::string& collectionName, UInt32 flags) :
    Message(MessageHeader::OP_MSG),
    _databaseName(databaseName),
    _collectionName(collectionName),
    _flags(flags)
{
}


OpMsgMessage::~OpMsgMessage()
{
}

const std::string& OpMsgMessage::databaseName() const
{
    return _databaseName;
}


const std::string& OpMsgMessage::collectionName() const
{
    return _collectionName;
}


void OpMsgMessage::setCommandName(const std::string& command)
{
    _commandName = command;
    _body.clear();

    // IMPORTANT: Command name must be first
    if (_collectionName.empty())
    {
        // Collection is not specified. It is assumed that this particular command does
        // not need it.
        _body.add(_commandName, Int32(1));
    }
    else
    {
        _body.add(_commandName, _collectionName);
    }
    _body.add("$db", _databaseName);
}


void OpMsgMessage::setCursor(Poco::Int64 cursorID, Poco::Int32 batchSize)
{
    _commandName = OpMsgMessage::CMD_GET_MORE;
    _body.clear();

    // IMPORTANT: Command name must be first
    _body.add(_commandName, cursorID);
    _body.add("$db", _databaseName);
    _body.add("collection", _collectionName);
    if (batchSize > 0)
    {
        _body.add("batchSize", batchSize);
    }
}


const std::string& OpMsgMessage::commandName() const
{
    return _commandName;
}


void OpMsgMessage::setAcknowledgedRequest(bool ack)
{
    const auto& id = commandIdentifier(_commandName);
    if (id.empty())
        return;

    _acknowledged = ack;

    auto writeConcern = _body.get<Document::Ptr>("writeConcern", nullptr);
    if (writeConcern)
        writeConcern->remove("w");

    if (ack)
    {
        _flags = _flags & (~MSG_MORE_TO_COME);
    }
    else
    {
        _flags = _flags | MSG_MORE_TO_COME;
        if (!writeConcern)
            _body.addNewDocument("writeConcern").add("w", 0);
        else
            writeConcern->add("w", 0);
    }
}


bool OpMsgMessage::acknowledgedRequest() const
{
    return _acknowledged;
}


UInt32 OpMsgMessage::flags() const
{
    return _flags;
}


Document& OpMsgMessage::body()
{
    return _body;
}


const Document& OpMsgMessage::body() const
{
    return _body;
}


Document::Vector& OpMsgMessage::documents()
{
    return _documents;
}


const Document::Vector& OpMsgMessage::documents() const
{
    return _documents;
}


bool OpMsgMessage::responseOk() const
{
    Poco::Int64 ok {false};
    if (_body.exists("ok"))
    {
        ok = _body.getInteger("ok");
    }
    return (ok != 0);
}


void OpMsgMessage::clear()
{
    _flags = MSG_FLAGS_DEFAULT;
    _commandName.clear();
    _body.clear();
    _documents.clear();
}


void OpMsgMessage::send(std::ostream& ostr)
{
    BinaryWriter socketWriter(ostr, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER);

    // Serialise the body
    std::stringstream ss;
    BinaryWriter writer(ss, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER);
    writer << _flags;

    writer << PAYLOAD_TYPE_0;
    _body.write(writer);

    if (!_documents.empty())
    {
        // Serialise attached documents

        std::stringstream ssdoc;
        BinaryWriter wdoc(ssdoc, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER);
        for (auto& doc: _documents)
        {
            doc->write(wdoc);
        }
        wdoc.flush();

        const std::string& identifier = commandIdentifier(_commandName);
        const Poco::Int32 size = static_cast<Poco::Int32>(sizeof(size) + identifier.size() + 1 + ssdoc.tellp());
        writer << PAYLOAD_TYPE_1;
        writer << size;
        writer.writeCString(identifier.c_str());
        StreamCopier::copyStream(ssdoc, ss);
    }
    writer.flush();

#if POCO_MONGODB_DUMP
    const std::string section = ss.str();
    std::string dump;
    Logger::formatDump(dump, section.data(), section.length());
    std::cout << dump << std::endl;
#endif

    messageLength(static_cast<Poco::Int32>(ss.tellp()));

    _header.write(socketWriter);
    StreamCopier::copyStream(ss, ostr);

    ostr.flush();
}


void OpMsgMessage::read(std::istream& istr)
{
    std::string message;
    {
        BinaryReader reader(istr, BinaryReader::LITTLE_ENDIAN_BYTE_ORDER);
        _header.read(reader);

        poco_assert_dbg(_header.opCode() == _header.OP_MSG);

        const std::streamsize remainingSize {_header.getMessageLength() - _header.MSG_HEADER_SIZE };
        message.reserve(remainingSize);

#if POCO_MONGODB_DUMP
        std::cout
            << "Message hdr: " << _header.getMessageLength() << " " << remainingSize << " "
            << _header.opCode() << " " << _header.getRequestID() << " " << _header.responseTo()
            << std::endl;
#endif

        reader.readRaw(remainingSize, message);

#if POCO_MONGODB_DUMP
        std::string dump;
        Logger::formatDump(dump, message.data(), message.length());
        std::cout << dump << std::endl;
#endif
    }
    // Read complete message and then interpret it.

    std::istringstream msgss(message);
    BinaryReader reader(msgss, BinaryReader::LITTLE_ENDIAN_BYTE_ORDER);

    Poco::UInt8 payloadType {0xFF};

    reader >> _flags;
    reader >> payloadType;
    poco_assert_dbg(payloadType == PAYLOAD_TYPE_0);

    _body.read(reader);

    // Read next sections from the buffer
    while (msgss.good())
    {
        // NOTE: Not tested yet with database, because it returns everything in the body.
        // Does MongoDB ever return documents as Payload type 1?
        reader >> payloadType;
        if (!msgss.good())
        {
            break;
        }
        poco_assert_dbg(payloadType == PAYLOAD_TYPE_1);
#if POCO_MONGODB_DUMP
        std::cout << "section payload: " << payloadType << std::endl;
#endif

        Poco::Int32 sectionSize {0};
        reader >> sectionSize;
        poco_assert_dbg(sectionSize > 0);

#if POCO_MONGODB_DUMP
        std::cout << "section size: " << sectionSize << std::endl;
#endif
        std::streamoff offset = sectionSize - sizeof(sectionSize);
        std::streampos endOfSection = msgss.tellg() + offset;

        std::string identifier;
        reader.readCString(identifier);
#if POCO_MONGODB_DUMP
        std::cout << "section identifier: " << identifier << std::endl;
#endif

        // Loop to read documents from this section.
        while (msgss.tellg() < endOfSection)
        {
#if POCO_MONGODB_DUMP
            std::cout << "section doc: " << msgss.tellg() << " " << endOfSection << std::endl;
#endif
            Document::Ptr doc = new Document();
            doc->read(reader);
            _documents.push_back(doc);
            if (msgss.tellg() < 0)
            {
                break;
            }
        }
    }

    // Extract documents from the cursor batch if they are there.
    MongoDB::Array::Ptr batch;
    auto curDoc = _body.get<MongoDB::Document::Ptr>(keyCursor, nullptr);
    if (curDoc)
    {
        batch = curDoc->get<MongoDB::Array::Ptr>(keyFirstBatch, nullptr);
        if (!batch)
        {
            batch = curDoc->get<MongoDB::Array::Ptr>(keyNextBatch, nullptr);
        }
    }
    if (batch)
    {
        for (std::size_t i = 0; i < batch->size(); i++)
        {
            const auto& d = batch->get<MongoDB::Document::Ptr>(i, nullptr);
            if (d)
            {
                _documents.push_back(d);
            }
        }
    }
}

const std::string& commandIdentifier(const std::string& command)
{
    // Names of identifiers for commands that send bulk documents in the request
    // The identifier is set in the section type 1.
    static std::map<std::string, std::string> identifiers {
        { OpMsgMessage::CMD_INSERT, "documents" },
        { OpMsgMessage::CMD_DELETE, "deletes" },
        { OpMsgMessage::CMD_UPDATE, "updates" },

        // Not sure if create index can send document section
        { OpMsgMessage::CMD_CREATE_INDEXES, "indexes" }
    };

    const auto i = identifiers.find(command);
    if (i != identifiers.end())
    {
        return i->second;
    }

    // This likely means that documents are incorrectly set for a command
    // that does not send a list of documents in section type 1.
    static const std::string emptyIdentifier;
    return emptyIdentifier;
}


} } // namespace Poco::MongoDB
@@ -20,10 +20,10 @@ namespace MongoDB {


QueryRequest::QueryRequest(const std::string& collectionName, QueryRequest::Flags flags):
    RequestMessage(MessageHeader::OP_QUERY),
    _flags(flags),
    _fullCollectionName(collectionName),
    _numberToSkip(0),
    _numberToReturn(100),
    _selector(),
    _returnFieldSelector()
@@ -25,8 +25,8 @@ RegularExpression::RegularExpression()
}


RegularExpression::RegularExpression(const std::string& pattern, const std::string& options):
    _pattern(pattern),
    _options(options)
{
}
@@ -21,7 +21,7 @@ namespace Poco {
namespace MongoDB {


ReplicaSet::ReplicaSet(const std::vector<Net::SocketAddress> &addresses):
    _addresses(addresses)
{
}
@@ -81,8 +81,8 @@ Connection::Ptr ReplicaSet::isMaster(const Net::SocketAddress& address)
    {
        conn = 0;
    }

    return 0;
}
@@ -21,7 +21,7 @@ namespace Poco {
namespace MongoDB {


RequestMessage::RequestMessage(MessageHeader::OpCode opcode):
    Message(opcode)
{
}
@@ -35,7 +35,7 @@ RequestMessage::~RequestMessage()

void RequestMessage::send(std::ostream& ostr)
{
    std::stringstream ss;
-    BinaryWriter requestWriter(ss);
+    BinaryWriter requestWriter(ss, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER);
    buildRequest(requestWriter);
    requestWriter.flush();
@ -21,10 +21,20 @@ namespace MongoDB {
|
||||
|
||||
|
||||
ResponseMessage::ResponseMessage():
|
||||
Message(MessageHeader::OP_REPLY),
|
||||
_responseFlags(0),
|
||||
_cursorID(0),
|
||||
_startingFrom(0),
|
||||
Message(MessageHeader::OP_REPLY),
|
||||
_responseFlags(0),
|
||||
_cursorID(0),
|
||||
_startingFrom(0),
|
||||
_numberReturned(0)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
ResponseMessage::ResponseMessage(const Int64& cursorID):
|
||||
Message(MessageHeader::OP_REPLY),
|
||||
_responseFlags(0),
|
||||
_cursorID(cursorID),
|
||||
_startingFrom(0),
|
||||
_numberReturned(0)
|
||||
{
|
||||
}
|
||||
@ -50,7 +60,7 @@ void ResponseMessage::read(std::istream& istr)
|
||||
clear();
|
||||
|
||||
BinaryReader reader(istr, BinaryReader::LITTLE_ENDIAN_BYTE_ORDER);
|
||||
|
||||
|
||||
_header.read(reader);
|
||||
|
||||
reader >> _responseFlags;
|
||||
|
@ -20,7 +20,7 @@ namespace MongoDB {
|
||||
|
||||
|
||||
UpdateRequest::UpdateRequest(const std::string& collectionName, UpdateRequest::Flags flags):
|
||||
RequestMessage(MessageHeader::OP_UPDATE),
|
||||
RequestMessage(MessageHeader::OP_UPDATE),
|
||||
_flags(flags),
|
||||
_fullCollectionName(collectionName),
|
||||
_selector(),
|
||||
|
@ -19,7 +19,6 @@


#include "Poco/BufferedStreamBuf.h"
#include "Poco/Net/HTTPBufferAllocator.h"
#include "Poco/Net/Net.h"


@ -27,9 +26,9 @@ namespace Poco
{
namespace Net
{
    constexpr size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;


    typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>, HTTPBufferAllocator> HTTPBasicStreamBuf;
    typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>> HTTPBasicStreamBuf;


}
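
The net effect of this hunk is easier to see outside Poco: the buffer size becomes a plain namespace-level constant and the stream buffer falls back to the default allocator. A minimal sketch with std-only stand-ins (BufferedStreamBuf here is hypothetical, not Poco's template):

#include <cstddef>
#include <streambuf>
#include <vector>

constexpr std::size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;

// A trivially buffered output streambuf sized by the constant above and
// backed by the default allocator (no pool-based allocator parameter).
class BufferedStreamBuf : public std::streambuf
{
public:
    BufferedStreamBuf() : _buffer(HTTP_DEFAULT_BUFFER_SIZE)
    {
        setp(_buffer.data(), _buffer.data() + _buffer.size());
    }

private:
    std::vector<char> _buffer;
};
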
@ -1,53 +0,0 @@
//
// HTTPBufferAllocator.h
//
// Library: Net
// Package: HTTP
// Module:  HTTPBufferAllocator
//
// Definition of the HTTPBufferAllocator class.
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Net_HTTPBufferAllocator_INCLUDED
#define Net_HTTPBufferAllocator_INCLUDED


#include <ios>
#include "Poco/MemoryPool.h"
#include "Poco/Net/Net.h"


namespace Poco
{
namespace Net
{


    class Net_API HTTPBufferAllocator
    /// A BufferAllocator for HTTP streams.
    {
    public:
        static char * allocate(std::streamsize size);
        static void deallocate(char * ptr, std::streamsize size);

        enum
        {
            BUFFER_SIZE = 128 * 1024
        };

    private:
        static Poco::MemoryPool _pool;
    };


}
} // namespace Poco::Net


#endif // Net_HTTPBufferAllocator_INCLUDED
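
The contract this deleted header fulfilled is small: the buffered stream buffer obtains and returns its memory through two static functions on its allocator parameter. A toy replacement satisfying the same interface with plain new/delete (an assumption for illustration, with no pooling) could look like:

#include <cstddef>
#include <ios>

struct NewDeleteBufferAllocator
{
    // Hands out a buffer of the requested size from the default heap.
    static char * allocate(std::streamsize size)
    {
        return new char[static_cast<std::size_t>(size)];
    }

    // Returns the buffer; the size parameter is unused without a pool.
    static void deallocate(char * ptr, std::streamsize /*size*/) noexcept
    {
        delete[] ptr;
    }
};
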
@ -21,7 +21,6 @@
#include <cstddef>
#include <istream>
#include <ostream>
#include "Poco/MemoryPool.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include "Poco/Net/Net.h"


@ -80,12 +79,6 @@ namespace Net
    public:
        HTTPChunkedInputStream(HTTPSession & session);
        ~HTTPChunkedInputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };


@ -95,12 +88,6 @@ namespace Net
    public:
        HTTPChunkedOutputStream(HTTPSession & session);
        ~HTTPChunkedOutputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };
@ -127,6 +127,9 @@ namespace Net

        void setResolvedHost(std::string resolved_host) { _resolved_host.swap(resolved_host); }

        std::string getResolvedHost() const { return _resolved_host; }
        /// Returns the resolved IP address of the target HTTP server.

        Poco::UInt16 getPort() const;
        /// Returns the port number of the target HTTP server.

@ -303,7 +306,7 @@ namespace Net
            DEFAULT_KEEP_ALIVE_TIMEOUT = 8
        };

        void reconnect();
        virtual void reconnect();
        /// Connects the underlying socket to the HTTP server.

        int write(const char * buffer, std::streamsize length);
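
Making reconnect() virtual matters for subclassing. A minimal sketch of what it enables, using hypothetical classes rather than the real HTTPClientSession hierarchy:

#include <iostream>

class Session
{
public:
    virtual ~Session() = default;

    virtual void reconnect() { std::cout << "plain TCP connect\n"; }
};

class TunnelingSession : public Session
{
public:
    // With a virtual base method, callers holding a Session* reach this
    // override; with a non-virtual one they would silently get the base's.
    void reconnect() override { std::cout << "CONNECT through proxy first\n"; }
};

int main()
{
    TunnelingSession s;
    Session & base = s;
    base.reconnect(); // prints "CONNECT through proxy first"
}
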
@ -78,12 +78,6 @@ namespace Net
    public:
        HTTPFixedLengthInputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
        ~HTTPFixedLengthInputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };


@ -93,12 +87,6 @@ namespace Net
    public:
        HTTPFixedLengthOutputStream(HTTPSession & session, HTTPFixedLengthStreamBuf::ContentLength length);
        ~HTTPFixedLengthOutputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };
@ -21,7 +21,6 @@
#include <cstddef>
#include <istream>
#include <ostream>
#include "Poco/MemoryPool.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include "Poco/Net/Net.h"


@ -74,12 +73,6 @@ namespace Net
    public:
        HTTPHeaderInputStream(HTTPSession & session);
        ~HTTPHeaderInputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };


@ -89,12 +82,6 @@ namespace Net
    public:
        HTTPHeaderOutputStream(HTTPSession & session);
        ~HTTPHeaderOutputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };
@ -192,7 +192,7 @@ namespace Net
        HTTPSession & operator=(const HTTPSession &);

        StreamSocket _socket;
        char * _pBuffer;
        std::unique_ptr<char[]> _pBuffer;
        char * _pCurrent;
        char * _pEnd;
        bool _keepAlive;
@ -21,7 +21,6 @@
#include <cstddef>
#include <istream>
#include <ostream>
#include "Poco/MemoryPool.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include "Poco/Net/Net.h"


@ -75,12 +74,6 @@ namespace Net
    public:
        HTTPInputStream(HTTPSession & session);
        ~HTTPInputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };


@ -90,12 +83,6 @@ namespace Net
    public:
        HTTPOutputStream(HTTPSession & session);
        ~HTTPOutputStream();

        void * operator new(std::size_t size);
        void operator delete(void * ptr);

    private:
        static Poco::MemoryPool _pool;
    };
@ -1,44 +0,0 @@
//
// HTTPBufferAllocator.cpp
//
// Library: Net
// Package: HTTP
// Module:  HTTPBufferAllocator
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include "Poco/Net/HTTPBufferAllocator.h"


using Poco::MemoryPool;


namespace Poco {
namespace Net {


MemoryPool HTTPBufferAllocator::_pool(HTTPBufferAllocator::BUFFER_SIZE, 16);


char* HTTPBufferAllocator::allocate(std::streamsize size)
{
    poco_assert_dbg (size == BUFFER_SIZE);

    return reinterpret_cast<char*>(_pool.get());
}


void HTTPBufferAllocator::deallocate(char* ptr, std::streamsize size)
{
    poco_assert_dbg (size == BUFFER_SIZE);

    _pool.release(ptr);
}


} } // namespace Poco::Net
@ -34,7 +34,7 @@ namespace Net {


HTTPChunkedStreamBuf::HTTPChunkedStreamBuf(HTTPSession& session, openmode mode):
    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
    _session(session),
    _mode(mode),
    _chunk(0)

@ -181,10 +181,6 @@ HTTPChunkedStreamBuf* HTTPChunkedIOS::rdbuf()
// HTTPChunkedInputStream
//


Poco::MemoryPool HTTPChunkedInputStream::_pool(sizeof(HTTPChunkedInputStream));


HTTPChunkedInputStream::HTTPChunkedInputStream(HTTPSession& session):
    HTTPChunkedIOS(session, std::ios::in),
    std::istream(&_buf)

@ -196,34 +192,10 @@ HTTPChunkedInputStream::~HTTPChunkedInputStream()
{
}


void* HTTPChunkedInputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPChunkedInputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


//
// HTTPChunkedOutputStream
//


Poco::MemoryPool HTTPChunkedOutputStream::_pool(sizeof(HTTPChunkedOutputStream));


HTTPChunkedOutputStream::HTTPChunkedOutputStream(HTTPSession& session):
    HTTPChunkedIOS(session, std::ios::out),
    std::ostream(&_buf)

@ -235,24 +207,4 @@ HTTPChunkedOutputStream::~HTTPChunkedOutputStream()
{
}


void* HTTPChunkedOutputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPChunkedOutputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


} } // namespace Poco::Net
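
The blocks removed above (and in the sibling stream files below) all follow one pattern: per-class operator new/delete backed by a fixed-size memory pool. A condensed sketch of that pattern, with a toy pool standing in for Poco::MemoryPool, for readers who want to see what is being dropped in favour of the default allocator:

#include <cstddef>
#include <new>
#include <vector>

// Toy free-list pool handing out fixed-size blocks.
class Pool
{
public:
    explicit Pool(std::size_t blockSize) : _blockSize(blockSize) {}

    void * get()
    {
        if (_free.empty())
            return ::operator new(_blockSize);
        void * p = _free.back();
        _free.pop_back();
        return p;
    }

    void release(void * p) { _free.push_back(p); }

private:
    std::size_t _blockSize;
    std::vector<void *> _free;
};

class PooledStream
{
public:
    // All heap instances of this class come from (and return to) the pool.
    void * operator new(std::size_t) { return pool().get(); }
    void operator delete(void * ptr) noexcept { pool().release(ptr); }
    // (The removed Poco code wrapped release in try/catch for this reason.)

private:
    static Pool & pool()
    {
        static Pool p(sizeof(PooledStream));
        return p;
    }
};
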
@ -30,7 +30,7 @@ namespace Net {


HTTPFixedLengthStreamBuf::HTTPFixedLengthStreamBuf(HTTPSession& session, ContentLength length, openmode mode):
    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
    _session(session),
    _length(length),
    _count(0)

@ -109,9 +109,6 @@ HTTPFixedLengthStreamBuf* HTTPFixedLengthIOS::rdbuf()
//


Poco::MemoryPool HTTPFixedLengthInputStream::_pool(sizeof(HTTPFixedLengthInputStream));


HTTPFixedLengthInputStream::HTTPFixedLengthInputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
    HTTPFixedLengthIOS(session, length, std::ios::in),
    std::istream(&_buf)

@ -124,33 +121,10 @@ HTTPFixedLengthInputStream::~HTTPFixedLengthInputStream()
}


void* HTTPFixedLengthInputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPFixedLengthInputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


//
// HTTPFixedLengthOutputStream
//


Poco::MemoryPool HTTPFixedLengthOutputStream::_pool(sizeof(HTTPFixedLengthOutputStream));


HTTPFixedLengthOutputStream::HTTPFixedLengthOutputStream(HTTPSession& session, HTTPFixedLengthStreamBuf::ContentLength length):
    HTTPFixedLengthIOS(session, length, std::ios::out),
    std::ostream(&_buf)

@ -163,23 +137,4 @@ HTTPFixedLengthOutputStream::~HTTPFixedLengthOutputStream()
}


void* HTTPFixedLengthOutputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPFixedLengthOutputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


} } // namespace Poco::Net
@ -26,7 +26,7 @@ namespace Net {


HTTPHeaderStreamBuf::HTTPHeaderStreamBuf(HTTPSession& session, openmode mode):
    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
    _session(session),
    _end(false)
{

@ -101,10 +101,6 @@ HTTPHeaderStreamBuf* HTTPHeaderIOS::rdbuf()
// HTTPHeaderInputStream
//


Poco::MemoryPool HTTPHeaderInputStream::_pool(sizeof(HTTPHeaderInputStream));


HTTPHeaderInputStream::HTTPHeaderInputStream(HTTPSession& session):
    HTTPHeaderIOS(session, std::ios::in),
    std::istream(&_buf)

@ -116,34 +112,10 @@ HTTPHeaderInputStream::~HTTPHeaderInputStream()
{
}


void* HTTPHeaderInputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPHeaderInputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


//
// HTTPHeaderOutputStream
//


Poco::MemoryPool HTTPHeaderOutputStream::_pool(sizeof(HTTPHeaderOutputStream));


HTTPHeaderOutputStream::HTTPHeaderOutputStream(HTTPSession& session):
    HTTPHeaderIOS(session, std::ios::out),
    std::ostream(&_buf)

@ -155,24 +127,4 @@ HTTPHeaderOutputStream::~HTTPHeaderOutputStream()
{
}


void* HTTPHeaderOutputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPHeaderOutputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


} } // namespace Poco::Net
@ -13,8 +13,8 @@


#include "Poco/Net/HTTPSession.h"
#include "Poco/Net/HTTPBufferAllocator.h"
#include "Poco/Net/NetException.h"
#include "Poco/Net/HTTPBasicStreamBuf.h"
#include <cstring>


@ -68,14 +68,6 @@ HTTPSession::HTTPSession(const StreamSocket& socket, bool keepAlive):

HTTPSession::~HTTPSession()
{
    try
    {
        if (_pBuffer) HTTPBufferAllocator::deallocate(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
    }
    catch (...)
    {
        poco_unexpected();
    }
    try
    {
        close();

@ -177,10 +169,10 @@ void HTTPSession::refill()
{
    if (!_pBuffer)
    {
        _pBuffer = HTTPBufferAllocator::allocate(HTTPBufferAllocator::BUFFER_SIZE);
        _pBuffer = std::make_unique<char[]>(HTTP_DEFAULT_BUFFER_SIZE);
    }
    _pCurrent = _pEnd = _pBuffer;
    int n = receive(_pBuffer, HTTPBufferAllocator::BUFFER_SIZE);
    _pCurrent = _pEnd = _pBuffer.get();
    int n = receive(_pBuffer.get(), HTTP_DEFAULT_BUFFER_SIZE);
    _pEnd += n;
}

@ -199,7 +191,7 @@ void HTTPSession::connect(const SocketAddress& address)
    _socket.setNoDelay(true);
    // There may be leftover data from a previous (failed) request in the buffer,
    // so we clear it.
    _pCurrent = _pEnd = _pBuffer.get();
}
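
The ownership change above, from a raw pooled pointer to std::unique_ptr<char[]>, keeps the lazy allocation in refill() but removes the manual deallocate call from the destructor. A reduced sketch, assuming a simplified stand-in class:

#include <cstddef>
#include <memory>

constexpr std::size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;

class Session
{
public:
    char * refill()
    {
        if (!_pBuffer) // allocate on first use, exactly once
            _pBuffer = std::make_unique<char[]>(HTTP_DEFAULT_BUFFER_SIZE);
        _pCurrent = _pEnd = _pBuffer.get();
        return _pCurrent;
    }

    // No destructor needed: unique_ptr releases the buffer automatically.

private:
    std::unique_ptr<char[]> _pBuffer;
    char * _pCurrent = nullptr;
    char * _pEnd = nullptr;
};
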
@ -26,7 +26,7 @@ namespace Net {


HTTPStreamBuf::HTTPStreamBuf(HTTPSession& session, openmode mode):
    HTTPBasicStreamBuf(HTTPBufferAllocator::BUFFER_SIZE, mode),
    HTTPBasicStreamBuf(HTTP_DEFAULT_BUFFER_SIZE, mode),
    _session(session),
    _mode(mode)
{

@ -96,10 +96,6 @@ HTTPStreamBuf* HTTPIOS::rdbuf()
// HTTPInputStream
//


Poco::MemoryPool HTTPInputStream::_pool(sizeof(HTTPInputStream));


HTTPInputStream::HTTPInputStream(HTTPSession& session):
    HTTPIOS(session, std::ios::in),
    std::istream(&_buf)

@ -112,33 +108,11 @@ HTTPInputStream::~HTTPInputStream()
}


void* HTTPInputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPInputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


//
// HTTPOutputStream
//


Poco::MemoryPool HTTPOutputStream::_pool(sizeof(HTTPOutputStream));


HTTPOutputStream::HTTPOutputStream(HTTPSession& session):
    HTTPIOS(session, std::ios::out),
    std::ostream(&_buf)

@ -150,24 +124,4 @@ HTTPOutputStream::~HTTPOutputStream()
{
}


void* HTTPOutputStream::operator new(std::size_t size)
{
    return _pool.get();
}


void HTTPOutputStream::operator delete(void* ptr)
{
    try
    {
        _pool.release(ptr);
    }
    catch (...)
    {
        poco_unexpected();
    }
}


} } // namespace Poco::Net
@ -274,7 +274,9 @@ void SocketImpl::shutdown()

int SocketImpl::sendBytes(const void* buffer, int length, int flags)
{
    if (_isBrokenTimeout)
    bool blocking = _blocking && (flags & MSG_DONTWAIT) == 0;

    if (_isBrokenTimeout && blocking)
    {
        if (_sndTimeout.totalMicroseconds() != 0)
        {

@ -289,11 +291,13 @@ int SocketImpl::sendBytes(const void* buffer, int length, int flags)
        if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
        rc = ::send(_sockfd, reinterpret_cast<const char*>(buffer), length, flags);
    }
    while (_blocking && rc < 0 && lastError() == POCO_EINTR);
    while (blocking && rc < 0 && lastError() == POCO_EINTR);
    if (rc < 0)
    {
        int err = lastError();
        if (err == POCO_EAGAIN || err == POCO_ETIMEDOUT)
        if ((err == POCO_EAGAIN || err == POCO_EWOULDBLOCK) && !blocking)
            ;
        else if (err == POCO_EAGAIN || err == POCO_ETIMEDOUT)
            throw TimeoutException();
        else
            error(err);
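
The intent of this change is that EAGAIN/EWOULDBLOCK on a non-blocking send is not an error, while on a blocking socket it still signals a timeout. The same decision tree expressed in plain POSIX terms (Linux sockets assumed here, not Poco's wrappers):

#include <cerrno>
#include <sys/socket.h>
#include <sys/types.h>

// Returns bytes sent, 0 if the non-blocking send would block,
// or -1 on a real error (errno is set).
ssize_t send_some(int fd, const void * buf, size_t len, bool nonblocking)
{
    ssize_t rc;
    do
        rc = ::send(fd, buf, len, nonblocking ? MSG_DONTWAIT : 0);
    while (!nonblocking && rc < 0 && errno == EINTR); // blocking mode: retry on EINTR

    if (rc < 0 && nonblocking && (errno == EAGAIN || errno == EWOULDBLOCK))
        return 0; // not an error: the caller should retry later
    return rc;
}
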
@ -1,53 +0,0 @@
//
// ConsoleCertificateHandler.h
//
// Library: NetSSL_OpenSSL
// Package: SSLCore
// Module:  ConsoleCertificateHandler
//
// Definition of the ConsoleCertificateHandler class.
//
// Copyright (c) 2006-2009, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef NetSSL_ConsoleCertificateHandler_INCLUDED
#define NetSSL_ConsoleCertificateHandler_INCLUDED


#include "Poco/Net/InvalidCertificateHandler.h"
#include "Poco/Net/NetSSL.h"


namespace Poco
{
namespace Net
{


    class NetSSL_API ConsoleCertificateHandler : public InvalidCertificateHandler
    /// A ConsoleCertificateHandler is invoked whenever an error occurs verifying the certificate.
    ///
    /// The certificate is printed to stdout and the user is asked via console if he wants to accept it.
    {
    public:
        ConsoleCertificateHandler(bool handleErrorsOnServerSide);
        /// Creates the ConsoleCertificateHandler.

        virtual ~ConsoleCertificateHandler();
        /// Destroys the ConsoleCertificateHandler.

        void onInvalidCertificate(const void * pSender, VerificationErrorArgs & errorCert);
        /// Prints the certificate to stdout and waits for user input on the console
        /// to decide if a certificate should be accepted/rejected.
    };


}
} // namespace Poco::Net


#endif // NetSSL_ConsoleCertificateHandler_INCLUDED
@ -85,7 +85,7 @@ namespace Net
    /// </options>
    /// </privateKeyPassphraseHandler>
    /// <invalidCertificateHandler>
    ///     <name>ConsoleCertificateHandler</name>
    ///     <name>RejectCertificateHandler</name>
    /// </invalidCertificateHandler>
    /// <cacheSessions>true|false</cacheSessions>
    /// <sessionIdContext>someString</sessionIdContext> <!-- server only -->

@ -186,7 +186,7 @@ namespace Net
    ///
    /// Valid initialization code would be:
    ///     SharedPtr<PrivateKeyPassphraseHandler> pConsoleHandler = new KeyConsoleHandler;
    ///     SharedPtr<InvalidCertificateHandler> pInvalidCertHandler = new ConsoleCertificateHandler;
    ///     SharedPtr<InvalidCertificateHandler> pInvalidCertHandler = new RejectCertificateHandler;
    ///     Context::Ptr pContext = new Context(Context::SERVER_USE, "any.pem", "any.pem", "rootcert.pem", Context::VERIFY_RELAXED, 9, false, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
    ///     SSLManager::instance().initializeServer(pConsoleHandler, pInvalidCertHandler, pContext);

@ -203,7 +203,7 @@ namespace Net
    ///
    /// Valid initialization code would be:
    ///     SharedPtr<PrivateKeyPassphraseHandler> pConsoleHandler = new KeyConsoleHandler;
    ///     SharedPtr<InvalidCertificateHandler> pInvalidCertHandler = new ConsoleCertificateHandler;
    ///     SharedPtr<InvalidCertificateHandler> pInvalidCertHandler = new RejectCertificateHandler;
    ///     Context::Ptr pContext = new Context(Context::CLIENT_USE, "", "", "rootcert.pem", Context::VERIFY_RELAXED, 9, false, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
    ///     SSLManager::instance().initializeClient(pConsoleHandler, pInvalidCertHandler, pContext);
@ -183,6 +183,16 @@ namespace Net
        /// Returns true iff a reused session was negotiated during
        /// the handshake.

        virtual void setBlocking(bool flag);
        /// Sets the socket in blocking mode if flag is true,
        /// disables blocking mode if flag is false.

        virtual bool getBlocking() const;
        /// Returns the blocking mode of the socket.
        /// This method will only work if the blocking modes of
        /// the socket are changed via the setBlocking method!


    protected:
        void acceptSSL();
        /// Assume per-object mutex is locked.

@ -201,6 +201,16 @@ namespace Net
        /// Returns true iff a reused session was negotiated during
        /// the handshake.

        virtual void setBlocking(bool flag);
        /// Sets the socket in blocking mode if flag is true,
        /// disables blocking mode if flag is false.

        virtual bool getBlocking() const;
        /// Returns the blocking mode of the socket.
        /// This method will only work if the blocking modes of
        /// the socket are changed via the setBlocking method!


    protected:
        void acceptSSL();
        /// Performs a SSL server-side handshake.
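
The caveat in the getBlocking() docs above follows from caching the flag instead of querying the OS. A reduced sketch with a hypothetical class:

class SecureSocket
{
public:
    void setBlocking(bool flag)
    {
        _blocking = flag;
        // ... also apply the mode to the underlying descriptor ...
    }

    bool getBlocking() const
    {
        // Reflects only changes made through setBlocking(); if the mode is
        // changed on the descriptor directly, this value goes stale.
        return _blocking;
    }

private:
    bool _blocking = true;
};
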
@ -13,7 +13,6 @@


#include "Poco/Net/CertificateHandlerFactoryMgr.h"
#include "Poco/Net/ConsoleCertificateHandler.h"
#include "Poco/Net/AcceptCertificateHandler.h"
#include "Poco/Net/RejectCertificateHandler.h"


@ -24,7 +23,6 @@ namespace Net {

CertificateHandlerFactoryMgr::CertificateHandlerFactoryMgr()
{
    setFactory("ConsoleCertificateHandler", new CertificateHandlerFactoryImpl<ConsoleCertificateHandler>());
    setFactory("AcceptCertificateHandler", new CertificateHandlerFactoryImpl<AcceptCertificateHandler>());
    setFactory("RejectCertificateHandler", new CertificateHandlerFactoryImpl<RejectCertificateHandler>());
}
@ -1,53 +0,0 @@
//
// ConsoleCertificateHandler.cpp
//
// Library: NetSSL_OpenSSL
// Package: SSLCore
// Module:  ConsoleCertificateHandler
//
// Copyright (c) 2006-2009, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include "Poco/Net/ConsoleCertificateHandler.h"
#include <iostream>


namespace Poco {
namespace Net {


ConsoleCertificateHandler::ConsoleCertificateHandler(bool server): InvalidCertificateHandler(server)
{
}


ConsoleCertificateHandler::~ConsoleCertificateHandler()
{
}


void ConsoleCertificateHandler::onInvalidCertificate(const void*, VerificationErrorArgs& errorCert)
{
    const X509Certificate& aCert = errorCert.certificate();
    std::cout << "\n";
    std::cout << "WARNING: Certificate verification failed\n";
    std::cout << "----------------------------------------\n";
    std::cout << "Issuer Name:  " << aCert.issuerName() << "\n";
    std::cout << "Subject Name: " << aCert.subjectName() << "\n\n";
    std::cout << "The certificate yielded the error: " << errorCert.errorMessage() << "\n\n";
    std::cout << "The error occurred in the certificate chain at position " << errorCert.errorDepth() << "\n";
    std::cout << "Accept the certificate (y,n)? ";
    char c = 0;
    std::cin >> c;
    if (c == 'y' || c == 'Y')
        errorCert.setIgnoreError(true);
    else
        errorCert.setIgnoreError(false);
}


} } // namespace Poco::Net
@ -46,7 +46,7 @@ const std::string SSLManager::CFG_PREFER_SERVER_CIPHERS("preferServerCiphers");
const std::string SSLManager::CFG_DELEGATE_HANDLER("privateKeyPassphraseHandler.name");
const std::string SSLManager::VAL_DELEGATE_HANDLER("KeyConsoleHandler");
const std::string SSLManager::CFG_CERTIFICATE_HANDLER("invalidCertificateHandler.name");
const std::string SSLManager::VAL_CERTIFICATE_HANDLER("ConsoleCertificateHandler");
const std::string SSLManager::VAL_CERTIFICATE_HANDLER("RejectCertificateHandler");
const std::string SSLManager::CFG_SERVER_PREFIX("openSSL.server.");
const std::string SSLManager::CFG_CLIENT_PREFIX("openSSL.client.");
const std::string SSLManager::CFG_CACHE_SESSIONS("cacheSessions");