Merge branch 'master' of github.com:clickhouse/ClickHouse into add-gcs-table-function

Commit c6953f4452 by Kuba Kaflik, 2023-05-04 16:03:16 +02:00
3327 changed files with 92657 additions and 60924 deletions

View File

@ -21,7 +21,6 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
ExperimentalAutoDetectBinPacking: true
UseTab: Never
TabWidth: 4
IndentWidth: 4
Standard: Cpp11
PointerAlignment: Middle
MaxEmptyLinesToKeep: 2

View File

@ -110,6 +110,7 @@ Checks: '*,
-misc-const-correctness,
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
-misc-confusable-identifiers, # useful but slooow
-modernize-avoid-c-arrays,
-modernize-concat-nested-namespaces,
@ -148,19 +149,6 @@ Checks: '*,
-readability-use-anyofallof,
-zirkon-*,
-misc-*, # temporarily disabled due to being too slow
# also disable checks in other categories which are aliases of checks in misc-*:
# https://releases.llvm.org/15.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/list.html
-cert-dcl54-cpp, # alias of misc-new-delete-overloads
-hicpp-new-delete-operators, # alias of misc-new-delete-overloads
-cert-fio38-c, # alias of misc-non-copyable-objects
-cert-dcl03-c, # alias of misc-static-assert
-hicpp-static-assert, # alias of misc-static-assert
-cert-err09-cpp, # alias of misc-throw-by-value-catch-by-reference
-cert-err61-cpp, # alias of misc-throw-by-value-catch-by-reference
-cppcoreguidelines-c-copy-assignment-signature, # alias of misc-unconventional-assign-operator
-cppcoreguidelines-non-private-member-variables-in-classes, # alias of misc-non-private-member-variables-in-classes
'
WarningsAsErrors: '*'

View File

@ -2,16 +2,16 @@
A technical comment, you are free to remove or leave it as it is when PR is created
The following categories are used in the next scripts, update them accordingly
utils/changelog/changelog.py
tests/ci/run_check.py
tests/ci/cancel_and_rerun_workflow_lambda/app.py
-->
### Changelog category (leave one):
- New Feature
- Improvement
- Bug Fix (user-visible misbehavior in official stable or prestable release)
- Performance Improvement
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Bug Fix (user-visible misbehavior in an official stable release)
- Not for changelog (changelog entry is not required)

View File

@ -349,6 +349,13 @@ jobs:
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@ -470,7 +477,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push --no-ubuntu \
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()

View File

@ -487,6 +487,13 @@ jobs:
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@ -862,7 +869,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-ubuntu \
python3 docker_server.py --release-type head \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
@ -1131,7 +1138,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1167,6 +1174,114 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated2:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_database_replicated
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, DatabaseReplicated)
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated3:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_database_replicated
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, DatabaseReplicated)
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3_0:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_s3_storage
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, s3 storage)
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
@ -1190,7 +1305,7 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3:
FunctionalStatelessTestReleaseS3_1:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
@ -1202,6 +1317,8 @@ jobs:
CHECK_NAME=Stateless tests (release, s3 storage)
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1271,7 +1388,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1307,7 +1424,79 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan2:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (asan)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan3:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (asan)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1343,7 +1532,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1379,7 +1568,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1415,7 +1604,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1438,7 +1627,79 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan:
FunctionalStatelessTestTsan3:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (tsan)
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan4:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (tsan)
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan0:
needs: [BuilderDebUBsan]
runs-on: [self-hosted, func-tester]
steps:
@ -1450,6 +1711,44 @@ jobs:
CHECK_NAME=Stateless tests (ubsan)
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan1:
needs: [BuilderDebUBsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_ubsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (ubsan)
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1485,7 +1784,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1521,7 +1820,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1557,7 +1856,115 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan3:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (msan)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan4:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (msan)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan5:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (msan)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1593,7 +2000,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1629,7 +2036,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -1665,7 +2072,79 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug3:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (debug)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug4:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (debug)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2116,7 +2595,7 @@ jobs:
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2151,7 +2630,7 @@ jobs:
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2186,7 +2665,112 @@ jobs:
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan3:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan4:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan5:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2221,7 +2805,7 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2256,7 +2840,7 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2291,7 +2875,7 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2326,7 +2910,77 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan4:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan5:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2361,7 +3015,7 @@ jobs:
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -2396,7 +3050,77 @@ jobs:
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease2:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease3:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@ -3116,23 +3840,36 @@ jobs:
- FunctionalStatelessTestDebug0
- FunctionalStatelessTestDebug1
- FunctionalStatelessTestDebug2
- FunctionalStatelessTestDebug3
- FunctionalStatelessTestDebug4
- FunctionalStatelessTestRelease
- FunctionalStatelessTestReleaseDatabaseOrdinary
- FunctionalStatelessTestReleaseDatabaseReplicated0
- FunctionalStatelessTestReleaseDatabaseReplicated1
- FunctionalStatelessTestReleaseDatabaseReplicated2
- FunctionalStatelessTestReleaseDatabaseReplicated3
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan0
- FunctionalStatelessTestAsan1
- FunctionalStatelessTestAsan2
- FunctionalStatelessTestAsan3
- FunctionalStatelessTestTsan0
- FunctionalStatelessTestTsan1
- FunctionalStatelessTestTsan2
- FunctionalStatelessTestTsan3
- FunctionalStatelessTestTsan4
- FunctionalStatelessTestMsan0
- FunctionalStatelessTestMsan1
- FunctionalStatelessTestMsan2
- FunctionalStatelessTestUBsan
- FunctionalStatelessTestMsan3
- FunctionalStatelessTestMsan4
- FunctionalStatelessTestMsan5
- FunctionalStatelessTestUBsan0
- FunctionalStatelessTestUBsan1
- FunctionalStatefulTestDebug
- FunctionalStatefulTestRelease
- FunctionalStatelessTestReleaseS3
- FunctionalStatelessTestReleaseS3_0
- FunctionalStatelessTestReleaseS3_1
- FunctionalStatefulTestAarch64
- FunctionalStatefulTestAsan
- FunctionalStatefulTestTsan
@ -3146,12 +3883,19 @@ jobs:
- IntegrationTestsAsan0
- IntegrationTestsAsan1
- IntegrationTestsAsan2
- IntegrationTestsAsan3
- IntegrationTestsAsan4
- IntegrationTestsAsan5
- IntegrationTestsRelease0
- IntegrationTestsRelease1
- IntegrationTestsRelease2
- IntegrationTestsRelease3
- IntegrationTestsTsan0
- IntegrationTestsTsan1
- IntegrationTestsTsan2
- IntegrationTestsTsan3
- IntegrationTestsTsan4
- IntegrationTestsTsan5
- PerformanceComparisonX86-0
- PerformanceComparisonX86-1
- PerformanceComparisonX86-2
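
The repeated `RUN_BY_HASH_NUM`/`RUN_BY_HASH_TOTAL` bumps above split each check across more runners. A minimal sketch of how such hash-bucket test selection typically works (a hypothetical helper for illustration, not the actual logic in `tests/ci/functional_test_check.py`):

```python
import hashlib

def belongs_to_shard(test_name: str, run_by_hash_num: int, run_by_hash_total: int) -> bool:
    """Deterministically map a test to one of RUN_BY_HASH_TOTAL buckets."""
    digest = hashlib.sha1(test_name.encode()).hexdigest()
    return int(digest, 16) % run_by_hash_total == run_by_hash_num

# Example: after raising RUN_BY_HASH_TOTAL from 3 to 5, each runner picks
# roughly one fifth of the suite instead of one third.
tests = ["00001_select_basic", "00002_insert_basic", "00003_join_basic"]
print([t for t in tests if belongs_to_shard(t, run_by_hash_num=0, run_by_hash_total=5)])
```

Raising the total shortens each runner's wall-clock time at the cost of scheduling more parallel jobs, which is why the extra `FunctionalStatelessTest*` and `IntegrationTests*` job definitions appear throughout this file.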

View File

@ -118,9 +118,11 @@ jobs:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
SonarCloud:
# TODO: Remove if: whenever SonarCloud supports c++23
if: ${{ false }}
runs-on: [self-hosted, builder]
env:
SONAR_SCANNER_VERSION: 4.7.0.2747
SONAR_SCANNER_VERSION: 4.8.0.2856
SONAR_SERVER_URL: "https://sonarcloud.io"
BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
CC: clang-15
@ -173,4 +175,4 @@ jobs:
--define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
--define sonar.projectKey="ClickHouse_ClickHouse" \
--define sonar.organization="clickhouse-java" \
--define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
--define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" \

View File

@ -550,6 +550,13 @@ jobs:
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@ -918,7 +925,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push --no-ubuntu \
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
@ -1301,6 +1308,40 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseAnalyzer:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_analyzer
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, analyzer)
REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3_0:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
@ -4748,6 +4789,7 @@ jobs:
- FunctionalStatelessTestReleaseDatabaseReplicated2
- FunctionalStatelessTestReleaseDatabaseReplicated3
- FunctionalStatelessTestReleaseWideParts
- FunctionalStatelessTestReleaseAnalyzer
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan0
- FunctionalStatelessTestAsan1
@ -4839,3 +4881,41 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py
python3 merge_pr.py --check-approved
##############################################################################################
########################### SQLLOGIC TEST ###################################################
##############################################################################################
SQLLogicTestRelease:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/sqllogic_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Sqllogic test (release)
REPO_COPY=${{runner.temp}}/sqllogic_debug/ClickHouse
KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Sqllogic test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 sqllogic_test.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -55,7 +55,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --no-ubuntu \
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()

View File

@ -406,6 +406,13 @@ jobs:
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@ -527,7 +534,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push --no-ubuntu \
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()

.gitmodules (vendored): 9 changes
View File

@ -253,9 +253,6 @@
[submodule "contrib/qpl"]
path = contrib/qpl
url = https://github.com/intel/qpl
[submodule "contrib/idxd-config"]
path = contrib/idxd-config
url = https://github.com/intel/idxd-config
[submodule "contrib/wyhash"]
path = contrib/wyhash
url = https://github.com/wangyi-fudan/wyhash
@ -296,6 +293,9 @@
[submodule "contrib/libdivide"]
path = contrib/libdivide
url = https://github.com/ridiculousfish/libdivide
[submodule "contrib/libbcrypt"]
path = contrib/libbcrypt
url = https://github.com/rg3/libbcrypt.git
[submodule "contrib/ulid-c"]
path = contrib/ulid-c
url = https://github.com/ClickHouse/ulid-c.git
@ -335,3 +335,6 @@
[submodule "contrib/liburing"]
path = contrib/liburing
url = https://github.com/axboe/liburing
[submodule "contrib/isa-l"]
path = contrib/isa-l
url = https://github.com/ClickHouse/isa-l.git
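
The `.gitmodules` hunks above drop `contrib/idxd-config` and add `contrib/libbcrypt` and `contrib/isa-l`. A small sketch for listing the resulting submodule URLs locally, delegating the parsing to git itself (assumes `git` is on `PATH`; the helper name is illustrative):

```python
import subprocess

def list_submodule_urls(gitmodules_path: str = ".gitmodules") -> dict:
    """Return {submodule name: url} parsed from a .gitmodules file via git config."""
    out = subprocess.run(
        ["git", "config", "-f", gitmodules_path, "--get-regexp", r"^submodule\..*\.url$"],
        check=True, capture_output=True, text=True,
    ).stdout
    urls = {}
    for line in out.splitlines():
        key, url = line.split(" ", 1)  # e.g. "submodule.contrib/isa-l.url https://github.com/ClickHouse/isa-l.git"
        urls[key[len("submodule."):-len(".url")]] = url
    return urls

print(list_submodule_urls())
```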

View File

@ -1,10 +1,343 @@
### Table of Contents
**[ClickHouse release v23.4, 2023-04-26](#234)**<br/>
**[ClickHouse release v23.3 LTS, 2023-03-30](#233)**<br/>
**[ClickHouse release v23.2, 2023-02-23](#232)**<br/>
**[ClickHouse release v23.1, 2023-01-25](#231)**<br/>
**[Changelog for 2022](https://clickhouse.com/docs/en/whats-new/changelog/2022/)**<br/>
# 2023 Changelog
### <a id="234"></a> ClickHouse release 23.4, 2023-04-26
#### Backward Incompatible Change
* Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using setting "formatdatetime_parsedatetime_m_is_month_name = 0". [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)).
* This change makes sense only if you are using the virtual filesystem cache. If `path` in the virtual filesystem cache configuration is not empty and is not an absolute path, then it will be put in `<clickhouse server data directory>/caches/<path_from_cache_config>`. [#48784](https://github.com/ClickHouse/ClickHouse/pull/48784) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Primary/secondary indices and sorting keys with identical expressions are now rejected. This behavior can be disabled using setting `allow_suspicious_indices`. [#48536](https://github.com/ClickHouse/ClickHouse/pull/48536) ([凌涛](https://github.com/lingtaolf)).
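
A minimal way to observe the `%M` change described above from Python (an illustrative sketch only, assuming the third-party `clickhouse-driver` package and a local server; not part of this commit):

```python
from clickhouse_driver import Client  # assumed third-party client library

client = Client("localhost")
# 23.4+: '%M' prints the month name (MySQL-compatible behavior).
print(client.execute("SELECT formatDateTime(toDateTime('2023-04-26 00:00:00'), '%M')"))
# The pre-23.4 behavior (minutes) can be restored per query:
print(client.execute(
    "SELECT formatDateTime(toDateTime('2023-04-26 00:00:00'), '%M') "
    "SETTINGS formatdatetime_parsedatetime_m_is_month_name = 0"
))
```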
#### New Feature
* Support the new aggregate functions `quantileGK`/`quantilesGK`, similar to [approx_percentile](https://spark.apache.org/docs/latest/api/sql/index.html#approx_percentile) in Spark. For the Greenwald-Khanna algorithm, see http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf. [#46428](https://github.com/ClickHouse/ClickHouse/pull/46428) ([李扬](https://github.com/taiyang-li)).
* Add a statement `SHOW COLUMNS` which shows distilled information from system.columns. [#48017](https://github.com/ClickHouse/ClickHouse/pull/48017) ([Robert Schulze](https://github.com/rschu1ze)).
* Added `LIGHTWEIGHT` and `PULL` modifiers for `SYSTEM SYNC REPLICA` query. `LIGHTWEIGHT` version waits for fetches and drop-ranges only (merges and mutations are ignored). `PULL` version pulls new entries from ZooKeeper and does not wait for them. Fixes [#47794](https://github.com/ClickHouse/ClickHouse/issues/47794). [#48085](https://github.com/ClickHouse/ClickHouse/pull/48085) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add `kafkaMurmurHash` function for compatibility with Kafka DefaultPartitioner. Closes [#47834](https://github.com/ClickHouse/ClickHouse/issues/47834). [#48185](https://github.com/ClickHouse/ClickHouse/pull/48185) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow to easily create a user with the same grants as the current user by using `GRANT CURRENT GRANTS`. [#48262](https://github.com/ClickHouse/ClickHouse/pull/48262) ([pufit](https://github.com/pufit)).
* Add statistical aggregate function `kolmogorovSmirnovTest`. Close [#48228](https://github.com/ClickHouse/ClickHouse/issues/48228). [#48325](https://github.com/ClickHouse/ClickHouse/pull/48325) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
* Added a `lost_part_count` column to the `system.replicas` table. The column value shows the total number of lost parts in the corresponding table. The value is stored in ZooKeeper and can be used for monitoring instead of the non-persistent `ReplicatedDataLoss` profile event. [#48526](https://github.com/ClickHouse/ClickHouse/pull/48526) ([Sergei Trifonov](https://github.com/serxa)).
* Add `soundex` function for compatibility. Closes [#39880](https://github.com/ClickHouse/ClickHouse/issues/39880). [#48567](https://github.com/ClickHouse/ClickHouse/pull/48567) ([FriendLey](https://github.com/FriendLey)).
* Support `Map` type for JSONExtract. [#48629](https://github.com/ClickHouse/ClickHouse/pull/48629) ([李扬](https://github.com/taiyang-li)).
* Add `PrettyJSONEachRow` format to output pretty JSON with new line delimiters and 4 space indents. [#48898](https://github.com/ClickHouse/ClickHouse/pull/48898) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `ParquetMetadata` input format to read Parquet file metadata. [#48911](https://github.com/ClickHouse/ClickHouse/pull/48911) ([Kruglov Pavel](https://github.com/Avogar)).
* Add the `extractKeyValuePairs` function to extract key-value pairs from strings. Input strings might contain noise (e.g. log files) and do not need to be 100% formatted as key-value pairs; the algorithm will look for key-value pairs matching the arguments passed to the function. As of now, the function accepts the following arguments: `data_column` (mandatory), `key_value_pair_delimiter` (defaults to `:`), `pair_delimiters` (defaults to `\space \, \;`) and `quoting_character` (defaults to double quotes). [#43606](https://github.com/ClickHouse/ClickHouse/pull/43606) ([Arthur Passos](https://github.com/arthurpassos)).
* Functions replaceOne(), replaceAll(), replaceRegexpOne() and replaceRegexpAll() can now be called with non-const pattern and replacement arguments. [#46589](https://github.com/ClickHouse/ClickHouse/pull/46589) ([Robert Schulze](https://github.com/rschu1ze)).
* Added functions to work with columns of type `Map`: `mapConcat`, `mapSort`, `mapExists`. [#48071](https://github.com/ClickHouse/ClickHouse/pull/48071) ([Anton Popov](https://github.com/CurtizJ)).
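
As an illustration of the `extractKeyValuePairs` function described above, the same kind of sketch (again assuming the third-party `clickhouse-driver` package; the printed result is approximate):

```python
from clickhouse_driver import Client  # assumed third-party client library

client = Client("localhost")
rows = client.execute("SELECT extractKeyValuePairs('name:neymar, age:31 team:psg')")
print(rows)  # roughly: [({'name': 'neymar', 'age': '31', 'team': 'psg'},)]
```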
#### Performance Improvement
* Reading files in `Parquet` format is now much faster. IO and decoding are parallelized (controlled by `max_threads` setting), and only required data ranges are read. [#47964](https://github.com/ClickHouse/ClickHouse/pull/47964) ([Michael Kolupaev](https://github.com/al13n321)).
* If we run a mutation with IN (subquery), such as `ALTER TABLE t UPDATE col='new value' WHERE id IN (SELECT id FROM huge_table)`, and the table `t` has multiple parts, then a set for the subquery `SELECT id FROM huge_table` is built in memory for each part. If there are many parts, this might consume a lot of memory (and lead to an OOM) and CPU. The solution is to introduce a short-lived cache of sets that are currently being built by mutation tasks. If another task of the same mutation is executed concurrently, it can look up the set in the cache, wait for it to be built, and reuse it. [#46835](https://github.com/ClickHouse/ClickHouse/pull/46835) ([Alexander Gololobov](https://github.com/davenger)).
* Only check dependencies if necessary when applying `ALTER TABLE` queries. [#48062](https://github.com/ClickHouse/ClickHouse/pull/48062) ([Raúl Marín](https://github.com/Algunenano)).
* Optimize function `mapUpdate`. [#48118](https://github.com/ClickHouse/ClickHouse/pull/48118) ([Anton Popov](https://github.com/CurtizJ)).
* Now an internal query to the local replica is sent explicitly and its data is received through the loopback interface. The setting `prefer_localhost_replica` is not respected for parallel replicas. This is needed for better scheduling and makes the code cleaner: the initiator is only responsible for coordinating the reading process and merging results, continuously answering requests while all the secondary queries read the data. Note: using the loopback interface is not as performant, but otherwise some replicas could starve for tasks, which could lead to even slower query execution and underutilized resources. The initialization of the coordinator is now even lazier: all incoming requests contain information about the reading algorithm, and the coordinator is initialized with it when the first request arrives. If any replica decides to read with a different algorithm, an exception will be thrown and the query will be aborted. [#48246](https://github.com/ClickHouse/ClickHouse/pull/48246) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not build set for the right side of `IN` clause with subquery when it is used only for analysis of skip indexes, and they are disabled by setting (`use_skip_indexes=0`). Previously it might affect the performance of queries. [#48299](https://github.com/ClickHouse/ClickHouse/pull/48299) ([Anton Popov](https://github.com/CurtizJ)).
* Query processing is parallelized right after reading `FROM file(...)`. Related to [#38755](https://github.com/ClickHouse/ClickHouse/issues/38755). [#48525](https://github.com/ClickHouse/ClickHouse/pull/48525) ([Igor Nikonov](https://github.com/devcrafter)). Query processing is parallelized right after reading from any data source. Affected data sources are mostly simple or external storages like table functions `url`, `file`. [#48727](https://github.com/ClickHouse/ClickHouse/pull/48727) ([Igor Nikonov](https://github.com/devcrafter)). This is controlled by the setting `parallelize_output_from_storages` which is not enabled by default.
* Lowered contention on the ThreadPool mutex (may increase performance for a huge number of small jobs). [#48750](https://github.com/ClickHouse/ClickHouse/pull/48750) ([Sergei Trifonov](https://github.com/serxa)).
* Reduce memory usage for multiple `ALTER DELETE` mutations. [#48522](https://github.com/ClickHouse/ClickHouse/pull/48522) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove the excessive connection attempts if the `skip_unavailable_shards` setting is enabled. [#48771](https://github.com/ClickHouse/ClickHouse/pull/48771) ([Azat Khuzhin](https://github.com/azat)).
#### Experimental Feature
* Entries in the query cache are now squashed to max_block_size and compressed. [#45912](https://github.com/ClickHouse/ClickHouse/pull/45912) ([Robert Schulze](https://github.com/rschu1ze)).
* It is now possible to define per-user quotas in the query cache. [#48284](https://github.com/ClickHouse/ClickHouse/pull/48284) ([Robert Schulze](https://github.com/rschu1ze)).
* Some fixes for parallel replicas [#48433](https://github.com/ClickHouse/ClickHouse/pull/48433) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Implement zero-copy-replication (an experimental feature) on encrypted disks. [#48741](https://github.com/ClickHouse/ClickHouse/pull/48741) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Improvement
* Increase the default value of `connect_timeout_with_failover_ms` to 1000 ms (because async connections were added in https://github.com/ClickHouse/ClickHouse/pull/47229). Closes [#5188](https://github.com/ClickHouse/ClickHouse/issues/5188). [#49009](https://github.com/ClickHouse/ClickHouse/pull/49009) ([Kruglov Pavel](https://github.com/Avogar)).
* Several improvements around data lakes: - Make `Iceberg` work with non-partitioned data. - Support `Iceberg` format version v2 (previously only v1 was supported). - Support reading partitioned data for `DeltaLake`/`Hudi`. - Faster reading of `DeltaLake` metadata by using Delta's checkpoint files. - Fixed incorrect `Hudi` reads: previously it incorrectly chose which data to read and therefore could correctly read only small tables. - Made these engines pick up updates of changed data (previously the state was set on table creation). - Added proper testing for `Iceberg`/`DeltaLake`/`Hudi` using Spark. [#47307](https://github.com/ClickHouse/ClickHouse/pull/47307) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add async connection to socket and async writing to socket. Make creating connections and sending query/external tables async across shards. Refactor code with fibers. Closes [#46931](https://github.com/ClickHouse/ClickHouse/issues/46931). We will be able to increase `connect_timeout_with_failover_ms` by default after this PR (https://github.com/ClickHouse/ClickHouse/issues/5188). [#47229](https://github.com/ClickHouse/ClickHouse/pull/47229) ([Kruglov Pavel](https://github.com/Avogar)).
* Support config sections `keeper`/`keeper_server` as an alternative to `zookeeper`. Close [#34766](https://github.com/ClickHouse/ClickHouse/issues/34766) , [#34767](https://github.com/ClickHouse/ClickHouse/issues/34767). [#35113](https://github.com/ClickHouse/ClickHouse/pull/35113) ([李扬](https://github.com/taiyang-li)).
* It is possible to set the _secure_ flag in named_collections for a dictionary with a ClickHouse table source. Addresses [#38450](https://github.com/ClickHouse/ClickHouse/issues/38450). [#46323](https://github.com/ClickHouse/ClickHouse/pull/46323) ([Ilya Golshtein](https://github.com/ilejn)).
* The `bitCount` function now supports the `FixedString` and `String` data types. [#49044](https://github.com/ClickHouse/ClickHouse/pull/49044) ([flynn](https://github.com/ucasfl)).
* Added configurable retries for all operations with [Zoo]Keeper for Backup queries. [#47224](https://github.com/ClickHouse/ClickHouse/pull/47224) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Enable `use_environment_credentials` for S3 by default, so the entire provider chain is constructed by default. [#47397](https://github.com/ClickHouse/ClickHouse/pull/47397) ([Antonio Andelic](https://github.com/antonio2368)).
* Currently, the JSON_VALUE function is similar to Spark's get_json_object function, which gets a value from a JSON string by a path like '$.key', but with some differences: 1. Spark's get_json_object returns null when the path does not exist, while JSON_VALUE returns an empty string; 2. Spark's get_json_object can return a complex value, such as a JSON object/array, while JSON_VALUE returns an empty string. [#47494](https://github.com/ClickHouse/ClickHouse/pull/47494) ([KevinyhZou](https://github.com/KevinyhZou)).
* More flexible propagation of the insert table structure to table functions with `use_structure_from_insertion_table_in_table_functions`. Fixed an issue with name mapping and using virtual columns. There is no more need for the 'auto' setting. [#47962](https://github.com/ClickHouse/ClickHouse/pull/47962) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Do not continue retrying to connect to Keeper if the query is killed or over limits. [#47985](https://github.com/ClickHouse/ClickHouse/pull/47985) ([Raúl Marín](https://github.com/Algunenano)).
* Support Enum output/input in `BSONEachRow`, allow all map key types and avoid extra calculations on output. [#48122](https://github.com/ClickHouse/ClickHouse/pull/48122) ([Kruglov Pavel](https://github.com/Avogar)).
* Support more ClickHouse types in `ORC`/`Arrow`/`Parquet` formats: Enum(8|16), (U)Int(128|256), Decimal256 (for ORC), allow reading IPv4 from Int32 values (ORC outputs IPv4 as Int32, and we couldn't read it back), fix reading Nullable(IPv6) from binary data for `ORC`. [#48126](https://github.com/ClickHouse/ClickHouse/pull/48126) ([Kruglov Pavel](https://github.com/Avogar)).
* Add columns `perform_ttl_move_on_insert`, `load_balancing` for table `system.storage_policies`, modify column `volume_type` type to `Enum8`. [#48167](https://github.com/ClickHouse/ClickHouse/pull/48167) ([lizhuoyu5](https://github.com/lzydmxy)).
* Added support for the `BACKUP ALL` command, which backs up all tables and databases, including temporary and system ones. [#48189](https://github.com/ClickHouse/ClickHouse/pull/48189) ([Vitaly Baranov](https://github.com/vitlibar)).
* Function mapFromArrays supports `Map` type as an input. [#48207](https://github.com/ClickHouse/ClickHouse/pull/48207) ([李扬](https://github.com/taiyang-li)).
* The output of some SHOW PROCESSLIST is now sorted. [#48241](https://github.com/ClickHouse/ClickHouse/pull/48241) ([Robert Schulze](https://github.com/rschu1ze)).
* Per-query/per-server throttling for remote IO/local IO/BACKUPs (server settings: `max_remote_read_network_bandwidth_for_server`, `max_remote_write_network_bandwidth_for_server`, `max_local_read_bandwidth_for_server`, `max_local_write_bandwidth_for_server`, `max_backup_bandwidth_for_server`, settings: `max_remote_read_network_bandwidth`, `max_remote_write_network_bandwidth`, `max_local_read_bandwidth`, `max_local_write_bandwidth`, `max_backup_bandwidth`). [#48242](https://github.com/ClickHouse/ClickHouse/pull/48242) ([Azat Khuzhin](https://github.com/azat)).
* Support more types in `CapnProto` format: Map, (U)Int(128|256), Decimal(128|256). Allow integer conversions during input/output. [#48257](https://github.com/ClickHouse/ClickHouse/pull/48257) ([Kruglov Pavel](https://github.com/Avogar)).
* Don't throw CURRENT_WRITE_BUFFER_IS_EXHAUSTED for normal behaviour. [#48288](https://github.com/ClickHouse/ClickHouse/pull/48288) ([Raúl Marín](https://github.com/Algunenano)).
* Add new setting `keeper_map_strict_mode` which enforces extra guarantees on operations made on top of `KeeperMap` tables. [#48293](https://github.com/ClickHouse/ClickHouse/pull/48293) ([Antonio Andelic](https://github.com/antonio2368)).
* Check that the primary key type for a simple dictionary is a native unsigned integer type. Added the setting `check_dictionary_primary_key` for compatibility (set `check_dictionary_primary_key = false` to disable the check). [#48335](https://github.com/ClickHouse/ClickHouse/pull/48335) ([lizhuoyu5](https://github.com/lzydmxy)).
* Don't replicate mutations for `KeeperMap` because it's unnecessary. [#48354](https://github.com/ClickHouse/ClickHouse/pull/48354) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow to write/read unnamed tuple as nested Message in Protobuf format. Tuple elements and Message fields are matched by position. [#48390](https://github.com/ClickHouse/ClickHouse/pull/48390) ([Kruglov Pavel](https://github.com/Avogar)).
* Support `additional_table_filters` and `additional_result_filter` settings in the new planner. Also, add a documentation entry for `additional_result_filter`. [#48405](https://github.com/ClickHouse/ClickHouse/pull/48405) ([Dmitry Novik](https://github.com/novikd)).
* `parseDateTime` now understands format string '%f' (fractional seconds). [#48420](https://github.com/ClickHouse/ClickHouse/pull/48420) ([Robert Schulze](https://github.com/rschu1ze)).
* Format string "%f" in formatDateTime() now prints "000000" if the formatted value has no fractional seconds; the previous behavior (a single zero) can be restored using the setting "formatdatetime_f_prints_single_zero = 1". [#48422](https://github.com/ClickHouse/ClickHouse/pull/48422) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't replicate DELETE and TRUNCATE for KeeperMap. [#48434](https://github.com/ClickHouse/ClickHouse/pull/48434) ([Antonio Andelic](https://github.com/antonio2368)).
* Generate valid Decimals and Bools in generateRandom function. [#48436](https://github.com/ClickHouse/ClickHouse/pull/48436) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow trailing commas in expression list of SELECT query, for example `SELECT a, b, c, FROM table`. Closes [#37802](https://github.com/ClickHouse/ClickHouse/issues/37802). [#48438](https://github.com/ClickHouse/ClickHouse/pull/48438) ([Nikolay Degterinsky](https://github.com/evillique)).
* Override `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables with `--user` and `--password` client parameters. Closes [#38909](https://github.com/ClickHouse/ClickHouse/issues/38909). [#48440](https://github.com/ClickHouse/ClickHouse/pull/48440) ([Nikolay Degterinsky](https://github.com/evillique)).
* Added retries to loading of data parts in `MergeTree` tables in case of retryable errors. [#48442](https://github.com/ClickHouse/ClickHouse/pull/48442) ([Anton Popov](https://github.com/CurtizJ)).
* Add support for `Date`, `Date32`, `DateTime`, `DateTime64` data types to `arrayMin`, `arrayMax`, `arrayDifference` functions. Closes [#21645](https://github.com/ClickHouse/ClickHouse/issues/21645). [#48445](https://github.com/ClickHouse/ClickHouse/pull/48445) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add support for `{server_uuid}` macro. It is useful for identifying replicas in autoscaled clusters when new replicas are constantly added and removed in runtime. This closes [#48554](https://github.com/ClickHouse/ClickHouse/issues/48554). [#48563](https://github.com/ClickHouse/ClickHouse/pull/48563) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The installation script will create a hard link instead of copying if it is possible. [#48578](https://github.com/ClickHouse/ClickHouse/pull/48578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support `SHOW TABLE` syntax meaning the same as `SHOW CREATE TABLE`. Closes [#48580](https://github.com/ClickHouse/ClickHouse/issues/48580). [#48591](https://github.com/ClickHouse/ClickHouse/pull/48591) ([flynn](https://github.com/ucasfl)).
* Temporary buffers for the HTTP interface can now work by evicting data from the virtual filesystem cache. [#48664](https://github.com/ClickHouse/ClickHouse/pull/48664) ([Vladimir C](https://github.com/vdimir)).
* Make schema inference work for `CREATE AS SELECT`. Closes [#47599](https://github.com/ClickHouse/ClickHouse/issues/47599). [#48679](https://github.com/ClickHouse/ClickHouse/pull/48679) ([flynn](https://github.com/ucasfl)).
* Added a `replicated_max_mutations_in_one_entry` setting for `ReplicatedMergeTree` that allows limiting the number of mutation commands per one `MUTATE_PART` entry (default is 10000). [#48731](https://github.com/ClickHouse/ClickHouse/pull/48731) ([Alexander Tokmakov](https://github.com/tavplubix)).
* In AggregateFunction types, don't count unused arena bytes as `read_bytes`. [#48745](https://github.com/ClickHouse/ClickHouse/pull/48745) ([Raúl Marín](https://github.com/Algunenano)).
* Fix some MySQL-related settings not being handled with the MySQL dictionary source + named collection. Closes [#48402](https://github.com/ClickHouse/ClickHouse/issues/48402). [#48759](https://github.com/ClickHouse/ClickHouse/pull/48759) ([Kseniia Sumarokova](https://github.com/kssenii)).
* If a user set `max_single_part_upload_size` to a very large value, it can lead to a crash due to a bug in the AWS S3 SDK. This fixes [#47679](https://github.com/ClickHouse/ClickHouse/issues/47679). [#48816](https://github.com/ClickHouse/ClickHouse/pull/48816) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in `RabbitMQ` ([report](https://pastila.nl/?004f7100/de1505289ab5bb355e67ebe6c7cc8707)), refactor the code. [#48845](https://github.com/ClickHouse/ClickHouse/pull/48845) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add aliases `name` and `part_name` for `system.parts` and `system.part_log`. Closes [#48718](https://github.com/ClickHouse/ClickHouse/issues/48718). [#48850](https://github.com/ClickHouse/ClickHouse/pull/48850) ([sichenzhao](https://github.com/sichenzhao)).
* Functions "arrayDifferenceSupport()", "arrayCumSum()" and "arrayCumSumNonNegative()" now support input arrays of wide integer types (U)Int128/256. [#48866](https://github.com/ClickHouse/ClickHouse/pull/48866) ([cluster](https://github.com/infdahai)).
* Multi-line history in clickhouse-client is now no longer padded. This makes pasting more natural. [#48870](https://github.com/ClickHouse/ClickHouse/pull/48870) ([Joanna Hulboj](https://github.com/jh0x)).
* Implement a slight improvement for the rare case when ClickHouse is run inside LXC and LXCFS is used. LXCFS has an issue: sometimes it returns the error "Transport endpoint is not connected" when reading a file inside `/proc`. This error was correctly logged into ClickHouse's server log. We now additionally work around this issue by reopening the file. This is a minuscule change. [#48922](https://github.com/ClickHouse/ClickHouse/pull/48922) ([Real](https://github.com/RunningXie)).
* Improve memory accounting for prefetches. Randomise prefetch settings in CI. [#48973](https://github.com/ClickHouse/ClickHouse/pull/48973) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Correctly set headers for native copy operations on GCS. [#48981](https://github.com/ClickHouse/ClickHouse/pull/48981) ([Antonio Andelic](https://github.com/antonio2368)).
* Add support for specifying setting names in the command line with dashes instead of underscores, for example, `--max-threads` instead of `--max_threads`. Additionally, support Unicode dash characters like `—` instead of `--` - this is useful when you communicate with a team in another company, and a manager from that team copy-pasted code from MS Word. [#48985](https://github.com/ClickHouse/ClickHouse/pull/48985) ([alekseygolub](https://github.com/alekseygolub)).
* Add fallback to password authentication when authentication with SSL user certificate has failed. Closes [#48974](https://github.com/ClickHouse/ClickHouse/issues/48974). [#48989](https://github.com/ClickHouse/ClickHouse/pull/48989) ([Nikolay Degterinsky](https://github.com/evillique)).
* Improve the embedded dashboard. Close [#46671](https://github.com/ClickHouse/ClickHouse/issues/46671). [#49036](https://github.com/ClickHouse/ClickHouse/pull/49036) ([Kevin Zhang](https://github.com/Kinzeng)).
* Add profile events for log messages, so you can easily see the count of log messages by severity. [#49042](https://github.com/ClickHouse/ClickHouse/pull/49042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, the `LineAsString` format worked inconsistently when the parallel parsing was enabled or not, in presence of DOS or macOS Classic line breaks. This closes [#49039](https://github.com/ClickHouse/ClickHouse/issues/49039). [#49052](https://github.com/ClickHouse/ClickHouse/pull/49052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The exception message about the unparsed query parameter will also tell about the name of the parameter. Reimplement [#48878](https://github.com/ClickHouse/ClickHouse/issues/48878). Close [#48772](https://github.com/ClickHouse/ClickHouse/issues/48772). [#49061](https://github.com/ClickHouse/ClickHouse/pull/49061) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
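
A minimal sketch of the new `%f` handling from the `parseDateTime`/`formatDateTime` entries above; the behaviour in the comments is what the entries describe and is not verified against a specific build.

```sql
-- '%f' (fractional seconds) is now understood when parsing MySQL-style format strings.
SELECT parseDateTime('2023-04-30 12:30:45.123', '%Y-%m-%d %H:%i:%s.%f');

-- With the new default, '%f' prints '000000' when the value has no fractional seconds.
SELECT formatDateTime(now(), '%Y-%m-%d %H:%i:%s.%f');

-- Restore the previous single-zero behaviour for this query only.
SELECT formatDateTime(now(), '%Y-%m-%d %H:%i:%s.%f')
SETTINGS formatdatetime_f_prints_single_zero = 1;
```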
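
A hedged sketch combining the `BACKUP ALL` and per-query backup throttling entries above; it assumes a disk named `backups` is configured as a backup destination, and the bandwidth value is only illustrative.

```sql
-- Throttle backup traffic for this session (per-query setting from the throttling entry above).
SET max_backup_bandwidth = 100000000;  -- bytes per second, illustrative value

-- Back up all databases and tables, including temporary and system ones.
BACKUP ALL TO Disk('backups', 'full_backup.zip');
```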
#### Build/Testing/Packaging Improvement
* Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Reduce the number of dependencies in the header files to speed up the build. [#47984](https://github.com/ClickHouse/ClickHouse/pull/47984) ([Dmitry Novik](https://github.com/novikd)).
* Randomize compression of marks and indices in tests. [#48286](https://github.com/ClickHouse/ClickHouse/pull/48286) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump internal ZSTD from 1.5.4 to 1.5.5. [#46797](https://github.com/ClickHouse/ClickHouse/pull/46797) ([Robert Schulze](https://github.com/rschu1ze)).
* Randomize vertical merges from compact to wide parts in tests. [#48287](https://github.com/ClickHouse/ClickHouse/pull/48287) ([Raúl Marín](https://github.com/Algunenano)).
* Support for CRC32 checksum in HDFS. Fix performance issues. [#48614](https://github.com/ClickHouse/ClickHouse/pull/48614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove remainders of GCC support. [#48671](https://github.com/ClickHouse/ClickHouse/pull/48671) ([Robert Schulze](https://github.com/rschu1ze)).
* Add CI run with new analyzer infrastructure enabled. [#48719](https://github.com/ClickHouse/ClickHouse/pull/48719) ([Dmitry Novik](https://github.com/novikd)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix system.query_views_log for MVs that are pushed from background threads [#46668](https://github.com/ClickHouse/ClickHouse/pull/46668) ([Azat Khuzhin](https://github.com/azat)).
* Fix several `RENAME COLUMN` bugs [#46946](https://github.com/ClickHouse/ClickHouse/pull/46946) ([alesapin](https://github.com/alesapin)).
* Fix minor highlighting issues in clickhouse-format [#47610](https://github.com/ClickHouse/ClickHouse/pull/47610) ([Natasha Murashkina](https://github.com/murfel)).
* Fix a bug in LLVM's libc++ leading to a crash when uploading parts to S3 whose size is greater than INT_MAX [#47693](https://github.com/ClickHouse/ClickHouse/pull/47693) ([Azat Khuzhin](https://github.com/azat)).
* Fix overflow in the `sparkbar` function [#48121](https://github.com/ClickHouse/ClickHouse/pull/48121) ([Vladimir C](https://github.com/vdimir)).
* Fix race in S3 [#48190](https://github.com/ClickHouse/ClickHouse/pull/48190) ([Anton Popov](https://github.com/CurtizJ)).
* Disable JIT for aggregate functions due to inconsistent behavior [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix alter formatting (minor) [#48289](https://github.com/ClickHouse/ClickHouse/pull/48289) ([Natasha Murashkina](https://github.com/murfel)).
* Fix CPU usage in RabbitMQ (was worsened in 23.2 after [#44404](https://github.com/ClickHouse/ClickHouse/issues/44404)) [#48311](https://github.com/ClickHouse/ClickHouse/pull/48311) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix crash in EXPLAIN PIPELINE for Merge over Distributed [#48320](https://github.com/ClickHouse/ClickHouse/pull/48320) ([Azat Khuzhin](https://github.com/azat)).
* Fix serializing LowCardinality as Arrow dictionary [#48361](https://github.com/ClickHouse/ClickHouse/pull/48361) ([Kruglov Pavel](https://github.com/Avogar)).
* Reset downloader for cache file segment in TemporaryFileStream [#48386](https://github.com/ClickHouse/ClickHouse/pull/48386) ([Vladimir C](https://github.com/vdimir)).
* Fix possible SYSTEM SYNC REPLICA stuck in case of DROP/REPLACE PARTITION [#48391](https://github.com/ClickHouse/ClickHouse/pull/48391) ([Azat Khuzhin](https://github.com/azat)).
* Fix a startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Don't check dependencies when renaming system tables automatically [#48431](https://github.com/ClickHouse/ClickHouse/pull/48431) ([Raúl Marín](https://github.com/Algunenano)).
* Update only affected rows in KeeperMap storage [#48435](https://github.com/ClickHouse/ClickHouse/pull/48435) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible segfault in the VFS cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
* `toTimeZone` function throws an error when no constant string is provided [#48471](https://github.com/ClickHouse/ClickHouse/pull/48471) ([Jordi Villar](https://github.com/jrdi)).
* Fix logical error with IPv4 in Protobuf, add support for Date32 [#48486](https://github.com/ClickHouse/ClickHouse/pull/48486) ([Kruglov Pavel](https://github.com/Avogar)).
* "changed" flag in system.settings was calculated incorrectly for settings with multiple values [#48516](https://github.com/ClickHouse/ClickHouse/pull/48516) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix storage `Memory` with enabled compression [#48517](https://github.com/ClickHouse/ClickHouse/pull/48517) ([Anton Popov](https://github.com/CurtizJ)).
* Fix bracketed-paste mode messing up password input in the event of client reconnection [#48528](https://github.com/ClickHouse/ClickHouse/pull/48528) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix an uncaught exception in case of parallel loader for hashed dictionaries [#48571](https://github.com/ClickHouse/ClickHouse/pull/48571) ([Azat Khuzhin](https://github.com/azat)).
* The `groupArray` aggregate function now works correctly for an empty result over nullable types [#48593](https://github.com/ClickHouse/ClickHouse/pull/48593) ([lgbo](https://github.com/lgbo-ustc)).
* Fix a bug in Keeper where a node was sometimes not created when the `auth` scheme was used in its ACL. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Allow IPv4 comparison operators with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix possible error from cache [#48636](https://github.com/ClickHouse/ClickHouse/pull/48636) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Async inserts with empty data will no longer throw exception. [#48663](https://github.com/ClickHouse/ClickHouse/pull/48663) ([Anton Popov](https://github.com/CurtizJ)).
* Fix table dependencies in case of failed RENAME TABLE [#48683](https://github.com/ClickHouse/ClickHouse/pull/48683) ([Azat Khuzhin](https://github.com/azat)).
* If the primary key has duplicate columns (which is only possible for projections), in previous versions it might lead to a bug [#48838](https://github.com/ClickHouse/ClickHouse/pull/48838) ([Amos Bird](https://github.com/amosbird)).
* Fix for a race condition in ZooKeeper when joining send_thread/receive_thread [#48849](https://github.com/ClickHouse/ClickHouse/pull/48849) ([Alexander Gololobov](https://github.com/davenger)).
* Fix unexpected part name error when trying to drop an ignored detached part with zero-copy replication [#48862](https://github.com/ClickHouse/ClickHouse/pull/48862) ([Michael Lex](https://github.com/mlex)).
* Fix reading a `Date32` Parquet/Arrow column into a non-`Date32` column [#48864](https://github.com/ClickHouse/ClickHouse/pull/48864) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `UNKNOWN_IDENTIFIER` error while selecting from table with row policy and column with dots [#48976](https://github.com/ClickHouse/ClickHouse/pull/48976) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix aggregation by empty nullable strings [#48999](https://github.com/ClickHouse/ClickHouse/pull/48999) ([LiuNeng](https://github.com/liuneng1994)).
### <a id="233"></a> ClickHouse release 23.3 LTS, 2023-03-30
#### Upgrade Notes
* Lightweight DELETEs are production ready and enabled by default. The `DELETE` query for MergeTree tables is now available by default (see the example at the end of this list).
* The behavior of `*domain*RFC` and `netloc` functions is slightly changed: relaxed the set of symbols that are allowed in the URL authority for better conformance. [#46841](https://github.com/ClickHouse/ClickHouse/pull/46841) ([Azat Khuzhin](https://github.com/azat)).
* Prohibited creating tables based on KafkaEngine with DEFAULT/EPHEMERAL/ALIAS/MATERIALIZED statements for columns. [#47138](https://github.com/ClickHouse/ClickHouse/pull/47138) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* An "asynchronous connection drain" feature is removed. Related settings and metrics are removed as well. It was an internal feature, so the removal should not affect users who had never heard about that feature. [#47486](https://github.com/ClickHouse/ClickHouse/pull/47486) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support 256-bit Decimal data type (more than 38 digits) in `arraySum`/`Min`/`Max`/`Avg`/`Product`, `arrayCumSum`/`CumSumNonNegative`, `arrayDifference`, array construction, IN operator, query parameters, `groupArrayMovingSum`, statistical functions, `min`/`max`/`any`/`argMin`/`argMax`, PostgreSQL wire protocol, MySQL table engine and function, `sumMap`, `mapAdd`, `mapSubtract`, `arrayIntersect`. Add support for big integers in `arrayIntersect`. Statistical aggregate functions involving moments (such as `corr` or various `TTest`s) will use `Float64` as their internal representation (they were using `Decimal128` before this change, but it was pointless), and these functions can return `nan` instead of `inf` in case of infinite variance. Some functions were allowed on `Decimal256` data types but returned `Decimal128` in previous versions - now it is fixed. This closes [#47569](https://github.com/ClickHouse/ClickHouse/issues/47569). This closes [#44864](https://github.com/ClickHouse/ClickHouse/issues/44864). This closes [#28335](https://github.com/ClickHouse/ClickHouse/issues/28335). [#47594](https://github.com/ClickHouse/ClickHouse/pull/47594) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make backup_threads/restore_threads server settings (instead of user settings). [#47881](https://github.com/ClickHouse/ClickHouse/pull/47881) ([Azat Khuzhin](https://github.com/azat)).
* Do not allow const and non-deterministic secondary indices [#46839](https://github.com/ClickHouse/ClickHouse/pull/46839) ([Anton Popov](https://github.com/CurtizJ)).
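
A minimal illustration of the lightweight `DELETE` from the first upgrade note above; the table and column names are hypothetical.

```sql
-- Lightweight DELETE on a MergeTree table: matching rows are masked right away
-- and physically removed later by background merges.
DELETE FROM hits WHERE UserID = 12345;
```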
#### New Feature
* Add a new mode for splitting the work on replicas using settings `parallel_replicas_custom_key` and `parallel_replicas_custom_key_filter_type`. If the cluster consists of a single shard with multiple replicas, up to `max_parallel_replicas` will be randomly picked and turned into shards. For each shard, a corresponding filter is added to the query on the initiator before being sent to the shard. If the cluster consists of multiple shards, it will behave the same as `sample_key` but with the possibility to define an arbitrary key. [#45108](https://github.com/ClickHouse/ClickHouse/pull/45108) ([Antonio Andelic](https://github.com/antonio2368)).
* An option to display partial result on cancel: Added query setting `partial_result_on_first_cancel` allowing the canceled query (e.g. due to Ctrl-C) to return a partial result. [#45689](https://github.com/ClickHouse/ClickHouse/pull/45689) ([Alexey Perevyshin](https://github.com/alexX512)).
* Added support of arbitrary tables engines for temporary tables (except for Replicated and KeeperMap engines). Close [#31497](https://github.com/ClickHouse/ClickHouse/issues/31497). [#46071](https://github.com/ClickHouse/ClickHouse/pull/46071) ([Roman Vasin](https://github.com/rvasin)).
* Add support for replication of user-defined SQL functions using centralized storage in Keeper. [#46085](https://github.com/ClickHouse/ClickHouse/pull/46085) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Implement `system.server_settings` (similar to `system.settings`), which will contain server configurations. [#46550](https://github.com/ClickHouse/ClickHouse/pull/46550) ([pufit](https://github.com/pufit)).
* Support for `UNDROP TABLE` query. Closes [#46811](https://github.com/ClickHouse/ClickHouse/issues/46811). [#47241](https://github.com/ClickHouse/ClickHouse/pull/47241) ([chen](https://github.com/xiedeyantu)).
* Allow separate grants for named collections (e.g. to be able to give `SHOW/CREATE/ALTER/DROP named collection` access only to certain collections, instead of all at once). Closes [#40894](https://github.com/ClickHouse/ClickHouse/issues/40894). Add new access type `NAMED_COLLECTION_CONTROL` which is not given to user default unless explicitly added to the user config (is required to be able to do `GRANT ALL`), also `show_named_collections` is no longer obligatory to be manually specified for user default to be able to have full access rights as was in 23.2. [#46241](https://github.com/ClickHouse/ClickHouse/pull/46241) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow nested custom disks. Previously custom disks supported only flat disk structure. [#47106](https://github.com/ClickHouse/ClickHouse/pull/47106) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Introduce a function `widthBucket` (with a `WIDTH_BUCKET` alias for compatibility). [#42974](https://github.com/ClickHouse/ClickHouse/issues/42974). [#46790](https://github.com/ClickHouse/ClickHouse/pull/46790) ([avoiderboi](https://github.com/avoiderboi)).
* Add new function `parseDateTime`/`parseDateTimeInJodaSyntax` according to the specified format string. parseDateTime parses String to DateTime in MySQL syntax, parseDateTimeInJodaSyntax parses in Joda syntax. [#46815](https://github.com/ClickHouse/ClickHouse/pull/46815) ([李扬](https://github.com/taiyang-li)).
* Use `dummy UInt8` for the default structure of table function `null`. Closes [#46930](https://github.com/ClickHouse/ClickHouse/issues/46930). [#47006](https://github.com/ClickHouse/ClickHouse/pull/47006) ([flynn](https://github.com/ucasfl)).
* Support for date format with a comma, like `Dec 15, 2021` in the `parseDateTimeBestEffort` function. Closes [#46816](https://github.com/ClickHouse/ClickHouse/issues/46816). [#47071](https://github.com/ClickHouse/ClickHouse/pull/47071) ([chen](https://github.com/xiedeyantu)).
* Add settings `http_wait_end_of_query` and `http_response_buffer_size` that corresponds to URL params `wait_end_of_query` and `buffer_size` for the HTTP interface. This allows changing these settings in the profiles. [#47108](https://github.com/ClickHouse/ClickHouse/pull/47108) ([Vladimir C](https://github.com/vdimir)).
* Add `system.dropped_tables` table that shows tables that were dropped from `Atomic` databases but were not completely removed yet. [#47364](https://github.com/ClickHouse/ClickHouse/pull/47364) ([chen](https://github.com/xiedeyantu)).
* Add `INSTR` as alias of `positionCaseInsensitive` for MySQL compatibility. Closes [#47529](https://github.com/ClickHouse/ClickHouse/issues/47529). [#47535](https://github.com/ClickHouse/ClickHouse/pull/47535) ([flynn](https://github.com/ucasfl)).
* Added `toDecimalString` function allowing to convert numbers to string with fixed precision. [#47838](https://github.com/ClickHouse/ClickHouse/pull/47838) ([Andrey Zvonov](https://github.com/zvonand)).
* Add a merge tree setting `max_number_of_mutations_for_replica`. It limits the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings). [#48047](https://github.com/ClickHouse/ClickHouse/pull/48047) ([Vladimir C](https://github.com/vdimir)).
* Add the Map-related function `mapFromArrays`, which allows the creation of a map from a pair of arrays. [#31125](https://github.com/ClickHouse/ClickHouse/pull/31125) ([李扬](https://github.com/taiyang-li)).
* Allow control of compression in Parquet/ORC/Arrow output formats, adds support for more compression input formats. This closes [#13541](https://github.com/ClickHouse/ClickHouse/issues/13541). [#47114](https://github.com/ClickHouse/ClickHouse/pull/47114) ([Kruglov Pavel](https://github.com/Avogar)).
* Add SSL User Certificate authentication to the native protocol. Closes [#47077](https://github.com/ClickHouse/ClickHouse/issues/47077). [#47596](https://github.com/ClickHouse/ClickHouse/pull/47596) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add *OrNull() and *OrZero() variants for `parseDateTime`, add alias `str_to_date` for MySQL parity. [#48000](https://github.com/ClickHouse/ClickHouse/pull/48000) ([Robert Schulze](https://github.com/rschu1ze)).
* Added operator `REGEXP` (similar to operators "LIKE", "IN", "MOD" etc.) for better compatibility with MySQL; see the combined example below. [#47869](https://github.com/ClickHouse/ClickHouse/pull/47869) ([Robert Schulze](https://github.com/rschu1ze)).
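
A combined, hedged sketch of a few of the new functions and operators from this list; the results in the comments follow the entries' descriptions and are not verified against a specific build.

```sql
-- widthBucket(value, low, high, buckets): equi-width histogram bucket the value falls into.
SELECT widthBucket(10.15, 0, 20, 4);             -- 3 (buckets of width 5 over [0, 20))

-- parseDateTime: MySQL-style format string (parseDateTimeInJodaSyntax covers Joda syntax).
SELECT parseDateTime('2023-03-30 14:05:00', '%Y-%m-%d %H:%i:%s');

-- toDecimalString: render a number with a fixed number of fractional digits.
SELECT toDecimalString(3.14159, 2);              -- '3.14'

-- REGEXP operator, added for MySQL compatibility.
SELECT count() FROM system.tables WHERE name REGEXP '^query';
```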
#### Performance Improvement
* Marks in memory are now compressed, using 3-6x less memory. [#47290](https://github.com/ClickHouse/ClickHouse/pull/47290) ([Michael Kolupaev](https://github.com/al13n321)).
* Backups for large numbers of files were unbelievably slow in previous versions. Not anymore. Now they are unbelievably fast. [#47251](https://github.com/ClickHouse/ClickHouse/pull/47251) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Introduced a separate thread pool for backup's IO operations. This will allow scaling it independently of other pools and increase performance. [#47174](https://github.com/ClickHouse/ClickHouse/pull/47174) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). Use MultiRead request and retries for collecting metadata at the final stage of backup processing. [#47243](https://github.com/ClickHouse/ClickHouse/pull/47243) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). If a backup and restoring data are both in S3 then server-side copy should be used from now on. [#47546](https://github.com/ClickHouse/ClickHouse/pull/47546) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed excessive reading in queries with `FINAL`. [#47801](https://github.com/ClickHouse/ClickHouse/pull/47801) ([Nikita Taranov](https://github.com/nickitat)).
* The setting `max_final_threads` is now set to the number of cores at server startup (by the same algorithm as used for `max_threads`). This improves the concurrency of `FINAL` execution on servers with a high number of CPUs. [#47915](https://github.com/ClickHouse/ClickHouse/pull/47915) ([Nikita Taranov](https://github.com/nickitat)).
* Allow executing the reading pipeline for the DIRECT dictionary with a CLICKHOUSE source in multiple threads. To enable, set `dictionary_use_async_executor=1` in the `SETTINGS` section of the source in the `CREATE DICTIONARY` statement. [#47986](https://github.com/ClickHouse/ClickHouse/pull/47986) ([Vladimir C](https://github.com/vdimir)).
* Optimize one nullable key aggregate performance. [#45772](https://github.com/ClickHouse/ClickHouse/pull/45772) ([LiuNeng](https://github.com/liuneng1994)).
* Implemented lowercase `tokenbf_v1` index utilization for `hasTokenOrNull`, `hasTokenCaseInsensitive` and `hasTokenCaseInsensitiveOrNull`. [#46252](https://github.com/ClickHouse/ClickHouse/pull/46252) ([ltrk2](https://github.com/ltrk2)).
* Optimize functions `position` and `LIKE` by searching the first two chars using SIMD. [#46289](https://github.com/ClickHouse/ClickHouse/pull/46289) ([Jiebin Sun](https://github.com/jiebinn)).
* Optimize queries from the `system.detached_parts`, which could be significantly large. Added several sources with respect to the block size limitation; in each block, an IO thread pool is used to calculate the part size, i.e. to make syscalls in parallel. [#46624](https://github.com/ClickHouse/ClickHouse/pull/46624) ([Sema Checherinda](https://github.com/CheSema)).
* Increase the default value of `max_replicated_merges_in_queue` for ReplicatedMergeTree tables from 16 to 1000. It allows faster background merge operation on clusters with a very large number of replicas, such as clusters with shared storage in ClickHouse Cloud. [#47050](https://github.com/ClickHouse/ClickHouse/pull/47050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Updated `clickhouse-copier` to use `GROUP BY` instead of `DISTINCT` to get the list of partitions. For large tables, this reduced the select time from over 500s to under 1s. [#47386](https://github.com/ClickHouse/ClickHouse/pull/47386) ([Clayton McClure](https://github.com/cmcclure-twilio)).
* Fix performance degradation in `ASOF JOIN`. [#47544](https://github.com/ClickHouse/ClickHouse/pull/47544) ([Ongkong](https://github.com/ongkong)).
* Even more batching in Keeper. Improve performance by avoiding breaking batches on read requests. [#47978](https://github.com/ClickHouse/ClickHouse/pull/47978) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow PREWHERE for Merge with different DEFAULT expressions for columns. [#46831](https://github.com/ClickHouse/ClickHouse/pull/46831) ([Azat Khuzhin](https://github.com/azat)).
#### Experimental Feature
* Parallel replicas: Improved the overall performance by better utilizing the local replica, and forbid the reading with parallel replicas from non-replicated MergeTree by default. [#47858](https://github.com/ClickHouse/ClickHouse/pull/47858) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Support filter push down to left table for JOIN with `Join`, `Dictionary` and `EmbeddedRocksDB` tables if the experimental Analyzer is enabled. [#47280](https://github.com/ClickHouse/ClickHouse/pull/47280) ([Maksim Kita](https://github.com/kitaisreal)).
* Now ReplicatedMergeTree with zero copy replication has less load to Keeper. [#47676](https://github.com/ClickHouse/ClickHouse/pull/47676) ([alesapin](https://github.com/alesapin)).
* Fix create materialized view with MaterializedPostgreSQL [#40807](https://github.com/ClickHouse/ClickHouse/pull/40807) ([Maksim Buren](https://github.com/maks-buren630501)).
#### Improvement
* Enable `input_format_json_ignore_unknown_keys_in_named_tuple` by default. [#46742](https://github.com/ClickHouse/ClickHouse/pull/46742) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow errors to be ignored while pushing to MATERIALIZED VIEW (add new setting `materialized_views_ignore_errors`, `false` by default; it is set to `true` for flushing logs to `system.*_log` tables unconditionally). [#46658](https://github.com/ClickHouse/ClickHouse/pull/46658) ([Azat Khuzhin](https://github.com/azat)).
* Track the file queue of distributed sends in memory. [#45491](https://github.com/ClickHouse/ClickHouse/pull/45491) ([Azat Khuzhin](https://github.com/azat)).
* Now `X-ClickHouse-Query-Id` and `X-ClickHouse-Timezone` headers are added to responses in all queries via HTTP protocol. Previously it was done only for `SELECT` queries. [#46364](https://github.com/ClickHouse/ClickHouse/pull/46364) ([Anton Popov](https://github.com/CurtizJ)).
* External tables from `MongoDB`: support for connection to a replica set via a URI with a host:port enum and support for the readPreference option in MongoDB dictionaries. Example URI: mongodb://db0.example.com:27017,db1.example.com:27017,db2.example.com:27017/?replicaSet=myRepl&readPreference=primary. [#46524](https://github.com/ClickHouse/ClickHouse/pull/46524) ([artem-yadr](https://github.com/artem-yadr)).
* This improvement should be invisible for users. Re-implement projection analysis on top of query plan. Added setting `query_plan_optimize_projection=1` to switch between old and new version. Fixes [#44963](https://github.com/ClickHouse/ClickHouse/issues/44963). [#46537](https://github.com/ClickHouse/ClickHouse/pull/46537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Use Parquet format v2 instead of v1 in output format by default. Add setting `output_format_parquet_version` to control parquet version, possible values `1.0`, `2.4`, `2.6`, `2.latest` (default). [#46617](https://github.com/ClickHouse/ClickHouse/pull/46617) ([Kruglov Pavel](https://github.com/Avogar)).
* It is now possible to use the new configuration syntax to configure Kafka topics with periods (`.`) in their name. [#46752](https://github.com/ClickHouse/ClickHouse/pull/46752) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix heuristics that check hyperscan patterns for problematic repeats. [#46819](https://github.com/ClickHouse/ClickHouse/pull/46819) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't report ZK node exists to system.errors when a block was created concurrently by a different replica. [#46820](https://github.com/ClickHouse/ClickHouse/pull/46820) ([Raúl Marín](https://github.com/Algunenano)).
* Increase the limit for opened files in `clickhouse-local`. It will be able to read from `web` tables on servers with a huge number of CPU cores. Do not back off reading from the URL table engine in case of too many opened files. This closes [#46852](https://github.com/ClickHouse/ClickHouse/issues/46852). [#46853](https://github.com/ClickHouse/ClickHouse/pull/46853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Exceptions thrown when numbers cannot be parsed now have an easier-to-read exception message. [#46917](https://github.com/ClickHouse/ClickHouse/pull/46917) ([Robert Schulze](https://github.com/rschu1ze)).
* `system.backups` is now updated after every processed task to track the progress of backups. [#46989](https://github.com/ClickHouse/ClickHouse/pull/46989) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Allow types conversion in Native input format. Add setting `input_format_native_allow_types_conversion` that controls it (enabled by default). [#46990](https://github.com/ClickHouse/ClickHouse/pull/46990) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow IPv4 in the `range` function to generate IP ranges. [#46995](https://github.com/ClickHouse/ClickHouse/pull/46995) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Improve exception message when it's impossible to move a part from one volume/disk to another. [#47032](https://github.com/ClickHouse/ClickHouse/pull/47032) ([alesapin](https://github.com/alesapin)).
* Support the `Bool` type in the `JSONType` function. Previously the `Null` type was mistakenly returned for bool values (see the example at the end of this list). [#47046](https://github.com/ClickHouse/ClickHouse/pull/47046) ([Anton Popov](https://github.com/CurtizJ)).
* Use `_request_body` parameter to configure predefined HTTP queries. [#47086](https://github.com/ClickHouse/ClickHouse/pull/47086) ([Constantine Peresypkin](https://github.com/pkit)).
* Automatic indentation in the built-in UI SQL editor when Enter is pressed. [#47113](https://github.com/ClickHouse/ClickHouse/pull/47113) ([Alexey Korepanov](https://github.com/alexkorep)).
* Self-extraction with 'sudo' will attempt to set the uid and gid of extracted files to the running user. [#47116](https://github.com/ClickHouse/ClickHouse/pull/47116) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Previously, the `repeat` function's second argument only accepted an unsigned integer type, which meant it could not accept values such as -1. This behavior differed from that of the Spark function. In this update, the repeat function has been modified to match the behavior of the Spark function. It now accepts the same types of inputs, including negative integers. Extensive testing has been performed to verify the correctness of the updated implementation. [#47134](https://github.com/ClickHouse/ClickHouse/pull/47134) ([KevinyhZou](https://github.com/KevinyhZou)). Note: the changelog entry was rewritten by ChatGPT.
* Remove `::__1` part from stacktraces. Display `std::basic_string<char, ...` as `String` in stacktraces. [#47171](https://github.com/ClickHouse/ClickHouse/pull/47171) ([Mike Kot](https://github.com/myrrc)).
* Reimplement interserver mode to avoid replay attacks (note, that change is backward compatible with older servers). [#47213](https://github.com/ClickHouse/ClickHouse/pull/47213) ([Azat Khuzhin](https://github.com/azat)).
* Improve recognition of regular expression groups and refine the regexp_tree dictionary. [#47218](https://github.com/ClickHouse/ClickHouse/pull/47218) ([Han Fei](https://github.com/hanfei1991)).
* Keeper improvement: Add new 4LW `clrs` to clean resources used by Keeper (e.g. release unused memory). [#47256](https://github.com/ClickHouse/ClickHouse/pull/47256) ([Antonio Andelic](https://github.com/antonio2368)).
* Add optional arguments to codecs `DoubleDelta(bytes_size)`, `Gorilla(bytes_size)`, `FPC(level, float_size)`, this allows using these codecs without column type in `clickhouse-compressor`. Fix possible aborts and arithmetic errors in `clickhouse-compressor` with these codecs. Fixes: https://github.com/ClickHouse/ClickHouse/discussions/47262. [#47271](https://github.com/ClickHouse/ClickHouse/pull/47271) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for big int types to the `runningDifference` function. Closes [#47194](https://github.com/ClickHouse/ClickHouse/issues/47194). [#47322](https://github.com/ClickHouse/ClickHouse/pull/47322) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add an expiration window for S3 credentials that have an expiration time to avoid `ExpiredToken` errors in some edge cases. It can be controlled with `expiration_window_seconds` config, the default is 120 seconds. [#47423](https://github.com/ClickHouse/ClickHouse/pull/47423) ([Antonio Andelic](https://github.com/antonio2368)).
* Support Decimals and Date32 in `Avro` format. [#47434](https://github.com/ClickHouse/ClickHouse/pull/47434) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not start the server if an interrupted conversion from `Ordinary` to `Atomic` was detected, print a better error message with troubleshooting instructions. [#47487](https://github.com/ClickHouse/ClickHouse/pull/47487) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a new column `kind` to the `system.opentelemetry_span_log`. This column holds the value of [SpanKind](https://opentelemetry.io/docs/reference/specification/trace/api/#spankind) defined in OpenTelemetry. [#47499](https://github.com/ClickHouse/ClickHouse/pull/47499) ([Frank Chen](https://github.com/FrankChen021)).
* Allow reading/writing nested arrays in `Protobuf` format with only the root field name as the column name. Previously the column name had to contain all nested field names (like `a.b.c Array(Array(Array(UInt32)))`); now you can use just `a Array(Array(Array(UInt32)))`. [#47650](https://github.com/ClickHouse/ClickHouse/pull/47650) ([Kruglov Pavel](https://github.com/Avogar)).
* Added an optional `STRICT` modifier for `SYSTEM SYNC REPLICA` which makes the query wait for the replication queue to become empty (just like it worked before https://github.com/ClickHouse/ClickHouse/pull/45648); see the example at the end of this list. [#47659](https://github.com/ClickHouse/ClickHouse/pull/47659) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve the naming of some OpenTelemetry span logs. [#47667](https://github.com/ClickHouse/ClickHouse/pull/47667) ([Frank Chen](https://github.com/FrankChen021)).
* Prevent using too long chains of aggregate function combinators (they can lead to slow queries in the analysis stage). This closes [#47715](https://github.com/ClickHouse/ClickHouse/issues/47715). [#47716](https://github.com/ClickHouse/ClickHouse/pull/47716) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support for subquery in parameterized views; resolves [#46741](https://github.com/ClickHouse/ClickHouse/issues/46741) [#47725](https://github.com/ClickHouse/ClickHouse/pull/47725) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix memory leak in MySQL integration (reproduces with `connection_auto_close=1`). [#47732](https://github.com/ClickHouse/ClickHouse/pull/47732) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Improved error handling in the code related to Decimal parameters, resulting in more informative error messages. Previously, when incorrect Decimal parameters were supplied, the error message generated was unclear or unhelpful. With this update, the error message printed has been fixed to provide more detailed and useful information, making it easier to identify and correct issues related to Decimal parameters. [#47812](https://github.com/ClickHouse/ClickHouse/pull/47812) ([Yu Feng](https://github.com/Vigor-jpg)). Note: this changelog entry is rewritten by ChatGPT.
* The setting `exact_rows_before_limit` makes `rows_before_limit_at_least` accurately reflect the number of rows returned before the limit is reached. This pull request addresses issues encountered when the query involves distributed processing across multiple shards or sorting operations; prior to this update, those scenarios did not work as intended. [#47874](https://github.com/ClickHouse/ClickHouse/pull/47874) ([Amos Bird](https://github.com/amosbird)).
* ThreadPools metrics introspection. [#47880](https://github.com/ClickHouse/ClickHouse/pull/47880) ([Azat Khuzhin](https://github.com/azat)).
* Add `WriteBufferFromS3Microseconds` and `WriteBufferFromS3RequestsErrors` profile events. [#47885](https://github.com/ClickHouse/ClickHouse/pull/47885) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `--link` and `--noninteractive` (`-y`) options to ClickHouse install. Closes [#47750](https://github.com/ClickHouse/ClickHouse/issues/47750). [#47887](https://github.com/ClickHouse/ClickHouse/pull/47887) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fixed `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix case when the (optional) path is not added to an encrypted disk configuration. [#47981](https://github.com/ClickHouse/ClickHouse/pull/47981) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support for CTEs in parameterized views. Implementation: updated to allow query parameters while evaluating scalar subqueries. [#48065](https://github.com/ClickHouse/ClickHouse/pull/48065) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Support big integers `(U)Int128/(U)Int256`, `Map` with any key type and `DateTime64` with any precision (not only 3 and 6). [#48119](https://github.com/ClickHouse/ClickHouse/pull/48119) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow skipping errors related to unknown enum values in row input formats. [#48133](https://github.com/ClickHouse/ClickHouse/pull/48133) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
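
A minimal sketch of the optional `STRICT` modifier for `SYSTEM SYNC REPLICA` mentioned above; the database and table names are hypothetical.

```sql
-- With STRICT, the query waits until the replication queue of this replica becomes empty.
SYSTEM SYNC REPLICA db.replicated_table STRICT;
```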
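
A small example for the `JSONType` entry above: boolean JSON values are now reported as `Bool` rather than `Null`.

```sql
-- Previously this returned 'Null' for boolean JSON values; now it returns 'Bool'.
SELECT JSONType('{"flag": true}', 'flag');
```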
#### Build/Testing/Packaging Improvement
* ClickHouse now builds with `C++23`. [#47424](https://github.com/ClickHouse/ClickHouse/pull/47424) ([Robert Schulze](https://github.com/rschu1ze)).
* Fuzz `EXPLAIN` queries in the AST Fuzzer. [#47803](https://github.com/ClickHouse/ClickHouse/pull/47803) [#47852](https://github.com/ClickHouse/ClickHouse/pull/47852) ([flynn](https://github.com/ucasfl)).
* Split stress test and the automated backward compatibility check (now Upgrade check). [#44879](https://github.com/ClickHouse/ClickHouse/pull/44879) ([Kruglov Pavel](https://github.com/Avogar)).
* Updated the Ubuntu Image for Docker to calm down some bogus security reports. [#46784](https://github.com/ClickHouse/ClickHouse/pull/46784) ([Julio Jimenez](https://github.com/juliojimenez)). Please note that ClickHouse has no dependencies and does not require Docker.
* Adds a prompt to allow the removal of an existing `clickhouse` download when using "curl | sh" download of ClickHouse. Prompt is "ClickHouse binary clickhouse already exists. Overwrite? \[y/N\]". [#46859](https://github.com/ClickHouse/ClickHouse/pull/46859) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Fix error during server startup on old distros (e.g. Amazon Linux 2) and on ARM that glibc 2.28 symbols are not found. [#47008](https://github.com/ClickHouse/ClickHouse/pull/47008) ([Robert Schulze](https://github.com/rschu1ze)).
* Prepare for clang 16. [#47027](https://github.com/ClickHouse/ClickHouse/pull/47027) ([Amos Bird](https://github.com/amosbird)).
* Added a CI check which ensures ClickHouse can run with an old glibc on ARM. [#47063](https://github.com/ClickHouse/ClickHouse/pull/47063) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a style check to prevent incorrect usage of the `NDEBUG` macro. [#47699](https://github.com/ClickHouse/ClickHouse/pull/47699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the build a little. [#47714](https://github.com/ClickHouse/ClickHouse/pull/47714) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump `vectorscan` to 5.4.9. [#47955](https://github.com/ClickHouse/ClickHouse/pull/47955) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a unit test to assert Apache Arrow's fatal logging does not abort. It covers the changes in [ClickHouse/arrow#16](https://github.com/ClickHouse/arrow/pull/16). [#47958](https://github.com/ClickHouse/ClickHouse/pull/47958) ([Arthur Passos](https://github.com/arthurpassos)).
* Restore the ability of native macOS debug server build to start. [#48050](https://github.com/ClickHouse/ClickHouse/pull/48050) ([Robert Schulze](https://github.com/rschu1ze)). Note: this change is only relevant for development, as the ClickHouse official builds are done with cross-compilation.
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix formats parser resetting, test processing bad messages in `Kafka` [#45693](https://github.com/ClickHouse/ClickHouse/pull/45693) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix data size calculation in Keeper [#46086](https://github.com/ClickHouse/ClickHouse/pull/46086) ([Antonio Andelic](https://github.com/antonio2368)).
* Fixed a bug in automatic retries of `DROP TABLE` query with `ReplicatedMergeTree` tables and `Atomic` databases. In rare cases it could lead to `Can't get data for node /zk_path/log_pointer` and `The specified key does not exist` errors if the ZooKeeper session expired during DROP and a new replicated table with the same path in ZooKeeper was created in parallel. [#46384](https://github.com/ClickHouse/ClickHouse/pull/46384) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect alias recursion while normalizing queries that prevented some queries from running. [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Fix IPv4/IPv6 serialization/deserialization in binary formats [#46616](https://github.com/ClickHouse/ClickHouse/pull/46616) ([Kruglov Pavel](https://github.com/Avogar)).
* ActionsDAG: do not change result of `and` during optimization [#46653](https://github.com/ClickHouse/ClickHouse/pull/46653) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Improve query cancellation when a client dies [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix arithmetic operations in aggregate optimization [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Fix possible `clickhouse-local`'s abort on JSONEachRow schema inference [#46731](https://github.com/ClickHouse/ClickHouse/pull/46731) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix combined PREWHERE column accumulation from multiple steps [#46785](https://github.com/ClickHouse/ClickHouse/pull/46785) ([Alexander Gololobov](https://github.com/davenger)).
* Use initial range for fetching file size in HTTP read buffer. Without this change, some remote files couldn't be processed. [#46824](https://github.com/ClickHouse/ClickHouse/pull/46824) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix the incorrect progress bar while using the URL tables [#46830](https://github.com/ClickHouse/ClickHouse/pull/46830) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix MSan report in `maxIntersections` function [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a bug in `Map` data type [#46856](https://github.com/ClickHouse/ClickHouse/pull/46856) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong results of some LIKE searches when the LIKE pattern contains quoted non-quotable characters [#46875](https://github.com/ClickHouse/ClickHouse/pull/46875) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix: WITH FILL would abort when the Filling Transform processed an empty block [#46897](https://github.com/ClickHouse/ClickHouse/pull/46897) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix date and int inference from string in JSON [#46972](https://github.com/ClickHouse/ClickHouse/pull/46972) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix a typo in systemd service definition [#47051](https://github.com/ClickHouse/ClickHouse/pull/47051) ([Palash Goel](https://github.com/palash-goel)).
* Fix the NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
* Fix the problem that a `ReplicatedMergeTree` table failed to insert two similar pieces of data when `part_type` is configured as `InMemory` (experimental feature). [#47121](https://github.com/ClickHouse/ClickHouse/pull/47121) ([liding1992](https://github.com/liding1992)).
* External dictionaries / library-bridge: Fix error "unknown library method 'extDict_libClone'" [#47136](https://github.com/ClickHouse/ClickHouse/pull/47136) ([alex filatov](https://github.com/phil-88)).
* Fix race condition in a grace hash join with limit [#47153](https://github.com/ClickHouse/ClickHouse/pull/47153) ([Vladimir C](https://github.com/vdimir)).
* Fix concrete columns PREWHERE support [#47154](https://github.com/ClickHouse/ClickHouse/pull/47154) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible deadlock in Query Status [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).
* Forbid insert select for the same `Join` table, as it leads to a deadlock [#47260](https://github.com/ClickHouse/ClickHouse/pull/47260) ([Vladimir C](https://github.com/vdimir)).
* Skip merged partitions for `min_age_to_force_merge_seconds` merges [#47303](https://github.com/ClickHouse/ClickHouse/pull/47303) ([Antonio Andelic](https://github.com/antonio2368)).
* Modify find_first_symbols, so it works as expected for find_first_not_symbols [#47304](https://github.com/ClickHouse/ClickHouse/pull/47304) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix big numbers inference in CSV [#47410](https://github.com/ClickHouse/ClickHouse/pull/47410) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable logical expression optimizer for expression with aliases. [#47451](https://github.com/ClickHouse/ClickHouse/pull/47451) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix error in `decodeURLComponent` [#47457](https://github.com/ClickHouse/ClickHouse/pull/47457) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix explain graph with projection [#47473](https://github.com/ClickHouse/ClickHouse/pull/47473) ([flynn](https://github.com/ucasfl)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Parameterized view: a bug fix. [#47495](https://github.com/ClickHouse/ClickHouse/pull/47495) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fuzzer of data formats, and the corresponding fixes. [#47519](https://github.com/ClickHouse/ClickHouse/pull/47519) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix monotonicity check for `DateTime64` [#47526](https://github.com/ClickHouse/ClickHouse/pull/47526) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix "block structure mismatch" for a Nullable LowCardinality column [#47537](https://github.com/ClickHouse/ClickHouse/pull/47537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Proper fix for a bug in Apache Parquet [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) [#47538](https://github.com/ClickHouse/ClickHouse/pull/47538) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `BSONEachRow` parallel parsing when document size is invalid [#47540](https://github.com/ClickHouse/ClickHouse/pull/47540) ([Kruglov Pavel](https://github.com/Avogar)).
* Preserve error in `system.distribution_queue` on `SYSTEM FLUSH DISTRIBUTED` [#47541](https://github.com/ClickHouse/ClickHouse/pull/47541) ([Azat Khuzhin](https://github.com/azat)).
* Check for duplicate column in `BSONEachRow` format [#47609](https://github.com/ClickHouse/ClickHouse/pull/47609) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix wait for zero copy lock during move [#47631](https://github.com/ClickHouse/ClickHouse/pull/47631) ([alesapin](https://github.com/alesapin)).
* Fix aggregation by partitions [#47634](https://github.com/ClickHouse/ClickHouse/pull/47634) ([Nikita Taranov](https://github.com/nickitat)).
* Fix bug in tuple as array serialization in `BSONEachRow` format [#47690](https://github.com/ClickHouse/ClickHouse/pull/47690) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in `polygonsSymDifferenceCartesian` [#47702](https://github.com/ClickHouse/ClickHouse/pull/47702) ([pufit](https://github.com/pufit)).
* Fix reading from storage `File` compressed files with `zlib` and `gzip` compression [#47796](https://github.com/ClickHouse/ClickHouse/pull/47796) ([Anton Popov](https://github.com/CurtizJ)).
* Improve empty query detection for PostgreSQL (for pgx golang driver) [#47854](https://github.com/ClickHouse/ClickHouse/pull/47854) ([Azat Khuzhin](https://github.com/azat)).
* Fix DateTime monotonicity check for LowCardinality types [#47860](https://github.com/ClickHouse/ClickHouse/pull/47860) ([Antonio Andelic](https://github.com/antonio2368)).
* Use restore_threads (not backup_threads) for RESTORE ASYNC [#47861](https://github.com/ClickHouse/ClickHouse/pull/47861) ([Azat Khuzhin](https://github.com/azat)).
* Fix DROP COLUMN with ReplicatedMergeTree containing projections [#47883](https://github.com/ClickHouse/ClickHouse/pull/47883) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix for Replicated database recovery [#47901](https://github.com/ClickHouse/ClickHouse/pull/47901) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix "Field value too long" in `catboostEvaluate` [#47970](https://github.com/ClickHouse/ClickHouse/pull/47970) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix [#36971](https://github.com/ClickHouse/ClickHouse/issues/36971): Watchdog: exit with non-zero code if child process exits [#47973](https://github.com/ClickHouse/ClickHouse/pull/47973) ([Коренберг Марк](https://github.com/socketpair)).
* Fix for "index file `cidx` is unexpectedly long" [#48010](https://github.com/ClickHouse/ClickHouse/pull/48010) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix MaterializedPostgreSQL query to get attributes (replica-identity) [#48015](https://github.com/ClickHouse/ClickHouse/pull/48015) ([Solomatov Sergei](https://github.com/solomatovs)).
* parseDateTime(): Fix UB (signed integer overflow) [#48019](https://github.com/ClickHouse/ClickHouse/pull/48019) ([Robert Schulze](https://github.com/rschu1ze)).
* Use unique names for Records in Avro to avoid reusing its schema [#48057](https://github.com/ClickHouse/ClickHouse/pull/48057) ([Kruglov Pavel](https://github.com/Avogar)).
* Correctly set TCP/HTTP socket timeouts in Keeper [#48108](https://github.com/ClickHouse/ClickHouse/pull/48108) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible member call on null pointer in `Avro` format [#48184](https://github.com/ClickHouse/ClickHouse/pull/48184) ([Kruglov Pavel](https://github.com/Avogar)).
### <a id="232"></a> ClickHouse release 23.2, 2023-02-23
#### Backward Incompatible Change
@ -140,7 +473,7 @@
* Upgrade Intel QPL from v0.3.0 to v1.0.0. Build libaccel-config and link it statically to the QPL library instead of dynamically. [#45809](https://github.com/ClickHouse/ClickHouse/pull/45809) ([jasperzhu](https://github.com/jinjunzh)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
#### Bug Fix (user-visible misbehavior in official stable release)
* Flush data exactly by `rabbitmq_flush_interval_ms` or by `rabbitmq_max_block_size` in `StorageRabbitMQ`. Closes [#42389](https://github.com/ClickHouse/ClickHouse/issues/42389). Closes [#45160](https://github.com/ClickHouse/ClickHouse/issues/45160). [#44404](https://github.com/ClickHouse/ClickHouse/pull/44404) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use PODArray to render in sparkBar function, so we can control the memory usage. Close [#44467](https://github.com/ClickHouse/ClickHouse/issues/44467). [#44489](https://github.com/ClickHouse/ClickHouse/pull/44489) ([Duc Canh Le](https://github.com/canhld94)).

View File

@ -58,7 +58,7 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
set (RLIMIT_CPU 1000)
# gcc10/gcc10/clang -fsanitize=memory is too heavy
if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
if (SANITIZE STREQUAL "memory")
set (RLIMIT_DATA 10000000000) # 10G
endif()
@ -170,17 +170,10 @@ else ()
set(NO_WHOLE_ARCHIVE --no-whole-archive)
endif ()
option(ENABLE_CURL_BUILD "Enable curl, azure, sentry build on by default except MacOS." ON)
if (OS_DARWIN)
# Disable the curl, azure, sentry build on MacOS
set (ENABLE_CURL_BUILD OFF)
endif ()
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
# Can be lld or ld-lld or lld-13 or /path/to/lld.
if (LINKER_NAME MATCHES "lld" AND OS_LINUX)
if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index")
message (STATUS "Adding .gdb-index via --gdb-index linker option.")
endif ()
endif()
@ -213,7 +206,7 @@ endif ()
# Create BuildID when using lld. For other linkers it is created by default.
# (NOTE: LINKER_NAME can be either path or name, and in different variants)
if (LINKER_NAME MATCHES "lld" AND OS_LINUX)
if (LINKER_NAME MATCHES "lld")
# SHA1 is not cryptographically secure but it is the best what lld is offering.
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
endif ()
@ -287,48 +280,31 @@ set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)
if (COMPILER_GCC OR COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
endif ()
# falign-functions=32 prevents random performance regressions with code changes, thus providing more stable
# benchmarks.
if (COMPILER_GCC OR COMPILER_CLANG)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
endif ()
if (ARCH_AMD64)
# align branches within a 32-Byte boundary to avoid the potential performance loss when the code layout changes,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
if (COMPILER_GCC)
# gcc is in assembler, need to add "-Wa," prefix
set(BRANCHES_WITHIN_32B_BOUNDARIES "-Wa,${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
if (COMPILER_GCC)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcoroutines")
endif ()
# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
if (WITH_COVERAGE AND COMPILER_CLANG)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
# If we want to disable coverage for specific translation units
set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
endif()
if (COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
if (WITH_COVERAGE AND COMPILER_GCC)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-arcs -ftest-coverage")
set(COVERAGE_OPTION "-lgcov")
set(WITHOUT_COVERAGE "-fno-profile-arcs -fno-test-coverage")
endif()
# falign-functions=32 prevents random performance regressions with code changes, thus providing more stable
# benchmarks.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
if (ARCH_AMD64)
# align branches within a 32-Byte boundary to avoid the potential performance loss when the code layout changes,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
if (WITH_COVERAGE)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
# If we want to disable coverage for specific translation units
set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
endif()
endif ()
set (COMPILER_FLAGS "${COMPILER_FLAGS}")
@ -350,15 +326,7 @@ set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_I
if (COMPILER_CLANG)
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
# The LLVM MachO linker (ld64.lld) generates by default unwind info in 'compact' format which the internal unwinder doesn't support
# and the server will not come up ('invalid compact unwind encoding'). Disable it.
# You will see warning during the build "ld64.lld: warning: Option `-no_compact_unwind' is undocumented. Should lld implement it?".
# Yes, ld64.lld does not document the option, likely for compat with Apple's system ld, after which ld64.lld is modeled and
# which also does not document it.
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no_compact_unwind")
endif()
# Display absolute paths in error messages. Otherwise KDevelop fails to navigate to correct file and opens a new file instead.
@ -419,7 +387,11 @@ else()
endif ()
option (ENABLE_GWP_ASAN "Enable Gwp-Asan" ON)
if (NOT OS_LINUX AND NOT OS_ANDROID)
# We use mmap for allocations more heavily in debug builds,
# but GWP-ASan also wants to use mmap frequently,
# and due to a large number of memory mappings,
# it does not work together well.
if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
set(ENABLE_GWP_ASAN OFF)
endif ()
@ -443,8 +415,11 @@ endif ()
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X))
if (NOT SANITIZE)
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
endif()
if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
@ -577,7 +552,7 @@ if (NATIVE_BUILD_TARGETS
COMMAND ${CMAKE_COMMAND}
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
"-DENABLE_CCACHE=${ENABLE_CCACHE}"
"-DCOMPILER_CACHE=${COMPILER_CACHE}"
# Avoid overriding .cargo/config.toml with native toolchain.
"-DENABLE_RUST=OFF"
"-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"

View File

@ -19,8 +19,8 @@ endif()
if (NOT "$ENV{CFLAGS}" STREQUAL ""
OR NOT "$ENV{CXXFLAGS}" STREQUAL ""
OR NOT "$ENV{LDFLAGS}" STREQUAL ""
OR CMAKE_C_FLAGS OR CMAKE_CXX_FLAGS OR CMAKE_EXE_LINKER_FLAGS OR CMAKE_SHARED_LINKER_FLAGS OR CMAKE_MODULE_LINKER_FLAGS
OR CMAKE_C_FLAGS_INIT OR CMAKE_CXX_FLAGS_INIT OR CMAKE_EXE_LINKER_FLAGS_INIT OR CMAKE_SHARED_LINKER_FLAGS_INIT OR CMAKE_MODULE_LINKER_FLAGS_INIT)
OR CMAKE_C_FLAGS OR CMAKE_CXX_FLAGS OR CMAKE_EXE_LINKER_FLAGS OR CMAKE_MODULE_LINKER_FLAGS
OR CMAKE_C_FLAGS_INIT OR CMAKE_CXX_FLAGS_INIT OR CMAKE_EXE_LINKER_FLAGS_INIT OR CMAKE_MODULE_LINKER_FLAGS_INIT)
# if $ENV
message("CFLAGS: $ENV{CFLAGS}")
@ -36,7 +36,6 @@ if (NOT "$ENV{CFLAGS}" STREQUAL ""
message("CMAKE_C_FLAGS_INIT: ${CMAKE_C_FLAGS_INIT}")
message("CMAKE_CXX_FLAGS_INIT: ${CMAKE_CXX_FLAGS_INIT}")
message("CMAKE_EXE_LINKER_FLAGS_INIT: ${CMAKE_EXE_LINKER_FLAGS_INIT}")
message("CMAKE_SHARED_LINKER_FLAGS_INIT: ${CMAKE_SHARED_LINKER_FLAGS_INIT}")
message("CMAKE_MODULE_LINKER_FLAGS_INIT: ${CMAKE_MODULE_LINKER_FLAGS_INIT}")
message(FATAL_ERROR "

View File

@ -1,4 +1,4 @@
[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/clickhouse-presentations/raw/master/images/logo-400x240.png)](https://clickhouse.com)
[<img alt="ClickHouse — open source distributed column-oriented DBMS" width="400px" src="https://clickhouse.com/images/ch_gh_logo_rounded.png" />](https://clickhouse.com?utm_source=github)
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
@ -21,10 +21,11 @@ curl https://clickhouse.com/ | sh
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
## Upcoming Events
* [**ClickHouse Meetup in Austin**](https://www.meetup.com/clickhouse-austin-user-group/events/291486654/) - Mar 30 - The first ClickHouse Meetup in Austin is happening soon! Interested in speaking? Let us know!
* [**v23.3 Release Webinar**](https://clickhouse.com/company/events/v23-3-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-02) - Mar 30 - 23.3 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Spring Meetup in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/292517734) - April 26 - It's spring, and it's time to meet again in the city! Talks include: "Building a domain specific query language on top of Clickhouse", "A Galaxy of Information", "Our Journey to ClickHouse Cloud from Redshift", and a ClickHouse update!
* [**v23.4 Release Webinar**](https://clickhouse.com/company/events/v23-4-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-04) - April 26 - 23.4 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16 - Save the date! ClickHouse is coming back to Berlin. We're excited to announce an upcoming ClickHouse Meetup that you won't want to miss. Join us as we gather together to discuss the latest in the world of ClickHouse and share user stories.
## Recent Recordings
* **FOSDEM 2023**: In the "Fast and Streaming Data" room Alexey gave a talk entitled "Building Analytical Apps With ClickHouse" that looks at the landscape of data tools, an interesting data set, and how you can interact with data quickly. Check out the recording on **[YouTube](https://www.youtube.com/watch?v=JlcI2Vfz_uk)**.
* **Recording available**: [**v23.2 Release Webinar**](https://www.youtube.com/watch?v=2o0vRMMIrkY) NTILE Window Function support, Partition Key for GROUP By, io_uring, Apache Iceberg support, Dynamic Disks, integrations updates! Watch it now!
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v23.3 Release Webinar**](https://www.youtube.com/watch?v=ISaGUjvBNao) UNDROP TABLE, server settings introspection, nested dynamic disks, MySQL compatibility, parseDateTime, Lightweight Deletes, Parallel Replicas, integrations updates, and so much more! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)

View File

@ -13,20 +13,16 @@ The following versions of ClickHouse server are currently being supported with s
| Version | Supported |
|:-|:-|
| 23.4 | ✔️ |
| 23.3 | ✔️ |
| 23.2 | ✔️ |
| 23.1 | ✔️ |
| 22.12 | ✔️ |
| 23.1 | |
| 22.12 | |
| 22.11 | ❌ |
| 22.10 | ❌ |
| 22.9 | ❌ |
| 22.8 | ✔️ |
| 22.7 | ❌ |
| 22.6 | ❌ |
| 22.5 | ❌ |
| 22.4 | ❌ |
| 22.3 | ✔️ |
| 22.2 | ❌ |
| 22.1 | ❌ |
| 22.* | ❌ |
| 21.* | ❌ |
| 20.* | ❌ |
| 19.* | ❌ |

View File

@ -1,5 +1,6 @@
#pragma once
#include <base/extended_types.h>
#include <base/Decimal_fwd.h>
#if !defined(NO_SANITIZE_UNDEFINED)
#if defined(__clang__)
@ -19,23 +20,6 @@ using Decimal64 = Decimal<Int64>;
using Decimal128 = Decimal<Int128>;
using Decimal256 = Decimal<Int256>;
template <class T>
concept is_decimal =
std::is_same_v<T, Decimal32>
|| std::is_same_v<T, Decimal64>
|| std::is_same_v<T, Decimal128>
|| std::is_same_v<T, Decimal256>
|| std::is_same_v<T, DateTime64>;
template <class T>
concept is_over_big_int =
std::is_same_v<T, Int128>
|| std::is_same_v<T, UInt128>
|| std::is_same_v<T, Int256>
|| std::is_same_v<T, UInt256>
|| std::is_same_v<T, Decimal128>
|| std::is_same_v<T, Decimal256>;
template <class T> struct NativeTypeT { using Type = T; };
template <is_decimal T> struct NativeTypeT<T> { using Type = typename T::NativeType; };
template <class T> using NativeType = typename NativeTypeT<T>::Type;

46
base/base/Decimal_fwd.h Normal file
View File

@ -0,0 +1,46 @@
#pragma once
#include <base/types.h>
namespace wide
{
template <size_t Bits, typename Signed>
class integer;
}
using Int128 = wide::integer<128, signed>;
using UInt128 = wide::integer<128, unsigned>;
using Int256 = wide::integer<256, signed>;
using UInt256 = wide::integer<256, unsigned>;
namespace DB
{
template <class> struct Decimal;
using Decimal32 = Decimal<Int32>;
using Decimal64 = Decimal<Int64>;
using Decimal128 = Decimal<Int128>;
using Decimal256 = Decimal<Int256>;
class DateTime64;
template <class T>
concept is_decimal =
std::is_same_v<T, Decimal32>
|| std::is_same_v<T, Decimal64>
|| std::is_same_v<T, Decimal128>
|| std::is_same_v<T, Decimal256>
|| std::is_same_v<T, DateTime64>;
template <class T>
concept is_over_big_int =
std::is_same_v<T, Int128>
|| std::is_same_v<T, UInt128>
|| std::is_same_v<T, Int256>
|| std::is_same_v<T, UInt256>
|| std::is_same_v<T, Decimal128>
|| std::is_same_v<T, Decimal256>;
}
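With the concepts available from the forward header alone, other headers can constrain templates without pulling in the full Decimal definitions. A minimal illustrative sketch (the static_asserts below are not part of the header):

    #include <base/Decimal_fwd.h>

    static_assert(DB::is_decimal<DB::Decimal64>);
    static_assert(!DB::is_decimal<Int64>);
    static_assert(DB::is_over_big_int<Int256>);
    static_assert(!DB::is_over_big_int<Int64>);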

View File

@ -51,3 +51,15 @@ namespace DB
};
}
namespace std
{
template <>
struct hash<DB::IPv6>
{
size_t operator()(const DB::IPv6 & x) const
{
return std::hash<DB::IPv6::UnderlyingType>()(x.toUnderType());
}
};
}
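This specialization makes DB::IPv6 usable as a key in the standard hash containers. A minimal sketch (assuming the usual equality operator of the strong typedef):

    #include <unordered_set>

    std::unordered_set<DB::IPv6> seen_addresses;   // hashing goes through std::hash<DB::IPv6> defined above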

View File

@ -466,9 +466,8 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
{
if (!it->hasEscapes())
{
if (static_cast<int>(size) + 2 > it->dataEnd() - it->data())
continue;
if (!strncmp(data, it->data() + 1, size))
const auto current_name = it->getRawName();
if (current_name.size() == size && 0 == memcmp(current_name.data(), data, size))
break;
}
else

View File

@ -3,13 +3,29 @@
#include <Poco/Util/LayeredConfiguration.h>
#include <Poco/Util/MapConfiguration.h>
void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority)
void argsToConfig(const Poco::Util::Application::ArgVec & argv,
Poco::Util::LayeredConfiguration & config,
int priority,
const std::unordered_set<std::string>* alias_names)
{
/// Parsing all args and converting to config layer
/// Test: -- --1=1 --1=2 --3 5 7 8 -9 10 -11=12 14= 15== --16==17 --=18 --19= --20 21 22 --23 --24 25 --26 -27 28 ---29=30 -- ----31 32 --33 3-4
Poco::AutoPtr<Poco::Util::MapConfiguration> map_config = new Poco::Util::MapConfiguration;
std::string key;
auto add_arg = [&map_config, &alias_names](const std::string & k, const std::string & v)
{
map_config->setString(k, v);
if (alias_names && !alias_names->contains(k))
{
std::string alias_key = k;
std::replace(alias_key.begin(), alias_key.end(), '-', '_');
if (alias_names->contains(alias_key))
map_config->setString(alias_key, v);
}
};
for (const auto & arg : argv)
{
auto key_start = arg.find_first_not_of('-');
@ -19,7 +35,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
// old saved '--key', will set to some true value "1"
if (!key.empty() && pos_minus != std::string::npos && pos_minus < key_start)
{
map_config->setString(key, "1");
add_arg(key, "1");
key = "";
}
@ -29,7 +45,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
{
if (pos_minus == std::string::npos || pos_minus > key_start)
{
map_config->setString(key, arg);
add_arg(key, arg);
}
key = "";
}
@ -55,7 +71,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
if (arg.size() > pos_eq)
value = arg.substr(pos_eq + 1);
map_config->setString(key, value);
add_arg(key, value);
key = "";
}

View File

@ -1,6 +1,8 @@
#pragma once
#include <Poco/Util/Application.h>
#include <string>
#include <unordered_set>
namespace Poco::Util
{
@ -8,4 +10,7 @@ class LayeredConfiguration; // NOLINT(cppcoreguidelines-virtual-class-destructor
}
/// Import extra command line arguments to configuration. These are command line arguments after --.
void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority);
void argsToConfig(const Poco::Util::Application::ArgVec & argv,
Poco::Util::LayeredConfiguration & config,
int priority,
const std::unordered_set<std::string>* registered_alias_names = nullptr);
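A minimal usage sketch of the extended signature (the option and alias names here are hypothetical): when an argument is given with dashes and its underscore-separated form is a registered alias, both keys end up in the configuration.

    #include <base/argsToConfig.h>
    #include <Poco/Util/Application.h>
    #include <Poco/Util/LayeredConfiguration.h>
    #include <Poco/AutoPtr.h>
    #include <unordered_set>

    Poco::AutoPtr<Poco::Util::LayeredConfiguration> config = new Poco::Util::LayeredConfiguration;
    Poco::Util::Application::ArgVec args = {"--max-threads=4"};
    std::unordered_set<std::string> aliases = {"max_threads"};

    argsToConfig(args, *config, /*priority=*/100, &aliases);
    // Both "max-threads" and "max_threads" should now resolve to "4".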

View File

@ -34,9 +34,51 @@
* If no such characters, returns nullptr.
*/
struct SearchSymbols
{
static constexpr auto BUFFER_SIZE = 16;
SearchSymbols() = default;
explicit SearchSymbols(std::string in)
: str(std::move(in))
{
#if defined(__SSE4_2__)
if (str.size() > BUFFER_SIZE)
{
throw std::runtime_error("SearchSymbols can contain at most " + std::to_string(BUFFER_SIZE) + " symbols and " + std::to_string(str.size()) + " was provided\n");
}
char tmp_safety_buffer[BUFFER_SIZE] = {0};
memcpy(tmp_safety_buffer, str.data(), str.size());
simd_vector = _mm_loadu_si128(reinterpret_cast<const __m128i *>(tmp_safety_buffer));
#endif
}
#if defined(__SSE4_2__)
__m128i simd_vector;
#endif
std::string str;
};
namespace detail
{
template <char ...chars> constexpr bool is_in(char x) { return ((x == chars) || ...); }
template <char ...chars> constexpr bool is_in(char x) { return ((x == chars) || ...); } // NOLINT(misc-redundant-expression)
static bool is_in(char c, const char * symbols, size_t num_chars)
{
for (size_t i = 0u; i < num_chars; ++i)
{
if (c == symbols[i])
{
return true;
}
}
return false;
}
#if defined(__SSE2__)
template <char s0>
@ -53,6 +95,43 @@ inline __m128i mm_is_in(__m128i bytes)
__m128i eq = mm_is_in<s1, tail...>(bytes);
return _mm_or_si128(eq0, eq);
}
inline __m128i mm_is_in(__m128i bytes, const char * symbols, size_t num_chars)
{
__m128i accumulator = _mm_setzero_si128();
for (size_t i = 0; i < num_chars; ++i)
{
__m128i eq = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(symbols[i]));
accumulator = _mm_or_si128(accumulator, eq);
}
return accumulator;
}
inline std::array<__m128i, 16u> mm_is_in_prepare(const char * symbols, size_t num_chars)
{
std::array<__m128i, 16u> result {};
for (size_t i = 0; i < num_chars; ++i)
{
result[i] = _mm_set1_epi8(symbols[i]);
}
return result;
}
inline __m128i mm_is_in_execute(__m128i bytes, const std::array<__m128i, 16u> & needles)
{
__m128i accumulator = _mm_setzero_si128();
for (const auto & needle : needles)
{
__m128i eq = _mm_cmpeq_epi8(bytes, needle);
accumulator = _mm_or_si128(accumulator, eq);
}
return accumulator;
}
#endif
template <bool positive>
@ -99,6 +178,32 @@ inline const char * find_first_symbols_sse2(const char * const begin, const char
return return_mode == ReturnMode::End ? end : nullptr;
}
template <bool positive, ReturnMode return_mode>
inline const char * find_first_symbols_sse2(const char * const begin, const char * const end, const char * symbols, size_t num_chars)
{
const char * pos = begin;
#if defined(__SSE2__)
const auto needles = mm_is_in_prepare(symbols, num_chars);
for (; pos + 15 < end; pos += 16)
{
__m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos));
__m128i eq = mm_is_in_execute(bytes, needles);
uint16_t bit_mask = maybe_negate<positive>(uint16_t(_mm_movemask_epi8(eq)));
if (bit_mask)
return pos + __builtin_ctz(bit_mask);
}
#endif
for (; pos < end; ++pos)
if (maybe_negate<positive>(is_in(*pos, symbols, num_chars)))
return pos;
return return_mode == ReturnMode::End ? end : nullptr;
}
template <bool positive, ReturnMode return_mode, char... symbols>
inline const char * find_last_symbols_sse2(const char * const begin, const char * const end)
@ -179,6 +284,41 @@ inline const char * find_first_symbols_sse42(const char * const begin, const cha
return return_mode == ReturnMode::End ? end : nullptr;
}
template <bool positive, ReturnMode return_mode>
inline const char * find_first_symbols_sse42(const char * const begin, const char * const end, const SearchSymbols & symbols)
{
const char * pos = begin;
const auto num_chars = symbols.str.size();
#if defined(__SSE4_2__)
constexpr int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT;
const __m128i set = symbols.simd_vector;
for (; pos + 15 < end; pos += 16)
{
__m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos));
if constexpr (positive)
{
if (_mm_cmpestrc(set, num_chars, bytes, 16, mode))
return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode);
}
else
{
if (_mm_cmpestrc(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY))
return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY);
}
}
#endif
for (; pos < end; ++pos)
if (maybe_negate<positive>(is_in(*pos, symbols.str.data(), num_chars)))
return pos;
return return_mode == ReturnMode::End ? end : nullptr;
}
/// NOTE No SSE 4.2 implementation for find_last_symbols_or_null. Not worth to do.
@ -194,6 +334,17 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
}
template <bool positive, ReturnMode return_mode>
inline const char * find_first_symbols_dispatch(const std::string_view haystack, const SearchSymbols & symbols)
{
#if defined(__SSE4_2__)
if (symbols.str.size() >= 5)
return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
else
#endif
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
}
}
@ -211,6 +362,11 @@ inline char * find_first_symbols(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<true, detail::ReturnMode::End, symbols...>(begin, end));
}
inline const char * find_first_symbols(std::string_view haystack, const SearchSymbols & symbols)
{
return detail::find_first_symbols_dispatch<true, detail::ReturnMode::End>(haystack, symbols);
}
template <char... symbols>
inline const char * find_first_not_symbols(const char * begin, const char * end)
{
@ -223,6 +379,11 @@ inline char * find_first_not_symbols(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<false, detail::ReturnMode::End, symbols...>(begin, end));
}
inline const char * find_first_not_symbols(std::string_view haystack, const SearchSymbols & symbols)
{
return detail::find_first_symbols_dispatch<false, detail::ReturnMode::End>(haystack, symbols);
}
template <char... symbols>
inline const char * find_first_symbols_or_null(const char * begin, const char * end)
{
@ -235,6 +396,11 @@ inline char * find_first_symbols_or_null(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<true, detail::ReturnMode::Nullptr, symbols...>(begin, end));
}
inline const char * find_first_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
{
return detail::find_first_symbols_dispatch<true, detail::ReturnMode::Nullptr>(haystack, symbols);
}
template <char... symbols>
inline const char * find_first_not_symbols_or_null(const char * begin, const char * end)
{
@ -247,6 +413,10 @@ inline char * find_first_not_symbols_or_null(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<false, detail::ReturnMode::Nullptr, symbols...>(begin, end));
}
inline const char * find_first_not_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
{
return detail::find_first_symbols_dispatch<false, detail::ReturnMode::Nullptr>(haystack, symbols);
}
template <char... symbols>
inline const char * find_last_symbols_or_null(const char * begin, const char * end)
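A minimal sketch of the run-time interface added above (inputs are illustrative): the symbol set is built once and can then be reused across calls.

    SearchSymbols delimiters(",;|");                      // at most 16 symbols
    std::string_view haystack = "key=value;rest";

    const char * p = find_first_symbols(haystack, delimiters);                  // points at ';'
    const char * q = find_first_symbols_or_null(haystack, SearchSymbols("#"));  // nullptr, '#' is not present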

View File

@ -35,7 +35,7 @@ public:
Self & operator=(T && rhs) { t = std::move(rhs); return *this;}
// NOLINTBEGIN(google-explicit-constructor)
operator const T & () const { return t; }
constexpr operator const T & () const { return t; }
operator T & () { return t; }
// NOLINTEND(google-explicit-constructor)

View File

@ -5,44 +5,6 @@
#include <bit>
inline void reverseMemcpy(void * dst, const void * src, size_t size)
{
uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);
uint_dst += size;
while (size)
{
--uint_dst;
*uint_dst = *uint_src;
++uint_src;
--size;
}
}
template <typename T>
inline T unalignedLoadLE(const void * address)
{
T res {};
if constexpr (std::endian::native == std::endian::little)
memcpy(&res, address, sizeof(res));
else
reverseMemcpy(&res, address, sizeof(res));
return res;
}
template <typename T>
inline void unalignedStoreLE(void * address,
const typename std::enable_if<true, T>::type & src)
{
static_assert(std::is_trivially_copyable_v<T>);
if constexpr (std::endian::native == std::endian::little)
memcpy(address, &src, sizeof(src));
else
reverseMemcpy(address, &src, sizeof(src));
}
template <typename T>
inline T unalignedLoad(const void * address)
{
@ -62,3 +24,70 @@ inline void unalignedStore(void * address,
static_assert(std::is_trivially_copyable_v<T>);
memcpy(address, &src, sizeof(src));
}
inline void reverseMemcpy(void * dst, const void * src, size_t size)
{
uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);
uint_dst += size;
while (size)
{
--uint_dst;
*uint_dst = *uint_src;
++uint_src;
--size;
}
}
template <std::endian endian, typename T>
inline T unalignedLoadEndian(const void * address)
{
T res {};
if constexpr (std::endian::native == endian)
memcpy(&res, address, sizeof(res));
else
reverseMemcpy(&res, address, sizeof(res));
return res;
}
template <std::endian endian, typename T>
inline void unalignedStoreEndian(void * address, T & src)
{
static_assert(std::is_trivially_copyable_v<T>);
if constexpr (std::endian::native == endian)
memcpy(address, &src, sizeof(src));
else
reverseMemcpy(address, &src, sizeof(src));
}
template <typename T>
inline T unalignedLoadLittleEndian(const void * address)
{
return unalignedLoadEndian<std::endian::little, T>(address);
}
template <typename T>
inline void unalignedStoreLittleEndian(void * address,
const typename std::enable_if<true, T>::type & src)
{
unalignedStoreEndian<std::endian::little>(address, src);
}
template <typename T>
inline T unalignedLoadBigEndian(const void * address)
{
return unalignedLoadEndian<std::endian::big, T>(address);
}
template <typename T>
inline void unalignedStoreBigEndian(void * address,
const typename std::enable_if<true, T>::type & src)
{
unalignedStoreEndian<std::endian::big>(address, src);
}

View File

@ -155,13 +155,13 @@ struct common_type<wide::integer<Bits, Signed>, Arithmetic>
std::is_floating_point_v<Arithmetic>,
Arithmetic,
std::conditional_t<
sizeof(Arithmetic) < Bits * sizeof(long),
sizeof(Arithmetic) * 8 < Bits,
wide::integer<Bits, Signed>,
std::conditional_t<
Bits * sizeof(long) < sizeof(Arithmetic),
Bits < sizeof(Arithmetic) * 8,
Arithmetic,
std::conditional_t<
Bits * sizeof(long) == sizeof(Arithmetic) && (std::is_same_v<Signed, signed> || std::is_signed_v<Arithmetic>),
Bits == sizeof(Arithmetic) * 8 && (std::is_same_v<Signed, signed> || std::is_signed_v<Arithmetic>),
Arithmetic,
wide::integer<Bits, Signed>>>>>;
};
@ -732,9 +732,10 @@ public:
if (std::numeric_limits<T>::is_signed && (is_negative(lhs) != is_negative(rhs)))
return is_negative(rhs);
integer<Bits, Signed> t = rhs;
for (unsigned i = 0; i < item_count; ++i)
{
base_type rhs_item = get_item(rhs, big(i));
base_type rhs_item = get_item(t, big(i));
if (lhs.items[big(i)] != rhs_item)
return lhs.items[big(i)] > rhs_item;
@ -757,9 +758,10 @@ public:
if (std::numeric_limits<T>::is_signed && (is_negative(lhs) != is_negative(rhs)))
return is_negative(lhs);
integer<Bits, Signed> t = rhs;
for (unsigned i = 0; i < item_count; ++i)
{
base_type rhs_item = get_item(rhs, big(i));
base_type rhs_item = get_item(t, big(i));
if (lhs.items[big(i)] != rhs_item)
return lhs.items[big(i)] < rhs_item;
@ -779,9 +781,10 @@ public:
{
if constexpr (should_keep_size<T>())
{
integer<Bits, Signed> t = rhs;
for (unsigned i = 0; i < item_count; ++i)
{
base_type rhs_item = get_item(rhs, any(i));
base_type rhs_item = get_item(t, any(i));
if (lhs.items[any(i)] != rhs_item)
return false;
@ -1239,7 +1242,7 @@ constexpr integer<Bits, Signed>::operator long double() const noexcept
for (unsigned i = 0; i < _impl::item_count; ++i)
{
long double t = res;
res *= std::numeric_limits<base_type>::max();
res *= static_cast<long double>(std::numeric_limits<base_type>::max());
res += t;
res += tmp.items[_impl::big(i)];
}

View File

@ -235,6 +235,17 @@ ssize_t getrandom(void *buf, size_t buflen, unsigned flags)
return syscall(SYS_getrandom, buf, buflen, flags);
}
/* Structure for scatter/gather I/O. */
struct iovec
{
void *iov_base; /* Pointer to data. */
size_t iov_len; /* Length of data. */
};
ssize_t preadv(int __fd, const struct iovec *__iovec, int __count, __off_t __offset)
{
return syscall(SYS_preadv, __fd, __iovec, __count, (long)(__offset), (long)(__offset>>32));
}
#include <errno.h>
#include <limits.h>

View File

@ -0,0 +1,81 @@
/* origin: FreeBSD /usr/src/lib/msun/src/e_expf.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "libm.h"
static const float
half[2] = {0.5,-0.5},
ln2hi = 6.9314575195e-1f, /* 0x3f317200 */
ln2lo = 1.4286067653e-6f, /* 0x35bfbe8e */
invln2 = 1.4426950216e+0f, /* 0x3fb8aa3b */
/*
* Domain [-0.34568, 0.34568], range ~[-4.278e-9, 4.447e-9]:
* |x*(exp(x)+1)/(exp(x)-1) - p(x)| < 2**-27.74
*/
P1 = 1.6666625440e-1f, /* 0xaaaa8f.0p-26 */
P2 = -2.7667332906e-3f; /* -0xb55215.0p-32 */
float expf(float x)
{
float_t hi, lo, c, xx, y;
int k, sign;
uint32_t hx;
GET_FLOAT_WORD(hx, x);
sign = hx >> 31; /* sign bit of x */
hx &= 0x7fffffff; /* high word of |x| */
/* special cases */
if (hx >= 0x42aeac50) { /* if |x| >= -87.33655f or NaN */
if (hx >= 0x42b17218 && !sign) { /* x >= 88.722839f */
/* overflow */
x *= 0x1p127f;
return x;
}
if (sign) {
/* underflow */
FORCE_EVAL(-0x1p-149f/x);
if (hx >= 0x42cff1b5) /* x <= -103.972084f */
return 0;
}
}
/* argument reduction */
if (hx > 0x3eb17218) { /* if |x| > 0.5 ln2 */
if (hx > 0x3f851592) /* if |x| > 1.5 ln2 */
k = invln2*x + half[sign];
else
k = 1 - sign - sign;
hi = x - k*ln2hi; /* k*ln2hi is exact here */
lo = k*ln2lo;
x = hi - lo;
} else if (hx > 0x39000000) { /* |x| > 2**-14 */
k = 0;
hi = x;
lo = 0;
} else {
/* raise inexact */
FORCE_EVAL(0x1p127f + x);
return 1 + x;
}
/* x is now in primary range */
xx = x*x;
c = x - xx*(P1+xx*P2);
y = 1 + (x*c/(2-c) - lo + hi);
if (k == 0)
return y;
return scalbnf(y, k);
}
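A quick numerical sanity check of the thresholds encoded above (assuming the program links against this expf; the standard libm behaves the same way):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        std::printf("%.7g\n", expf(1.0f));     // ~2.718282
        std::printf("%.7g\n", expf(89.0f));    // inf: above the ~88.72 overflow threshold
        std::printf("%.7g\n", expf(-104.0f));  // 0: below the ~-103.97 underflow threshold
    }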

View File

@ -0,0 +1,31 @@
#include <math.h>
#include <stdint.h>
float scalbnf(float x, int n)
{
union {float f; uint32_t i;} u;
float_t y = x;
if (n > 127) {
y *= 0x1p127f;
n -= 127;
if (n > 127) {
y *= 0x1p127f;
n -= 127;
if (n > 127)
n = 127;
}
} else if (n < -126) {
y *= 0x1p-126f;
n += 126;
if (n < -126) {
y *= 0x1p-126f;
n += 126;
if (n < -126)
n = -126;
}
}
u.i = (uint32_t)(0x7f+n)<<23;
x = y * u.f;
return x;
}

View File

@ -31,7 +31,8 @@ TRAP(argp_state_help)
TRAP(argp_usage)
TRAP(asctime)
TRAP(clearenv)
TRAP(crypt)
// Redefined at contrib/libbcrypt/crypt_blowfish/wrapper.c:186
// TRAP(crypt)
TRAP(ctime)
TRAP(cuserid)
TRAP(drand48)

View File

@ -90,20 +90,6 @@ namespace Crypto
std::string groupName() const;
/// Returns the EC key group name.
void save(const std::string & publicKeyFile, const std::string & privateKeyFile = "", const std::string & privateKeyPassphrase = "")
const;
/// Exports the public and private keys to the given files.
///
/// If an empty filename is specified, the corresponding key
/// is not exported.
void
save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const;
/// Exports the public and private key to the given streams.
///
/// If a null pointer is passed for a stream, the corresponding
/// key is not exported.
static std::string getCurveName(int nid = -1);
/// Returns elliptical curve name corresponding to
/// the given nid; if nid is not found, returns
@ -150,22 +136,6 @@ namespace Crypto
{
return OBJ_nid2sn(groupId());
}
inline void
ECKeyImpl::save(const std::string & publicKeyFile, const std::string & privateKeyFile, const std::string & privateKeyPassphrase) const
{
EVPPKey(_pEC).save(publicKeyFile, privateKeyFile, privateKeyPassphrase);
}
inline void
ECKeyImpl::save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream, const std::string & privateKeyPassphrase) const
{
EVPPKey(_pEC).save(pPublicKeyStream, pPrivateKeyStream, privateKeyPassphrase);
}
}
} // namespace Poco::Crypto

View File

@ -56,24 +56,6 @@ namespace Crypto
virtual int size() const;
/// Returns the RSA modulus size.
virtual void save(
const std::string & publicKeyPairFile,
const std::string & privateKeyPairFile = "",
const std::string & privateKeyPairPassphrase = "") const;
/// Exports the public and private keys to the given files.
///
/// If an empty filename is specified, the corresponding key
/// is not exported.
virtual void save(
std::ostream * pPublicKeyPairStream,
std::ostream * pPrivateKeyPairStream = 0,
const std::string & privateKeyPairPassphrase = "") const;
/// Exports the public and private key to the given streams.
///
/// If a null pointer is passed for a stream, the corresponding
/// key is not exported.
KeyPairImpl::Ptr impl() const;
/// Returns the impl object.
@ -97,21 +79,6 @@ namespace Crypto
return _pImpl->size();
}
inline void
KeyPair::save(const std::string & publicKeyFile, const std::string & privateKeyFile, const std::string & privateKeyPassphrase) const
{
_pImpl->save(publicKeyFile, privateKeyFile, privateKeyPassphrase);
}
inline void
KeyPair::save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream, const std::string & privateKeyPassphrase) const
{
_pImpl->save(pPublicKeyStream, pPrivateKeyStream, privateKeyPassphrase);
}
inline const std::string & KeyPair::name() const
{
return _pImpl->name();

View File

@ -55,22 +55,6 @@ namespace Crypto
virtual int size() const = 0;
/// Returns the key size.
virtual void save(
const std::string & publicKeyFile,
const std::string & privateKeyFile = "",
const std::string & privateKeyPassphrase = "") const = 0;
/// Exports the public and private keys to the given files.
///
/// If an empty filename is specified, the corresponding key
/// is not exported.
virtual void save(
std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const = 0;
/// Exports the public and private key to the given streams.
///
/// If a null pointer is passed for a stream, the corresponding
/// key is not exported.
const std::string & name() const;
/// Returns key pair name

View File

@ -96,20 +96,6 @@ namespace Crypto
ByteVec decryptionExponent() const;
/// Returns the RSA decryption exponent.
void save(const std::string & publicKeyFile, const std::string & privateKeyFile = "", const std::string & privateKeyPassphrase = "")
const;
/// Exports the public and private keys to the given files.
///
/// If an empty filename is specified, the corresponding key
/// is not exported.
void
save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const;
/// Exports the public and private key to the given streams.
///
/// If a null pointer is passed for a stream, the corresponding
/// key is not exported.
private:
RSAKeyImpl();
@ -139,4 +125,4 @@ namespace Crypto
} // namespace Poco::Crypto
#endif // Crypto_RSAKeyImplImpl_INCLUDED
#endif // Crypto_RSAKeyImplImpl_INCLUDED

View File

@ -269,103 +269,6 @@ RSAKeyImpl::ByteVec RSAKeyImpl::decryptionExponent() const
}
void RSAKeyImpl::save(const std::string& publicKeyFile,
const std::string& privateKeyFile,
const std::string& privateKeyPassphrase) const
{
if (!publicKeyFile.empty())
{
BIO* bio = BIO_new(BIO_s_file());
if (!bio) throw Poco::IOException("Cannot create BIO for writing public key file", publicKeyFile);
try
{
if (BIO_write_filename(bio, const_cast<char*>(publicKeyFile.c_str())))
{
if (!PEM_write_bio_RSAPublicKey(bio, _pRSA))
throw Poco::WriteFileException("Failed to write public key to file", publicKeyFile);
}
else throw Poco::CreateFileException("Cannot create public key file");
}
catch (...)
{
BIO_free(bio);
throw;
}
BIO_free(bio);
}
if (!privateKeyFile.empty())
{
BIO* bio = BIO_new(BIO_s_file());
if (!bio) throw Poco::IOException("Cannot create BIO for writing private key file", privateKeyFile);
try
{
if (BIO_write_filename(bio, const_cast<char*>(privateKeyFile.c_str())))
{
int rc = 0;
if (privateKeyPassphrase.empty())
rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, 0, 0, 0, 0, 0);
else
rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, EVP_des_ede3_cbc(),
reinterpret_cast<unsigned char*>(const_cast<char*>(privateKeyPassphrase.c_str())),
static_cast<int>(privateKeyPassphrase.length()), 0, 0);
if (!rc) throw Poco::FileException("Failed to write private key to file", privateKeyFile);
}
else throw Poco::CreateFileException("Cannot create private key file", privateKeyFile);
}
catch (...)
{
BIO_free(bio);
throw;
}
BIO_free(bio);
}
}
void RSAKeyImpl::save(std::ostream* pPublicKeyStream,
std::ostream* pPrivateKeyStream,
const std::string& privateKeyPassphrase) const
{
if (pPublicKeyStream)
{
BIO* bio = BIO_new(BIO_s_mem());
if (!bio) throw Poco::IOException("Cannot create BIO for writing public key");
if (!PEM_write_bio_RSAPublicKey(bio, _pRSA))
{
BIO_free(bio);
throw Poco::WriteFileException("Failed to write public key to stream");
}
char* pData;
long size = BIO_get_mem_data(bio, &pData);
pPublicKeyStream->write(pData, static_cast<std::streamsize>(size));
BIO_free(bio);
}
if (pPrivateKeyStream)
{
BIO* bio = BIO_new(BIO_s_mem());
if (!bio) throw Poco::IOException("Cannot create BIO for writing public key");
int rc = 0;
if (privateKeyPassphrase.empty())
rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, 0, 0, 0, 0, 0);
else
rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, EVP_des_ede3_cbc(),
reinterpret_cast<unsigned char*>(const_cast<char*>(privateKeyPassphrase.c_str())),
static_cast<int>(privateKeyPassphrase.length()), 0, 0);
if (!rc)
{
BIO_free(bio);
throw Poco::FileException("Failed to write private key to stream");
}
char* pData;
long size = BIO_get_mem_data(bio, &pData);
pPrivateKeyStream->write(pData, static_cast<std::streamsize>(size));
BIO_free(bio);
}
}
RSAKeyImpl::ByteVec RSAKeyImpl::convertToByteVec(const BIGNUM* bn)
{
int numBytes = BN_num_bytes(bn);
@ -383,4 +286,4 @@ RSAKeyImpl::ByteVec RSAKeyImpl::convertToByteVec(const BIGNUM* bn)
}
} } // namespace Poco::Crypto
} } // namespace Poco::Crypto

View File

@ -1,62 +0,0 @@
//
// Unicode.h
//
// Library: Data/ODBC
// Package: ODBC
// Module: Unicode
//
// Definition of Unicode_WIN32.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Data_ODBC_Unicode_WIN32_INCLUDED
#define Data_ODBC_Unicode_WIN32_INCLUDED
namespace Poco
{
namespace Data
{
namespace ODBC
{
inline void makeUTF16(SQLCHAR * pSQLChar, SQLINTEGER length, std::wstring & target)
/// Utility function for conversion from UTF-8 to UTF-16
{
int len = length;
if (SQL_NTS == len)
len = (int)std::strlen((const char *)pSQLChar);
UnicodeConverter::toUTF16((const char *)pSQLChar, len, target);
}
inline void makeUTF8(Poco::Buffer<wchar_t> & buffer, SQLINTEGER length, SQLPOINTER pTarget, SQLINTEGER targetLength)
/// Utility function for conversion from UTF-16 to UTF-8. Length is in bytes.
{
if (buffer.sizeBytes() < length)
throw InvalidArgumentException("Specified length exceeds available length.");
else if ((length % 2) != 0)
throw InvalidArgumentException("Length must be an even number.");
length /= sizeof(wchar_t);
std::string result;
UnicodeConverter::toUTF8(buffer.begin(), length, result);
std::memset(pTarget, 0, targetLength);
std::strncpy((char *)pTarget, result.c_str(), result.size() < targetLength ? result.size() : targetLength);
}
}
}
} // namespace Poco::Data::ODBC
#endif // Data_ODBC_Unicode_WIN32_INCLUDED

View File

@ -1,761 +0,0 @@
//
// Unicode.cpp
//
// Library: Data/ODBC
// Package: ODBC
// Module: Unicode
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Data/ODBC/ODBC.h"
#include "Poco/Data/ODBC/Utility.h"
#include "Poco/Data/ODBC/Unicode_WIN32.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
using Poco::Buffer;
using Poco::InvalidArgumentException;
using Poco::NotImplementedException;
namespace Poco {
namespace Data {
namespace ODBC {
SQLRETURN SQLColAttribute(SQLHSTMT hstmt,
SQLUSMALLINT iCol,
SQLUSMALLINT iField,
SQLPOINTER pCharAttr,
SQLSMALLINT cbCharAttrMax,
SQLSMALLINT* pcbCharAttr,
NumAttrPtrType pNumAttr)
{
if (isString(pCharAttr, cbCharAttrMax))
{
Buffer<wchar_t> buffer(stringLength(pCharAttr, cbCharAttrMax));
SQLRETURN rc = SQLColAttributeW(hstmt,
iCol,
iField,
buffer.begin(),
(SQLSMALLINT) buffer.sizeBytes(),
pcbCharAttr,
pNumAttr);
makeUTF8(buffer, *pcbCharAttr, pCharAttr, cbCharAttrMax);
return rc;
}
return SQLColAttributeW(hstmt,
iCol,
iField,
pCharAttr,
cbCharAttrMax,
pcbCharAttr,
pNumAttr);
}
SQLRETURN SQLColAttributes(SQLHSTMT hstmt,
SQLUSMALLINT icol,
SQLUSMALLINT fDescType,
SQLPOINTER rgbDesc,
SQLSMALLINT cbDescMax,
SQLSMALLINT* pcbDesc,
SQLLEN* pfDesc)
{
return SQLColAttribute(hstmt,
icol,
fDescType,
rgbDesc,
cbDescMax,
pcbDesc,
pfDesc);
}
SQLRETURN SQLConnect(SQLHDBC hdbc,
SQLCHAR* szDSN,
SQLSMALLINT cbDSN,
SQLCHAR* szUID,
SQLSMALLINT cbUID,
SQLCHAR* szAuthStr,
SQLSMALLINT cbAuthStr)
{
std::wstring sqlDSN;
makeUTF16(szDSN, cbDSN, sqlDSN);
std::wstring sqlUID;
makeUTF16(szUID, cbUID, sqlUID);
std::wstring sqlPWD;
makeUTF16(szAuthStr, cbAuthStr, sqlPWD);
return SQLConnectW(hdbc,
(SQLWCHAR*) sqlDSN.c_str(),
(SQLSMALLINT) sqlDSN.size(),
(SQLWCHAR*) sqlUID.c_str(),
(SQLSMALLINT) sqlUID.size(),
(SQLWCHAR*) sqlPWD.c_str(),
(SQLSMALLINT) sqlPWD.size());
}
SQLRETURN SQLDescribeCol(SQLHSTMT hstmt,
SQLUSMALLINT icol,
SQLCHAR* szColName,
SQLSMALLINT cbColNameMax,
SQLSMALLINT* pcbColName,
SQLSMALLINT* pfSqlType,
SQLULEN* pcbColDef,
SQLSMALLINT* pibScale,
SQLSMALLINT* pfNullable)
{
Buffer<wchar_t> buffer(cbColNameMax);
SQLRETURN rc = SQLDescribeColW(hstmt,
icol,
(SQLWCHAR*) buffer.begin(),
(SQLSMALLINT) buffer.size(),
pcbColName,
pfSqlType,
pcbColDef,
pibScale,
pfNullable);
makeUTF8(buffer, *pcbColName * sizeof(wchar_t), szColName, cbColNameMax);
return rc;
}
SQLRETURN SQLError(SQLHENV henv,
SQLHDBC hdbc,
SQLHSTMT hstmt,
SQLCHAR* szSqlState,
SQLINTEGER* pfNativeError,
SQLCHAR* szErrorMsg,
SQLSMALLINT cbErrorMsgMax,
SQLSMALLINT* pcbErrorMsg)
{
throw NotImplementedException("SQLError is obsolete. "
"Use SQLGetDiagRec instead.");
}
SQLRETURN SQLExecDirect(SQLHSTMT hstmt,
SQLCHAR* szSqlStr,
SQLINTEGER cbSqlStr)
{
std::wstring sqlStr;
makeUTF16(szSqlStr, cbSqlStr, sqlStr);
return SQLExecDirectW(hstmt,
(SQLWCHAR*) sqlStr.c_str(),
(SQLINTEGER) sqlStr.size());
}
SQLRETURN SQLGetConnectAttr(SQLHDBC hdbc,
SQLINTEGER fAttribute,
SQLPOINTER rgbValue,
SQLINTEGER cbValueMax,
SQLINTEGER* pcbValue)
{
if (isString(rgbValue, cbValueMax))
{
Buffer<wchar_t> buffer(stringLength(rgbValue, cbValueMax));
SQLRETURN rc = SQLGetConnectAttrW(hdbc,
fAttribute,
buffer.begin(),
(SQLINTEGER) buffer.sizeBytes(),
pcbValue);
makeUTF8(buffer, *pcbValue, rgbValue, cbValueMax);
return rc;
}
return SQLGetConnectAttrW(hdbc,
fAttribute,
rgbValue,
cbValueMax,
pcbValue);
}
SQLRETURN SQLGetCursorName(SQLHSTMT hstmt,
SQLCHAR* szCursor,
SQLSMALLINT cbCursorMax,
SQLSMALLINT* pcbCursor)
{
throw NotImplementedException("Not implemented");
}
SQLRETURN SQLSetDescField(SQLHDESC hdesc,
SQLSMALLINT iRecord,
SQLSMALLINT iField,
SQLPOINTER rgbValue,
SQLINTEGER cbValueMax)
{
if (isString(rgbValue, cbValueMax))
{
std::wstring str;
makeUTF16((SQLCHAR*) rgbValue, cbValueMax, str);
SQLRETURN rc = SQLSetDescFieldW(hdesc,
iRecord,
iField,
(SQLPOINTER) str.c_str(),
(SQLINTEGER) str.size() * sizeof(std::wstring::value_type));
return rc;
}
return SQLSetDescFieldW(hdesc,
iRecord,
iField,
rgbValue,
cbValueMax);
}
SQLRETURN SQLGetDescField(SQLHDESC hdesc,
SQLSMALLINT iRecord,
SQLSMALLINT iField,
SQLPOINTER rgbValue,
SQLINTEGER cbValueMax,
SQLINTEGER* pcbValue)
{
if (isString(rgbValue, cbValueMax))
{
Buffer<wchar_t> buffer(stringLength(rgbValue, cbValueMax));
SQLRETURN rc = SQLGetDescFieldW(hdesc,
iRecord,
iField,
buffer.begin(),
(SQLINTEGER) buffer.sizeBytes(),
pcbValue);
makeUTF8(buffer, *pcbValue, rgbValue, cbValueMax);
return rc;
}
return SQLGetDescFieldW(hdesc,
iRecord,
iField,
rgbValue,
cbValueMax,
pcbValue);
}
SQLRETURN SQLGetDescRec(SQLHDESC hdesc,
SQLSMALLINT iRecord,
SQLCHAR* szName,
SQLSMALLINT cbNameMax,
SQLSMALLINT* pcbName,
SQLSMALLINT* pfType,
SQLSMALLINT* pfSubType,
SQLLEN* pLength,
SQLSMALLINT* pPrecision,
SQLSMALLINT* pScale,
SQLSMALLINT* pNullable)
{
throw NotImplementedException();
}
SQLRETURN SQLGetDiagField(SQLSMALLINT fHandleType,
SQLHANDLE handle,
SQLSMALLINT iRecord,
SQLSMALLINT fDiagField,
SQLPOINTER rgbDiagInfo,
SQLSMALLINT cbDiagInfoMax,
SQLSMALLINT* pcbDiagInfo)
{
if (isString(rgbDiagInfo, cbDiagInfoMax))
{
Buffer<wchar_t> buffer(stringLength(rgbDiagInfo, cbDiagInfoMax));
SQLRETURN rc = SQLGetDiagFieldW(fHandleType,
handle,
iRecord,
fDiagField,
buffer.begin(),
(SQLSMALLINT) buffer.sizeBytes(),
pcbDiagInfo);
makeUTF8(buffer, *pcbDiagInfo, rgbDiagInfo, cbDiagInfoMax);
return rc;
}
return SQLGetDiagFieldW(fHandleType,
handle,
iRecord,
fDiagField,
rgbDiagInfo,
cbDiagInfoMax,
pcbDiagInfo);
}
SQLRETURN SQLGetDiagRec(SQLSMALLINT fHandleType,
SQLHANDLE handle,
SQLSMALLINT iRecord,
SQLCHAR* szSqlState,
SQLINTEGER* pfNativeError,
SQLCHAR* szErrorMsg,
SQLSMALLINT cbErrorMsgMax,
SQLSMALLINT* pcbErrorMsg)
{
const SQLINTEGER stateLen = SQL_SQLSTATE_SIZE + 1;
Buffer<wchar_t> bufState(stateLen);
Buffer<wchar_t> bufErr(cbErrorMsgMax);
SQLRETURN rc = SQLGetDiagRecW(fHandleType,
handle,
iRecord,
bufState.begin(),
pfNativeError,
bufErr.begin(),
(SQLSMALLINT) bufErr.size(),
pcbErrorMsg);
makeUTF8(bufState, stateLen * sizeof(wchar_t), szSqlState, stateLen);
makeUTF8(bufErr, *pcbErrorMsg * sizeof(wchar_t), szErrorMsg, cbErrorMsgMax);
return rc;
}
SQLRETURN SQLPrepare(SQLHSTMT hstmt,
SQLCHAR* szSqlStr,
SQLINTEGER cbSqlStr)
{
std::wstring sqlStr;
makeUTF16(szSqlStr, cbSqlStr, sqlStr);
return SQLPrepareW(hstmt,
(SQLWCHAR*) sqlStr.c_str(),
(SQLINTEGER) sqlStr.size());
}
SQLRETURN SQLSetConnectAttr(SQLHDBC hdbc,
SQLINTEGER fAttribute,
SQLPOINTER rgbValue,
SQLINTEGER cbValue)
{
if (isString(rgbValue, cbValue))
{
std::wstring str;
makeUTF16((SQLCHAR*) rgbValue, cbValue, str);
return SQLSetConnectAttrW(hdbc,
fAttribute,
(SQLWCHAR*) str.c_str(),
(SQLINTEGER) str.size() * sizeof(std::wstring::value_type));
}
return SQLSetConnectAttrW(hdbc,
fAttribute,
rgbValue,
cbValue);
}
SQLRETURN SQLSetCursorName(SQLHSTMT hstmt,
SQLCHAR* szCursor,
SQLSMALLINT cbCursor)
{
throw NotImplementedException("Not implemented");
}
SQLRETURN SQLSetStmtAttr(SQLHSTMT hstmt,
SQLINTEGER fAttribute,
SQLPOINTER rgbValue,
SQLINTEGER cbValueMax)
{
if (isString(rgbValue, cbValueMax))
{
std::wstring str;
makeUTF16((SQLCHAR*) rgbValue, cbValueMax, str);
return SQLSetStmtAttrW(hstmt,
fAttribute,
(SQLPOINTER) str.c_str(),
(SQLINTEGER) str.size());
}
return SQLSetStmtAttrW(hstmt,
fAttribute,
rgbValue,
cbValueMax);
}
SQLRETURN SQLGetStmtAttr(SQLHSTMT hstmt,
SQLINTEGER fAttribute,
SQLPOINTER rgbValue,
SQLINTEGER cbValueMax,
SQLINTEGER* pcbValue)
{
if (isString(rgbValue, cbValueMax))
{
Buffer<wchar_t> buffer(stringLength(rgbValue, cbValueMax));
return SQLGetStmtAttrW(hstmt,
fAttribute,
(SQLPOINTER) buffer.begin(),
(SQLINTEGER) buffer.sizeBytes(),
pcbValue);
}
return SQLGetStmtAttrW(hstmt,
fAttribute,
rgbValue,
cbValueMax,
pcbValue);
}
SQLRETURN SQLColumns(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szTableName,
SQLSMALLINT cbTableName,
SQLCHAR* szColumnName,
SQLSMALLINT cbColumnName)
{
throw NotImplementedException();
}
SQLRETURN SQLGetConnectOption(SQLHDBC hdbc,
SQLUSMALLINT fOption,
SQLPOINTER pvParam)
{
throw NotImplementedException();
}
SQLRETURN SQLGetInfo(SQLHDBC hdbc,
SQLUSMALLINT fInfoType,
SQLPOINTER rgbInfoValue,
SQLSMALLINT cbInfoValueMax,
SQLSMALLINT* pcbInfoValue)
{
if (cbInfoValueMax)
{
Buffer<wchar_t> buffer(cbInfoValueMax);
SQLRETURN rc = SQLGetInfoW(hdbc,
fInfoType,
(SQLPOINTER) buffer.begin(),
(SQLSMALLINT) buffer.sizeBytes(),
pcbInfoValue);
makeUTF8(buffer, *pcbInfoValue, rgbInfoValue, cbInfoValueMax);
return rc;
}
return SQLGetInfoW(hdbc,
fInfoType,
rgbInfoValue,
cbInfoValueMax,
pcbInfoValue);
}
SQLRETURN SQLGetTypeInfo(SQLHSTMT StatementHandle, SQLSMALLINT DataType)
{
return SQLGetTypeInfoW(StatementHandle, DataType);
}
SQLRETURN SQLSetConnectOption(SQLHDBC hdbc,
SQLUSMALLINT fOption,
SQLULEN vParam)
{
throw NotImplementedException();
}
SQLRETURN SQLSpecialColumns(SQLHSTMT hstmt,
SQLUSMALLINT fColType,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szTableName,
SQLSMALLINT cbTableName,
SQLUSMALLINT fScope,
SQLUSMALLINT fNullable)
{
throw NotImplementedException();
}
SQLRETURN SQLStatistics(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szTableName,
SQLSMALLINT cbTableName,
SQLUSMALLINT fUnique,
SQLUSMALLINT fAccuracy)
{
throw NotImplementedException();
}
SQLRETURN SQLTables(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szTableName,
SQLSMALLINT cbTableName,
SQLCHAR* szTableType,
SQLSMALLINT cbTableType)
{
throw NotImplementedException();
}
SQLRETURN SQLDataSources(SQLHENV henv,
SQLUSMALLINT fDirection,
SQLCHAR* szDSN,
SQLSMALLINT cbDSNMax,
SQLSMALLINT* pcbDSN,
SQLCHAR* szDesc,
SQLSMALLINT cbDescMax,
SQLSMALLINT* pcbDesc)
{
Buffer<wchar_t> bufDSN(cbDSNMax);
Buffer<wchar_t> bufDesc(cbDescMax);
SQLRETURN rc = SQLDataSourcesW(henv,
fDirection,
bufDSN.begin(),
(SQLSMALLINT) bufDSN.size(),
pcbDSN,
bufDesc.begin(),
(SQLSMALLINT) bufDesc.size(),
pcbDesc);
makeUTF8(bufDSN, *pcbDSN * sizeof(wchar_t), szDSN, cbDSNMax);
makeUTF8(bufDesc, *pcbDesc * sizeof(wchar_t), szDesc, cbDescMax);
return rc;
}
SQLRETURN SQLDriverConnect(SQLHDBC hdbc,
SQLHWND hwnd,
SQLCHAR* szConnStrIn,
SQLSMALLINT cbConnStrIn,
SQLCHAR* szConnStrOut,
SQLSMALLINT cbConnStrOutMax,
SQLSMALLINT* pcbConnStrOut,
SQLUSMALLINT fDriverCompletion)
{
std::wstring connStrIn;
int len = cbConnStrIn;
if (SQL_NTS == len)
len = (int) std::strlen((const char*) szConnStrIn);
Poco::UnicodeConverter::toUTF16((const char *) szConnStrIn, len, connStrIn);
Buffer<wchar_t> bufOut(cbConnStrOutMax);
SQLRETURN rc = SQLDriverConnectW(hdbc,
hwnd,
(SQLWCHAR*) connStrIn.c_str(),
(SQLSMALLINT) connStrIn.size(),
bufOut.begin(),
(SQLSMALLINT) bufOut.size(),
pcbConnStrOut,
fDriverCompletion);
if (!Utility::isError(rc))
makeUTF8(bufOut, *pcbConnStrOut * sizeof(wchar_t), szConnStrOut, cbConnStrOutMax);
return rc;
}
SQLRETURN SQLBrowseConnect(SQLHDBC hdbc,
SQLCHAR* szConnStrIn,
SQLSMALLINT cbConnStrIn,
SQLCHAR* szConnStrOut,
SQLSMALLINT cbConnStrOutMax,
SQLSMALLINT* pcbConnStrOut)
{
std::wstring str;
makeUTF16(szConnStrIn, cbConnStrIn, str);
Buffer<wchar_t> bufConnStrOut(cbConnStrOutMax);
SQLRETURN rc = SQLBrowseConnectW(hdbc,
(SQLWCHAR*) str.c_str(),
(SQLSMALLINT) str.size(),
bufConnStrOut.begin(),
(SQLSMALLINT) bufConnStrOut.size(),
pcbConnStrOut);
makeUTF8(bufConnStrOut, *pcbConnStrOut * sizeof(wchar_t), szConnStrOut, cbConnStrOutMax);
return rc;
}
SQLRETURN SQLColumnPrivileges(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szTableName,
SQLSMALLINT cbTableName,
SQLCHAR* szColumnName,
SQLSMALLINT cbColumnName)
{
throw NotImplementedException();
}
SQLRETURN SQLForeignKeys(SQLHSTMT hstmt,
SQLCHAR* szPkCatalogName,
SQLSMALLINT cbPkCatalogName,
SQLCHAR* szPkSchemaName,
SQLSMALLINT cbPkSchemaName,
SQLCHAR* szPkTableName,
SQLSMALLINT cbPkTableName,
SQLCHAR* szFkCatalogName,
SQLSMALLINT cbFkCatalogName,
SQLCHAR* szFkSchemaName,
SQLSMALLINT cbFkSchemaName,
SQLCHAR* szFkTableName,
SQLSMALLINT cbFkTableName)
{
throw NotImplementedException();
}
SQLRETURN SQLNativeSql(SQLHDBC hdbc,
SQLCHAR* szSqlStrIn,
SQLINTEGER cbSqlStrIn,
SQLCHAR* szSqlStr,
SQLINTEGER cbSqlStrMax,
SQLINTEGER* pcbSqlStr)
{
std::wstring str;
makeUTF16(szSqlStrIn, cbSqlStrIn, str);
Buffer<wchar_t> bufSQLOut(cbSqlStrMax);
SQLRETURN rc = SQLNativeSqlW(hdbc,
(SQLWCHAR*) str.c_str(),
(SQLINTEGER) str.size(),
bufSQLOut.begin(),
(SQLINTEGER) bufSQLOut.size(),
pcbSqlStr);
makeUTF8(bufSQLOut, *pcbSqlStr * sizeof(wchar_t), szSqlStr, cbSqlStrMax);
return rc;
}
SQLRETURN SQLPrimaryKeys(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szTableName,
SQLSMALLINT cbTableName)
{
throw NotImplementedException();
}
SQLRETURN SQLProcedureColumns(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szProcName,
SQLSMALLINT cbProcName,
SQLCHAR* szColumnName,
SQLSMALLINT cbColumnName)
{
throw NotImplementedException();
}
SQLRETURN SQLProcedures(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szProcName,
SQLSMALLINT cbProcName)
{
throw NotImplementedException();
}
SQLRETURN SQLTablePrivileges(SQLHSTMT hstmt,
SQLCHAR* szCatalogName,
SQLSMALLINT cbCatalogName,
SQLCHAR* szSchemaName,
SQLSMALLINT cbSchemaName,
SQLCHAR* szTableName,
SQLSMALLINT cbTableName)
{
throw NotImplementedException();
}
SQLRETURN SQLDrivers(SQLHENV henv,
SQLUSMALLINT fDirection,
SQLCHAR* szDriverDesc,
SQLSMALLINT cbDriverDescMax,
SQLSMALLINT* pcbDriverDesc,
SQLCHAR* szDriverAttributes,
SQLSMALLINT cbDrvrAttrMax,
SQLSMALLINT* pcbDrvrAttr)
{
Buffer<wchar_t> bufDriverDesc(cbDriverDescMax);
Buffer<wchar_t> bufDriverAttr(cbDrvrAttrMax);
SQLRETURN rc = SQLDriversW(henv,
fDirection,
bufDriverDesc.begin(),
(SQLSMALLINT) bufDriverDesc.size(),
pcbDriverDesc,
bufDriverAttr.begin(),
(SQLSMALLINT) bufDriverAttr.size(),
pcbDrvrAttr);
makeUTF8(bufDriverDesc, *pcbDriverDesc * sizeof(wchar_t), szDriverDesc, cbDriverDescMax);
makeUTF8(bufDriverAttr, *pcbDrvrAttr * sizeof(wchar_t), szDriverAttributes, cbDrvrAttrMax);
return rc;
}
} } } // namespace Poco::Data::ODBC

View File

@ -1,37 +0,0 @@
//
// AutoTransaction.h
//
// Library: Data
// Package: DataCore
// Module: AutoTransaction
//
// Forward header for the Transaction class.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Data_AutoTransaction_INCLUDED
#define Data_AutoTransaction_INCLUDED
#include "Poco/Data/Transaction.h"
namespace Poco
{
namespace Data
{
typedef Transaction AutoTransaction;
}
} // namespace Poco::Data
#endif // Data_AutoTransaction_INCLUDED

View File

@ -1,54 +0,0 @@
//
// DynamicLOB.h
//
// Library: Data
// Package: DataCore
// Module: DynamicLOB
//
// Definition of the Poco::Dynamic::Var LOB cast operators.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Data_DynamicLOB_INCLUDED
#define Data_DynamicLOB_INCLUDED
#include "Poco/Data/Data.h"
#include "Poco/Data/LOB.h"
#include "Poco/Dynamic/Var.h"
namespace Poco
{
namespace Data
{
template <typename T>
class LOB;
typedef LOB<unsigned char> BLOB;
typedef LOB<char> CLOB;
}
} // namespace Poco::Data
namespace Poco
{
namespace Dynamic
{
template <>
Data_API Var::operator Poco::Data::CLOB() const;
template <>
Data_API Var::operator Poco::Data::BLOB() const;
}
} // namespace Poco::Dynamic
#endif // Data_DynamicLOB_INCLUDED

View File

@ -1,149 +0,0 @@
//
// LOBStream.h
//
// Library: Data
// Package: DataCore
// Module: LOBStream
//
// Definition of the LOBStream class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Data_LOBStream_INCLUDED
#define Data_LOBStream_INCLUDED
#include <istream>
#include <ostream>
#include "Poco/Data/LOB.h"
#include "Poco/Foundation.h"
#include "Poco/UnbufferedStreamBuf.h"
namespace Poco
{
namespace Data
{
template <typename T>
class LOBStreamBuf : public BasicUnbufferedStreamBuf<T, std::char_traits<T>>
/// This is the streambuf class used for reading from and writing to a LOB.
{
public:
LOBStreamBuf(LOB<T> & lob) : _lob(lob), _it(_lob.begin())
/// Creates LOBStreamBuf.
{
}
~LOBStreamBuf()
/// Destroys LOBStreamBuf.
{
}
protected:
typedef std::char_traits<T> TraitsType;
typedef BasicUnbufferedStreamBuf<T, TraitsType> BaseType;
typename BaseType::int_type readFromDevice()
{
if (_it != _lob.end())
return BaseType::charToInt(*_it++);
else
return -1;
}
typename BaseType::int_type writeToDevice(T c)
{
_lob.appendRaw(&c, 1);
return 1;
}
private:
LOB<T> & _lob;
typename LOB<T>::Iterator _it;
};
template <typename T>
class LOBIOS : public virtual std::ios
/// The base class for LOBInputStream and
/// LOBOutputStream.
///
/// This class is needed to ensure the correct initialization
/// order of the stream buffer and base classes.
{
public:
LOBIOS(LOB<T> & lob, openmode mode) : _buf(lob)
/// Creates the LOBIOS with the given LOB.
{
poco_ios_init(&_buf);
}
~LOBIOS()
/// Destroys the LOBIOS.
{
}
LOBStreamBuf<T> * rdbuf()
/// Returns a pointer to the internal LOBStreamBuf.
{
return &_buf;
}
protected:
LOBStreamBuf<T> _buf;
};
template <typename T>
class LOBOutputStream : public LOBIOS<T>, public std::basic_ostream<T, std::char_traits<T>>
/// An output stream for writing to a LOB.
{
public:
LOBOutputStream(LOB<T> & lob) : LOBIOS<T>(lob, std::ios::out), std::ostream(LOBIOS<T>::rdbuf())
/// Creates the LOBOutputStream with the given LOB.
{
}
~LOBOutputStream()
/// Destroys the LOBOutputStream.
{
}
};
template <typename T>
class LOBInputStream : public LOBIOS<T>, public std::basic_istream<T, std::char_traits<T>>
/// An input stream for reading from a LOB.
{
public:
LOBInputStream(LOB<T> & lob) : LOBIOS<T>(lob, std::ios::in), std::istream(LOBIOS<T>::rdbuf())
/// Creates the LOBInputStream with the given LOB.
{
}
~LOBInputStream()
/// Destroys the LOBInputStream.
{
}
};
typedef LOBOutputStream<unsigned char> BLOBOutputStream;
typedef LOBOutputStream<char> CLOBOutputStream;
typedef LOBInputStream<unsigned char> BLOBInputStream;
typedef LOBInputStream<char> CLOBInputStream;
}
} // namespace Poco::Data
#endif // Data_LOBStream_INCLUDED
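A minimal usage sketch for the stream classes above, assuming only the CLOB typedef from Poco/Data/LOB.h; it round-trips a string through a character LOB:
#include <iostream>
#include <string>
#include "Poco/Data/LOB.h"
#include "Poco/Data/LOBStream.h"
int main()
{
    Poco::Data::CLOB clob;                     // empty character LOB
    Poco::Data::CLOBOutputStream ostr(clob);   // writeToDevice() appends to the LOB
    ostr << "hello, LOB";
    ostr.flush();
    Poco::Data::CLOBInputStream istr(clob);    // readFromDevice() walks the LOB
    std::string line;
    std::getline(istr, line);
    std::cout << line << std::endl;            // prints "hello, LOB"
    return 0;
}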

View File

@ -1,74 +0,0 @@
//
// DynamicLOB.cpp
//
// Library: Data
// Package: DataCore
// Module: DynamicLOB
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifdef __GNUC__
// TODO: determine g++ version able to do the right thing without these specializations
#include "Poco/Data/DynamicLOB.h"
#include "Poco/Data/LOB.h"
#include "Poco/Dynamic/Var.h"
namespace Poco {
namespace Dynamic {
using Poco::Data::CLOB;
using Poco::Data::BLOB;
template <>
Var::operator CLOB () const
{
VarHolder* pHolder = content();
if (!pHolder)
throw InvalidAccessException("Can not convert empty value.");
if (typeid(CLOB) == pHolder->type())
return extract<CLOB>();
else
{
std::string result;
pHolder->convert(result);
return CLOB(result);
}
}
template <>
Var::operator BLOB () const
{
VarHolder* pHolder = content();
if (!pHolder)
throw InvalidAccessException("Can not convert empty value.");
if (typeid(BLOB) == pHolder->type())
return extract<BLOB>();
else
{
std::string result;
pHolder->convert(result);
return BLOB(reinterpret_cast<const unsigned char*>(result.data()),
result.size());
}
}
} } // namespace Poco::Data
#endif // __GNUC__
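A small sketch of the Var-to-LOB conversions defined above (they are only compiled under __GNUC__ and need the Data library at link time); a Var holding a string goes through the specializations:
#include <string>
#include "Poco/Data/DynamicLOB.h"
#include "Poco/Data/LOB.h"
#include "Poco/Dynamic/Var.h"
int main()
{
    Poco::Dynamic::Var v(std::string("raw bytes"));
    Poco::Data::CLOB clob = v;   // non-LOB holder: converted via std::string
    Poco::Data::BLOB blob = v;   // same data, reinterpreted as unsigned char
    return clob.size() == blob.size() ? 0 : 1;
}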

View File

@ -31,8 +31,6 @@ set (SRCS
src/ASCIIEncoding.cpp
src/AsyncChannel.cpp
src/AtomicCounter.cpp
src/Base32Decoder.cpp
src/Base32Encoder.cpp
src/Base64Decoder.cpp
src/Base64Encoder.cpp
src/BinaryReader.cpp
@ -81,9 +79,6 @@ set (SRCS
src/HexBinaryEncoder.cpp
src/InflatingStream.cpp
src/JSONString.cpp
src/Latin1Encoding.cpp
src/Latin2Encoding.cpp
src/Latin9Encoding.cpp
src/LineEndingConverter.cpp
src/LocalDateTime.cpp
src/LogFile.cpp
@ -91,8 +86,6 @@ set (SRCS
src/LoggingFactory.cpp
src/LoggingRegistry.cpp
src/LogStream.cpp
src/Manifest.cpp
src/MD4Engine.cpp
src/MD5Engine.cpp
src/MemoryPool.cpp
src/MemoryStream.cpp
@ -113,7 +106,6 @@ set (SRCS
src/PatternFormatter.cpp
src/Pipe.cpp
src/PipeImpl.cpp
src/PipeStream.cpp
src/PriorityNotificationQueue.cpp
src/Process.cpp
src/PurgeStrategy.cpp
@ -136,10 +128,8 @@ set (SRCS
src/StreamChannel.cpp
src/StreamConverter.cpp
src/StreamCopier.cpp
src/StreamTokenizer.cpp
src/String.cpp
src/StringTokenizer.cpp
src/SynchronizedObject.cpp
src/SyslogChannel.cpp
src/Task.cpp
src/TaskManager.cpp
@ -175,9 +165,6 @@ set (SRCS
src/VarHolder.cpp
src/VarIterator.cpp
src/Void.cpp
src/Windows1250Encoding.cpp
src/Windows1251Encoding.cpp
src/Windows1252Encoding.cpp
)
add_library (_poco_foundation ${SRCS})
@ -233,7 +220,8 @@ target_link_libraries (_poco_foundation
PRIVATE
Poco::Foundation::PCRE
ch_contrib::zlib
ch_contrib::lz4)
ch_contrib::lz4
ch_contrib::double_conversion)
if(OS_DARWIN AND ARCH_AARCH64)
target_compile_definitions (_poco_foundation

View File

@ -1,105 +0,0 @@
//
// Base32Decoder.h
//
// Library: Foundation
// Package: Streams
// Module: Base32
//
// Definition of class Base32Decoder.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Base32Decoder_INCLUDED
#define Foundation_Base32Decoder_INCLUDED
#include <istream>
#include "Poco/Foundation.h"
#include "Poco/UnbufferedStreamBuf.h"
namespace Poco
{
class Foundation_API Base32DecoderBuf : public UnbufferedStreamBuf
/// This streambuf base32-decodes all data read
/// from the istream connected to it.
///
/// Note: For performance reasons, the characters
/// are read directly from the given istream's
/// underlying streambuf, so the state
/// of the istream will not reflect that of
/// its streambuf.
{
public:
Base32DecoderBuf(std::istream & istr);
~Base32DecoderBuf();
private:
int readFromDevice();
int readOne();
unsigned char _group[8];
int _groupLength;
int _groupIndex;
std::streambuf & _buf;
static unsigned char IN_ENCODING[256];
static bool IN_ENCODING_INIT;
private:
Base32DecoderBuf(const Base32DecoderBuf &);
Base32DecoderBuf & operator=(const Base32DecoderBuf &);
};
class Foundation_API Base32DecoderIOS : public virtual std::ios
/// The base class for Base32Decoder.
///
/// This class is needed to ensure the correct initialization
/// order of the stream buffer and base classes.
{
public:
Base32DecoderIOS(std::istream & istr);
~Base32DecoderIOS();
Base32DecoderBuf * rdbuf();
protected:
Base32DecoderBuf _buf;
private:
Base32DecoderIOS(const Base32DecoderIOS &);
Base32DecoderIOS & operator=(const Base32DecoderIOS &);
};
class Foundation_API Base32Decoder : public Base32DecoderIOS, public std::istream
/// This istream base32-decodes all data
/// read from the istream connected to it.
///
/// Note: For performance reasons, the characters
/// are read directly from the given istream's
/// underlying streambuf, so the state
/// of the istream will not reflect that of
/// its streambuf.
{
public:
Base32Decoder(std::istream & istr);
~Base32Decoder();
private:
Base32Decoder(const Base32Decoder &);
Base32Decoder & operator=(const Base32Decoder &);
};
} // namespace Poco
#endif // Foundation_Base32Decoder_INCLUDED

View File

@ -1,111 +0,0 @@
//
// Base32Encoder.h
//
// Library: Foundation
// Package: Streams
// Module: Base32
//
// Definition of class Base32Encoder.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Base32Encoder_INCLUDED
#define Foundation_Base32Encoder_INCLUDED
#include <ostream>
#include "Poco/Foundation.h"
#include "Poco/UnbufferedStreamBuf.h"
namespace Poco
{
class Foundation_API Base32EncoderBuf : public UnbufferedStreamBuf
/// This streambuf base32-encodes all data written
/// to it and forwards it to a connected
/// ostream.
///
/// Note: The characters are directly written
/// to the ostream's streambuf, thus bypassing
/// the ostream. The ostream's state is therefore
/// not updated to match the buffer's state.
{
public:
Base32EncoderBuf(std::ostream & ostr, bool padding = true);
~Base32EncoderBuf();
int close();
/// Closes the stream buffer.
private:
int writeToDevice(char c);
unsigned char _group[5];
int _groupLength;
std::streambuf & _buf;
bool _doPadding;
static const unsigned char OUT_ENCODING[32];
friend class Base32DecoderBuf;
Base32EncoderBuf(const Base32EncoderBuf &);
Base32EncoderBuf & operator=(const Base32EncoderBuf &);
};
class Foundation_API Base32EncoderIOS : public virtual std::ios
/// The base class for Base32Encoder.
///
/// This class is needed to ensure the correct initialization
/// order of the stream buffer and base classes.
{
public:
Base32EncoderIOS(std::ostream & ostr, bool padding = true);
~Base32EncoderIOS();
int close();
Base32EncoderBuf * rdbuf();
protected:
Base32EncoderBuf _buf;
private:
Base32EncoderIOS(const Base32EncoderIOS &);
Base32EncoderIOS & operator=(const Base32EncoderIOS &);
};
class Foundation_API Base32Encoder : public Base32EncoderIOS, public std::ostream
/// This ostream base32-encodes all data
/// written to it and forwards it to
/// a connected ostream.
/// Always call close() when done
/// writing data, to ensure proper
/// completion of the encoding operation.
///
/// Note: The characters are directly written
/// to the ostream's streambuf, thus bypassing
/// the ostream. The ostream's state is therefore
/// not updated to match the buffer's state.
{
public:
Base32Encoder(std::ostream & ostr, bool padding = true);
~Base32Encoder();
private:
Base32Encoder(const Base32Encoder &);
Base32Encoder & operator=(const Base32Encoder &);
};
} // namespace Poco
#endif // Foundation_Base32Encoder_INCLUDED
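A minimal round-trip sketch using the encoder above together with Base32Decoder; note the required close() call that completes the final group and padding:
#include <iostream>
#include <sstream>
#include <string>
#include "Poco/Base32Decoder.h"
#include "Poco/Base32Encoder.h"
int main()
{
    std::ostringstream encoded;
    Poco::Base32Encoder encoder(encoded);
    encoder << "hello";
    encoder.close();                        // completes the last group and writes padding
    std::istringstream source(encoded.str());
    Poco::Base32Decoder decoder(source);
    std::string decoded;
    decoder >> decoded;
    std::cout << encoded.str() << " -> " << decoded << std::endl;  // NBSWY3DP -> hello
    return 0;
}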

View File

@ -1,92 +0,0 @@
//
// ClassLibrary.h
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Definitions for class libraries.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_ClassLibrary_INCLUDED
#define Foundation_ClassLibrary_INCLUDED
#include <typeinfo>
#include "Poco/Foundation.h"
#include "Poco/Manifest.h"
# define POCO_LIBRARY_API
//
// the entry points for every class library
//
extern "C" {
bool POCO_LIBRARY_API pocoBuildManifest(Poco::ManifestBase * pManifest);
void POCO_LIBRARY_API pocoInitializeLibrary();
void POCO_LIBRARY_API pocoUninitializeLibrary();
}
//
// additional support for named manifests
//
#define POCO_DECLARE_NAMED_MANIFEST(name) \
extern "C" { \
bool POCO_LIBRARY_API POCO_JOIN(pocoBuildManifest, name)(Poco::ManifestBase * pManifest); \
}
//
// Macros to automatically implement pocoBuildManifest
//
// usage:
//
// POCO_BEGIN_MANIFEST(MyBaseClass)
// POCO_EXPORT_CLASS(MyFirstClass)
// POCO_EXPORT_CLASS(MySecondClass)
// ...
// POCO_END_MANIFEST
//
#define POCO_BEGIN_MANIFEST_IMPL(fnName, base) \
bool fnName(Poco::ManifestBase * pManifest_) \
{ \
typedef base _Base; \
typedef Poco::Manifest<_Base> _Manifest; \
std::string requiredType(typeid(_Manifest).name()); \
std::string actualType(pManifest_->className()); \
if (requiredType == actualType) \
{ \
Poco::Manifest<_Base> * pManifest = static_cast<_Manifest *>(pManifest_);
#define POCO_BEGIN_MANIFEST(base) POCO_BEGIN_MANIFEST_IMPL(pocoBuildManifest, base)
#define POCO_BEGIN_NAMED_MANIFEST(name, base) \
POCO_DECLARE_NAMED_MANIFEST(name) \
POCO_BEGIN_MANIFEST_IMPL(POCO_JOIN(pocoBuildManifest, name), base)
#define POCO_END_MANIFEST \
return true; \
} \
else return false; \
}
#define POCO_EXPORT_CLASS(cls) pManifest->insert(new Poco::MetaObject<cls, _Base>(#cls));
#define POCO_EXPORT_SINGLETON(cls) pManifest->insert(new Poco::MetaSingleton<cls, _Base>(#cls));
#endif // Foundation_ClassLibrary_INCLUDED
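A sketch of how a plugin library might use the macros above; AbstractPlugin and HelloPlugin are hypothetical example classes, not part of the library:
#include "Poco/ClassLibrary.h"
class AbstractPlugin
{
public:
    virtual ~AbstractPlugin() = default;
    virtual const char * name() const = 0;
};
class HelloPlugin : public AbstractPlugin
{
public:
    const char * name() const override { return "HelloPlugin"; }
};
POCO_BEGIN_MANIFEST(AbstractPlugin)
    POCO_EXPORT_CLASS(HelloPlugin)
POCO_END_MANIFEST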

View File

@ -1,355 +0,0 @@
//
// ClassLoader.h
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Definition of the ClassLoader class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_ClassLoader_INCLUDED
#define Foundation_ClassLoader_INCLUDED
#include <map>
#include "Poco/Exception.h"
#include "Poco/Foundation.h"
#include "Poco/Manifest.h"
#include "Poco/MetaObject.h"
#include "Poco/Mutex.h"
#include "Poco/SharedLibrary.h"
namespace Poco
{
template <class Base>
class ClassLoader
/// The ClassLoader loads C++ classes from shared libraries
/// at runtime. It must be instantiated with a root class
/// of the loadable classes.
/// For a class to be loadable from a library, the library
/// must provide a Manifest of all the classes it contains.
/// The Manifest for a shared library can be easily built
/// with the help of the macros in the header file
/// "Foundation/ClassLibrary.h".
///
/// Starting with POCO release 1.3, a class library can
/// export multiple manifests. In addition to the default
/// (unnamed) manifest, multiple named manifests can
/// be exported, each having a different base class.
///
/// There is one important restriction: one instance of
/// ClassLoader can only load one manifest from a class
/// library.
{
public:
typedef AbstractMetaObject<Base> Meta;
typedef Manifest<Base> Manif;
typedef void (*InitializeLibraryFunc)();
typedef void (*UninitializeLibraryFunc)();
typedef bool (*BuildManifestFunc)(ManifestBase *);
struct LibraryInfo
{
SharedLibrary * pLibrary;
const Manif * pManifest;
int refCount;
};
typedef std::map<std::string, LibraryInfo> LibraryMap;
class Iterator
/// The ClassLoader's very own iterator class.
{
public:
typedef std::pair<std::string, const Manif *> Pair;
Iterator(const typename LibraryMap::const_iterator & it) { _it = it; }
Iterator(const Iterator & it) { _it = it._it; }
~Iterator() { }
Iterator & operator=(const Iterator & it)
{
_it = it._it;
return *this;
}
inline bool operator==(const Iterator & it) const { return _it == it._it; }
inline bool operator!=(const Iterator & it) const { return _it != it._it; }
Iterator & operator++() // prefix
{
++_it;
return *this;
}
Iterator operator++(int) // postfix
{
Iterator result(_it);
++_it;
return result;
}
inline const Pair * operator*() const
{
_pair.first = _it->first;
_pair.second = _it->second.pManifest;
return &_pair;
}
inline const Pair * operator->() const
{
_pair.first = _it->first;
_pair.second = _it->second.pManifest;
return &_pair;
}
private:
typename LibraryMap::const_iterator _it;
mutable Pair _pair;
};
ClassLoader()
/// Creates the ClassLoader.
{
}
virtual ~ClassLoader()
/// Destroys the ClassLoader.
{
for (typename LibraryMap::const_iterator it = _map.begin(); it != _map.end(); ++it)
{
delete it->second.pLibrary;
delete it->second.pManifest;
}
}
void loadLibrary(const std::string & path, const std::string & manifest)
/// Loads a library from the given path, using the given manifest.
/// Does nothing if the library is already loaded.
/// Throws a LibraryLoadException if the library
/// cannot be loaded or does not have a Manifest.
/// If the library exports a function named "pocoInitializeLibrary",
/// this function is executed.
/// If called multiple times for the same library,
/// the number of calls to unloadLibrary() must be the same
/// for the library to become unloaded.
{
FastMutex::ScopedLock lock(_mutex);
typename LibraryMap::iterator it = _map.find(path);
if (it == _map.end())
{
LibraryInfo li;
li.pLibrary = 0;
li.pManifest = 0;
li.refCount = 1;
try
{
li.pLibrary = new SharedLibrary(path);
li.pManifest = new Manif();
std::string pocoBuildManifestSymbol("pocoBuildManifest");
pocoBuildManifestSymbol.append(manifest);
if (li.pLibrary->hasSymbol("pocoInitializeLibrary"))
{
InitializeLibraryFunc initializeLibrary = (InitializeLibraryFunc)li.pLibrary->getSymbol("pocoInitializeLibrary");
initializeLibrary();
}
if (li.pLibrary->hasSymbol(pocoBuildManifestSymbol))
{
BuildManifestFunc buildManifest = (BuildManifestFunc)li.pLibrary->getSymbol(pocoBuildManifestSymbol);
if (buildManifest(const_cast<Manif *>(li.pManifest)))
_map[path] = li;
else
throw LibraryLoadException(std::string("Manifest class mismatch in ") + path, manifest);
}
else
throw LibraryLoadException(std::string("No manifest in ") + path, manifest);
}
catch (...)
{
delete li.pLibrary;
delete li.pManifest;
throw;
}
}
else
{
++it->second.refCount;
}
}
void loadLibrary(const std::string & path)
/// Loads a library from the given path. Does nothing
/// if the library is already loaded.
/// Throws a LibraryLoadException if the library
/// cannot be loaded or does not have a Manifest.
/// If the library exports a function named "pocoInitializeLibrary",
/// this function is executed.
/// If called multiple times for the same library,
/// the number of calls to unloadLibrary() must be the same
/// for the library to become unloaded.
///
/// Equivalent to loadLibrary(path, "").
{
loadLibrary(path, "");
}
void unloadLibrary(const std::string & path)
/// Unloads the given library.
/// Be extremely cautious when unloading shared libraries.
/// If objects from the library are still referenced somewhere,
/// a total crash is very likely.
/// If the library exports a function named "pocoUninitializeLibrary",
/// this function is executed before it is unloaded.
/// If loadLibrary() has been called multiple times for the same
/// library, the number of calls to unloadLibrary() must be the same
/// for the library to become unloaded.
{
FastMutex::ScopedLock lock(_mutex);
typename LibraryMap::iterator it = _map.find(path);
if (it != _map.end())
{
if (--it->second.refCount == 0)
{
if (it->second.pLibrary->hasSymbol("pocoUninitializeLibrary"))
{
UninitializeLibraryFunc uninitializeLibrary
= (UninitializeLibraryFunc)it->second.pLibrary->getSymbol("pocoUninitializeLibrary");
uninitializeLibrary();
}
delete it->second.pManifest;
it->second.pLibrary->unload();
delete it->second.pLibrary;
_map.erase(it);
}
}
else
throw NotFoundException(path);
}
const Meta * findClass(const std::string & className) const
/// Returns a pointer to the MetaObject for the given
/// class, or a null pointer if the class is not known.
{
FastMutex::ScopedLock lock(_mutex);
for (typename LibraryMap::const_iterator it = _map.begin(); it != _map.end(); ++it)
{
const Manif * pManif = it->second.pManifest;
typename Manif::Iterator itm = pManif->find(className);
if (itm != pManif->end())
return *itm;
}
return 0;
}
const Meta & classFor(const std::string & className) const
/// Returns a reference to the MetaObject for the given
/// class. Throws a NotFoundException if the class
/// is not known.
{
const Meta * pMeta = findClass(className);
if (pMeta)
return *pMeta;
else
throw NotFoundException(className);
}
Base * create(const std::string & className) const
/// Creates an instance of the given class.
/// Throws a NotFoundException if the class
/// is not known.
{
return classFor(className).create();
}
Base & instance(const std::string & className) const
/// Returns a reference to the sole instance of
/// the given class. The class must be a singleton,
/// otherwise an InvalidAccessException will be thrown.
/// Throws a NotFoundException if the class
/// is not known.
{
return classFor(className).instance();
}
bool canCreate(const std::string & className) const
/// Returns true if create() can create new instances
/// of the class.
{
return classFor(className).canCreate();
}
void destroy(const std::string & className, Base * pObject) const
/// Destroys the object pObject points to.
/// Does nothing if object is not found.
{
classFor(className).destroy(pObject);
}
bool isAutoDelete(const std::string & className, Base * pObject) const
/// Returns true if the object is automatically
/// deleted by its meta object.
{
return classFor(className).isAutoDelete(pObject);
}
const Manif * findManifest(const std::string & path) const
/// Returns a pointer to the Manifest for the given
/// library, or a null pointer if the library has not been loaded.
{
FastMutex::ScopedLock lock(_mutex);
typename LibraryMap::const_iterator it = _map.find(path);
if (it != _map.end())
return it->second.pManifest;
else
return 0;
}
const Manif & manifestFor(const std::string & path) const
/// Returns a reference to the Manifest for the given library
/// Throws a NotFoundException if the library has not been loaded.
{
const Manif * pManif = findManifest(path);
if (pManif)
return *pManif;
else
throw NotFoundException(path);
}
bool isLibraryLoaded(const std::string & path) const
/// Returns true if the library with the given name
/// has already been loaded.
{
return findManifest(path) != 0;
}
Iterator begin() const
{
FastMutex::ScopedLock lock(_mutex);
return Iterator(_map.begin());
}
Iterator end() const
{
FastMutex::ScopedLock lock(_mutex);
return Iterator(_map.end());
}
private:
LibraryMap _map;
mutable FastMutex _mutex;
};
} // namespace Poco
#endif // Foundation_ClassLoader_INCLUDED
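A usage sketch for the ClassLoader, assuming a shared library built with the hypothetical AbstractPlugin/HelloPlugin manifest from the sketch above:
#include <iostream>
#include <string>
#include "Poco/ClassLoader.h"
#include "Poco/SharedLibrary.h"
class AbstractPlugin     // same hypothetical interface as in the manifest sketch
{
public:
    virtual ~AbstractPlugin() = default;
    virtual const char * name() const = 0;
};
int main()
{
    Poco::ClassLoader<AbstractPlugin> loader;
    std::string path = "HelloPlugin" + Poco::SharedLibrary::suffix();  // ".so", ".dll", ...
    loader.loadLibrary(path);                       // runs pocoInitializeLibrary() if exported
    AbstractPlugin * plugin = loader.create("HelloPlugin");
    std::cout << plugin->name() << std::endl;
    loader.destroy("HelloPlugin", plugin);          // let the meta object dispose of it
    loader.unloadLibrary(path);
    return 0;
}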

View File

@ -1,102 +0,0 @@
//
// EventLogChannel.h
//
// Library: Foundation
// Package: Logging
// Module: EventLogChannel
//
// Definition of the EventLogChannel class specific to WIN32.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_EventLogChannel_INCLUDED
#define Foundation_EventLogChannel_INCLUDED
#include "Poco/Channel.h"
#include "Poco/Foundation.h"
#include "Poco/UnWindows.h"
namespace Poco
{
class Foundation_API EventLogChannel : public Channel
/// This Windows-only channel works with the Windows NT Event Log
/// service.
///
/// To work properly, the EventLogChannel class requires that either
/// the PocoFoundation.dll or the PocoMsg.dll Dynamic Link Library
/// containing the message definition resources can be found in $PATH.
{
public:
EventLogChannel();
/// Creates the EventLogChannel.
/// The name of the current application (or more correctly,
/// the name of its executable) is taken as event source name.
EventLogChannel(const std::string & name);
/// Creates the EventLogChannel with the given event source name.
EventLogChannel(const std::string & name, const std::string & host);
/// Creates an EventLogChannel with the given event source
/// name that routes messages to the given host.
void open();
/// Opens the EventLogChannel. If necessary, the
/// required registry entries to register a
/// message resource DLL are made.
void close();
/// Closes the EventLogChannel.
void log(const Message & msg);
/// Logs the given message to the Windows Event Log.
///
/// The message type and priority are mapped to
/// appropriate values for Event Log type and category.
void setProperty(const std::string & name, const std::string & value);
/// Sets or changes a configuration property.
///
/// The following properties are supported:
///
/// * name: The name of the event source.
/// * loghost: The name of the host where the Event Log service is running.
/// The default is "localhost".
/// * host: same as loghost.
/// * logfile: The name of the log file. The default is "Application".
std::string getProperty(const std::string & name) const;
/// Returns the value of the given property.
static const std::string PROP_NAME;
static const std::string PROP_HOST;
static const std::string PROP_LOGHOST;
static const std::string PROP_LOGFILE;
protected:
~EventLogChannel();
static int getType(const Message & msg);
static int getCategory(const Message & msg);
void setUpRegistry() const;
static std::string findLibrary(const char * name);
private:
std::string _name;
std::string _host;
std::string _logFile;
HANDLE _h;
};
} // namespace Poco
#endif // Foundation_EventLogChannel_INCLUDED
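A Windows-only usage sketch: attach the channel above to the root logger (the event source name "MyService" is an arbitrary example):
#include "Poco/AutoPtr.h"
#include "Poco/EventLogChannel.h"
#include "Poco/Logger.h"
int main()
{
    Poco::AutoPtr<Poco::EventLogChannel> channel(new Poco::EventLogChannel("MyService"));
    channel->setProperty("logfile", "Application");        // see the property list above
    Poco::Logger::root().setChannel(channel);
    Poco::Logger::root().information("service started");   // lands in the Windows Event Log
    return 0;
}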

View File

@ -1,126 +0,0 @@
//
// FPEnvironment_DUMMY.h
//
// Library: Foundation
// Package: Core
// Module: FPEnvironment
//
// Definition of class FPEnvironmentImpl for platforms that do not
// support IEEE 754 extensions.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_FPEnvironment_DUMMY_INCLUDED
#define Foundation_FPEnvironment_DUMMY_INCLUDED
#include <cmath>
#include "Poco/Foundation.h"
namespace Poco
{
class Foundation_API FPEnvironmentImpl
{
protected:
enum RoundingModeImpl
{
FP_ROUND_DOWNWARD_IMPL,
FP_ROUND_UPWARD_IMPL,
FP_ROUND_TONEAREST_IMPL,
FP_ROUND_TOWARDZERO_IMPL
};
enum FlagImpl
{
FP_DIVIDE_BY_ZERO_IMPL,
FP_INEXACT_IMPL,
FP_OVERFLOW_IMPL,
FP_UNDERFLOW_IMPL,
FP_INVALID_IMPL
};
FPEnvironmentImpl();
FPEnvironmentImpl(const FPEnvironmentImpl & env);
~FPEnvironmentImpl();
FPEnvironmentImpl & operator=(const FPEnvironmentImpl & env);
void keepCurrentImpl();
static void clearFlagsImpl();
static bool isFlagImpl(FlagImpl flag);
static void setRoundingModeImpl(RoundingModeImpl mode);
static RoundingModeImpl getRoundingModeImpl();
static bool isInfiniteImpl(float value);
static bool isInfiniteImpl(double value);
static bool isInfiniteImpl(long double value);
static bool isNaNImpl(float value);
static bool isNaNImpl(double value);
static bool isNaNImpl(long double value);
static float copySignImpl(float target, float source);
static double copySignImpl(double target, double source);
static long double copySignImpl(long double target, long double source);
private:
static RoundingModeImpl _roundingMode;
};
//
// inlines
//
inline bool FPEnvironmentImpl::isInfiniteImpl(float value)
{
return std::isinf(value) != 0;
}
inline bool FPEnvironmentImpl::isInfiniteImpl(double value)
{
return std::isinf(value) != 0;
}
inline bool FPEnvironmentImpl::isInfiniteImpl(long double value)
{
return std::isinf((double)value) != 0;
}
inline bool FPEnvironmentImpl::isNaNImpl(float value)
{
return std::isnan(value) != 0;
}
inline bool FPEnvironmentImpl::isNaNImpl(double value)
{
return std::isnan(value) != 0;
}
inline bool FPEnvironmentImpl::isNaNImpl(long double value)
{
return std::isnan((double)value) != 0;
}
inline float FPEnvironmentImpl::copySignImpl(float target, float source)
{
return copysignf(target, source);
}
inline double FPEnvironmentImpl::copySignImpl(double target, double source)
{
return copysign(target, source);
}
} // namespace Poco
#endif // Foundation_FPEnvironment_DUMMY_INCLUDED

View File

@ -1,72 +0,0 @@
//
// FileStream_WIN32.h
//
// Library: Foundation
// Package: Streams
// Module: FileStream
//
// Definition of the FileStreamBuf, FileInputStream and FileOutputStream classes.
//
// Copyright (c) 2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_FileStream_WIN32_INCLUDED
#define Foundation_FileStream_WIN32_INCLUDED
#include "Poco/BufferedBidirectionalStreamBuf.h"
#include "Poco/Foundation.h"
#include "Poco/UnWindows.h"
namespace Poco
{
class Foundation_API FileStreamBuf : public BufferedBidirectionalStreamBuf
/// This stream buffer handles file I/O.
{
public:
FileStreamBuf();
/// Creates a FileStreamBuf.
~FileStreamBuf();
/// Destroys the FileStream.
void open(const std::string & path, std::ios::openmode mode);
/// Opens the given file in the given mode.
bool close();
/// Closes the File stream buffer. Returns true if successful,
/// false otherwise.
std::streampos seekoff(std::streamoff off, std::ios::seekdir dir, std::ios::openmode mode = std::ios::in | std::ios::out);
/// change position by offset, according to way and mode
std::streampos seekpos(std::streampos pos, std::ios::openmode mode = std::ios::in | std::ios::out);
/// change to specified position, according to mode
protected:
enum
{
BUFFER_SIZE = 4096
};
int readFromDevice(char * buffer, std::streamsize length);
int writeToDevice(const char * buffer, std::streamsize length);
private:
std::string _path;
HANDLE _handle;
UInt64 _pos;
};
} // namespace Poco
#endif // Foundation_FileStream_WIN32_INCLUDED
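The class above is only the WIN32 backend; the public stream classes are Poco::FileInputStream and Poco::FileOutputStream from Poco/FileStream.h. A small sketch:
#include <iostream>
#include <string>
#include "Poco/FileStream.h"
int main()
{
    Poco::FileOutputStream out("example.txt");   // backed by FileStreamBuf
    out << "one line of text\n";
    out.close();
    Poco::FileInputStream in("example.txt");
    std::string line;
    std::getline(in, line);
    std::cout << line << std::endl;
    return 0;
}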

View File

@ -1,176 +0,0 @@
//
// HashSet.h
//
// Library: Foundation
// Package: Hashing
// Module: HashSet
//
// Definition of the HashSet class.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_HashSet_INCLUDED
#define Foundation_HashSet_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/LinearHashTable.h"
namespace Poco
{
template <class Value, class HashFunc = Hash<Value>>
class HashSet
/// This class implements a set using a LinearHashTable.
///
/// A HashSet can be used just like a std::set.
{
public:
typedef Value ValueType;
typedef Value & Reference;
typedef const Value & ConstReference;
typedef Value * Pointer;
typedef const Value * ConstPointer;
typedef HashFunc Hash;
typedef LinearHashTable<ValueType, Hash> HashTable;
typedef typename HashTable::Iterator Iterator;
typedef typename HashTable::ConstIterator ConstIterator;
HashSet()
/// Creates an empty HashSet.
{
}
HashSet(std::size_t initialReserve) : _table(initialReserve)
/// Creates the HashSet, using the given initialReserve.
{
}
HashSet(const HashSet & set) : _table(set._table)
/// Creates the HashSet by copying another one.
{
}
~HashSet()
/// Destroys the HashSet.
{
}
HashSet & operator=(const HashSet & table)
/// Assigns another HashSet.
{
HashSet tmp(table);
swap(tmp);
return *this;
}
void swap(HashSet & set)
/// Swaps the HashSet with another one.
{
_table.swap(set._table);
}
ConstIterator begin() const
/// Returns an iterator pointing to the first entry, if one exists.
{
return _table.begin();
}
ConstIterator end() const
/// Returns an iterator pointing to the end of the table.
{
return _table.end();
}
Iterator begin()
/// Returns an iterator pointing to the first entry, if one exists.
{
return _table.begin();
}
Iterator end()
/// Returns an iterator pointing to the end of the table.
{
return _table.end();
}
ConstIterator find(const ValueType & value) const
/// Finds an entry in the table.
{
return _table.find(value);
}
Iterator find(const ValueType & value)
/// Finds an entry in the table.
{
return _table.find(value);
}
std::size_t count(const ValueType & value) const
/// Returns the number of elements with the given
/// value, which is either 1 or 0.
{
return _table.count(value);
}
std::pair<Iterator, bool> insert(const ValueType & value)
/// Inserts an element into the set.
///
/// If the element already exists in the set,
/// a pair(iterator, false) with iterator pointing to the
/// existing element is returned.
/// Otherwise, the element is inserted and a
/// pair(iterator, true) with iterator
/// pointing to the new element is returned.
{
return _table.insert(value);
}
void erase(Iterator it)
/// Erases the element pointed to by it.
{
_table.erase(it);
}
void erase(const ValueType & value)
/// Erases the element with the given value, if it exists.
{
_table.erase(value);
}
void clear()
/// Erases all elements.
{
_table.clear();
}
std::size_t size() const
/// Returns the number of elements in the table.
{
return _table.size();
}
bool empty() const
/// Returns true iff the table is empty.
{
return _table.empty();
}
private:
HashTable _table;
};
} // namespace Poco
#endif // Foundation_HashSet_INCLUDED
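A minimal usage sketch; std::string keys work out of the box because Poco::hash() has a std::string overload used by the default Hash functor:
#include <iostream>
#include <string>
#include "Poco/HashSet.h"
int main()
{
    Poco::HashSet<std::string> set;
    set.insert("alpha");
    set.insert("beta");
    set.insert("alpha");                                         // duplicate: insert() reports false
    std::cout << set.size() << std::endl;                        // 2
    std::cout << set.count("alpha") << std::endl;                // 1
    set.erase("beta");
    std::cout << (set.find("beta") == set.end()) << std::endl;   // 1
    return 0;
}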

View File

@ -1,352 +0,0 @@
//
// HashTable.h
//
// Library: Foundation
// Package: Hashing
// Module: HashTable
//
// Definition of the HashTable class.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_HashTable_INCLUDED
#define Foundation_HashTable_INCLUDED
#include <cstddef>
#include <cstring>
#include <map>
#include <vector>
#include "Poco/Exception.h"
#include "Poco/Foundation.h"
#include "Poco/HashFunction.h"
#include "Poco/HashStatistic.h"
namespace Poco
{
//@ deprecated
template <class Key, class Value, class KeyHashFunction = HashFunction<Key>>
class HashTable
/// A HashTable stores a key value pair that can be looked up via a hashed key.
///
/// Collision handling is done via overflow maps(!). With small hash tables, the performance of this
/// data structure will be closer to that of a map than a hash table, i.e. slower. On the plus side,
/// this class offers remove operations. Also HashTable full errors are not possible. If a fast
/// HashTable implementation is needed and the remove operation is not required, use SimpleHashTable
/// instead.
///
/// This class is NOT thread safe.
{
public:
typedef std::map<Key, Value> HashEntryMap;
typedef HashEntryMap ** HashTableVector;
typedef typename HashEntryMap::const_iterator ConstIterator;
typedef typename HashEntryMap::iterator Iterator;
HashTable(UInt32 initialSize = 251) : _entries(0), _size(0), _maxCapacity(initialSize)
/// Creates the HashTable.
{
_entries = new HashEntryMap *[initialSize];
memset(_entries, '\0', sizeof(HashEntryMap *) * initialSize);
}
HashTable(const HashTable & ht) : _entries(new HashEntryMap *[ht._maxCapacity]), _size(ht._size), _maxCapacity(ht._maxCapacity)
{
for (UInt32 i = 0; i < _maxCapacity; ++i)
{
if (ht._entries[i])
_entries[i] = new HashEntryMap(ht._entries[i]->begin(), ht._entries[i]->end());
else
_entries[i] = 0;
}
}
~HashTable()
/// Destroys the HashTable.
{
clear();
}
HashTable & operator=(const HashTable & ht)
{
if (this != &ht)
{
clear();
_maxCapacity = ht._maxCapacity;
poco_assert_dbg(_entries == 0);
_entries = new HashEntryMap *[_maxCapacity];
_size = ht._size;
for (UInt32 i = 0; i < _maxCapacity; ++i)
{
if (ht._entries[i])
_entries[i] = new HashEntryMap(ht._entries[i]->begin(), ht._entries[i]->end());
else
_entries[i] = 0;
}
}
return *this;
}
void clear()
{
if (!_entries)
return;
for (UInt32 i = 0; i < _maxCapacity; ++i)
{
delete _entries[i];
}
delete[] _entries;
_entries = 0;
_size = 0;
_maxCapacity = 0;
}
UInt32 insert(const Key & key, const Value & value)
/// Returns the hash value of the inserted item.
/// Throws an exception if the entry was already inserted
{
UInt32 hsh = hash(key);
insertRaw(key, hsh, value);
return hsh;
}
Value & insertRaw(const Key & key, UInt32 hsh, const Value & value)
/// Returns the hash value of the inserted item.
/// Throws an exception if the entry was already inserted
{
if (!_entries[hsh])
_entries[hsh] = new HashEntryMap();
std::pair<typename HashEntryMap::iterator, bool> res(_entries[hsh]->insert(std::make_pair(key, value)));
if (!res.second)
throw InvalidArgumentException("HashTable::insert, key already exists.");
_size++;
return res.first->second;
}
UInt32 update(const Key & key, const Value & value)
/// Returns the hash value of the inserted item.
/// Replaces an existing entry if it finds one
{
UInt32 hsh = hash(key);
updateRaw(key, hsh, value);
return hsh;
}
void updateRaw(const Key & key, UInt32 hsh, const Value & value)
/// Returns the hash value of the inserted item.
/// Replaces an existing entry if it finds one
{
if (!_entries[hsh])
_entries[hsh] = new HashEntryMap();
std::pair<Iterator, bool> res = _entries[hsh]->insert(std::make_pair(key, value));
if (res.second == false)
res.first->second = value;
else
_size++;
}
void remove(const Key & key)
{
UInt32 hsh = hash(key);
removeRaw(key, hsh);
}
void removeRaw(const Key & key, UInt32 hsh)
/// Performance version, allows to specify the hash value
{
if (_entries[hsh])
{
_size -= _entries[hsh]->erase(key);
}
}
UInt32 hash(const Key & key) const { return _hash(key, _maxCapacity); }
const Value & get(const Key & key) const
/// Throws an exception if the value does not exist
{
UInt32 hsh = hash(key);
return getRaw(key, hsh);
}
const Value & getRaw(const Key & key, UInt32 hsh) const
/// Throws an exception if the value does not exist
{
if (!_entries[hsh])
throw InvalidArgumentException("key not found");
ConstIterator it = _entries[hsh]->find(key);
if (it == _entries[hsh]->end())
throw InvalidArgumentException("key not found");
return it->second;
}
Value & get(const Key & key)
/// Throws an exception if the value does not exist
{
UInt32 hsh = hash(key);
return const_cast<Value &>(getRaw(key, hsh));
}
const Value & operator[](const Key & key) const { return get(key); }
Value & operator[](const Key & key)
{
UInt32 hsh = hash(key);
if (!_entries[hsh])
return insertRaw(key, hsh, Value());
ConstIterator it = _entries[hsh]->find(key);
if (it == _entries[hsh]->end())
return insertRaw(key, hsh, Value());
return it->second;
}
const Key & getKeyRaw(const Key & key, UInt32 hsh)
/// Throws an exception if the key does not exist. Returns a reference to the internally
/// stored key. Useful when someone does an insert and, for performance reasons, only wants to store
/// a pointer to the key in another collection.
{
if (!_entries[hsh])
throw InvalidArgumentException("key not found");
ConstIterator it = _entries[hsh]->find(key);
if (it == _entries[hsh]->end())
throw InvalidArgumentException("key not found");
return it->first;
}
bool get(const Key & key, Value & v) const
/// Sets v to the found value, returns false if no value was found
{
UInt32 hsh = hash(key);
return getRaw(key, hsh, v);
}
bool getRaw(const Key & key, UInt32 hsh, Value & v) const
/// Sets v to the found value, returns false if no value was found
{
if (!_entries[hsh])
return false;
ConstIterator it = _entries[hsh]->find(key);
if (it == _entries[hsh]->end())
return false;
v = it->second;
return true;
}
bool exists(const Key & key)
{
UInt32 hsh = hash(key);
return existsRaw(key, hsh);
}
bool existsRaw(const Key & key, UInt32 hsh) { return _entries[hsh] && (_entries[hsh]->end() != _entries[hsh]->find(key)); }
std::size_t size() const
/// Returns the number of elements already inserted into the HashTable
{
return _size;
}
UInt32 maxCapacity() const { return _maxCapacity; }
void resize(UInt32 newSize)
/// Resizes the hashtable, rehashes all existing entries. Expensive!
{
if (_maxCapacity != newSize)
{
HashTableVector cpy = _entries;
_entries = 0;
UInt32 oldSize = _maxCapacity;
_maxCapacity = newSize;
_entries = new HashEntryMap *[_maxCapacity];
memset(_entries, '\0', sizeof(HashEntryMap *) * _maxCapacity);
if (_size == 0)
{
// no data was yet inserted
delete[] cpy;
return;
}
_size = 0;
for (UInt32 i = 0; i < oldSize; ++i)
{
if (cpy[i])
{
ConstIterator it = cpy[i]->begin();
ConstIterator itEnd = cpy[i]->end();
for (; it != itEnd; ++it)
{
insert(it->first, it->second);
}
delete cpy[i];
}
}
delete[] cpy;
}
}
HashStatistic currentState(bool details = false) const
/// Returns the current internal state
{
UInt32 numberOfEntries = (UInt32)_size;
UInt32 numZeroEntries = 0;
UInt32 maxEntriesPerHash = 0;
std::vector<UInt32> detailedEntriesPerHash;
#ifdef _DEBUG
UInt32 totalSize = 0;
#endif
for (UInt32 i = 0; i < _maxCapacity; ++i)
{
if (_entries[i])
{
UInt32 size = (UInt32)_entries[i]->size();
poco_assert_dbg(size != 0);
if (size > maxEntriesPerHash)
maxEntriesPerHash = size;
if (details)
detailedEntriesPerHash.push_back(size);
#ifdef _DEBUG
totalSize += size;
#endif
}
else
{
numZeroEntries++;
if (details)
detailedEntriesPerHash.push_back(0);
}
}
#ifdef _DEBUG
poco_assert_dbg(totalSize == numberOfEntries);
#endif
return HashStatistic(_maxCapacity, numberOfEntries, numZeroEntries, maxEntriesPerHash, detailedEntriesPerHash);
}
private:
HashTableVector _entries;
std::size_t _size;
UInt32 _maxCapacity;
KeyHashFunction _hash;
};
} // namespace Poco
#endif // Foundation_HashTable_INCLUDED
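A minimal usage sketch for the (deprecated) table above; the default KeyHashFunction handles std::string keys:
#include <iostream>
#include <string>
#include "Poco/HashTable.h"
int main()
{
    Poco::HashTable<std::string, int> table;
    table.insert("one", 1);
    table.update("one", 11);                            // replaces the existing entry
    table.insert("two", 2);
    std::cout << table.get("one") << std::endl;         // 11
    std::cout << table.exists("three") << std::endl;    // 0
    int value = 0;
    if (table.get("two", value))
        std::cout << value << std::endl;                // 2
    return 0;
}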

View File

@ -1,52 +0,0 @@
//
// Latin1Encoding.h
//
// Library: Foundation
// Package: Text
// Module: Latin1Encoding
//
// Definition of the Latin1Encoding class.
//
// Copyright (c) 2004-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Latin1Encoding_INCLUDED
#define Foundation_Latin1Encoding_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/TextEncoding.h"
namespace Poco
{
class Foundation_API Latin1Encoding : public TextEncoding
/// ISO Latin-1 (8859-1) text encoding.
{
public:
Latin1Encoding();
~Latin1Encoding();
const char * canonicalName() const;
bool isA(const std::string & encodingName) const;
const CharacterMap & characterMap() const;
int convert(const unsigned char * bytes) const;
int convert(int ch, unsigned char * bytes, int length) const;
int queryConvert(const unsigned char * bytes, int length) const;
int sequenceLength(const unsigned char * bytes, int length) const;
private:
static const char * _names[];
static const CharacterMap _charMap;
};
} // namespace Poco
#endif // Foundation_Latin1Encoding_INCLUDED

View File

@ -1,55 +0,0 @@
//
// Latin2Encoding.h
//
// Library: Foundation
// Package: Text
// Module: Latin2Encoding
//
// Definition of the Latin2Encoding class.
//
// Copyright (c) 2004-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Latin2Encoding_INCLUDED
#define Foundation_Latin2Encoding_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/TextEncoding.h"
namespace Poco
{
class Foundation_API Latin2Encoding : public TextEncoding
/// ISO Latin-2 (8859-2) text encoding.
///
/// Latin-2 covers the Central and Eastern European languages
/// (e.g. Polish, Czech, Slovak, Hungarian) and differs from
/// Latin-1 in the upper half of the character table.
{
public:
Latin2Encoding();
virtual ~Latin2Encoding();
const char * canonicalName() const;
bool isA(const std::string & encodingName) const;
const CharacterMap & characterMap() const;
int convert(const unsigned char * bytes) const;
int convert(int ch, unsigned char * bytes, int length) const;
int queryConvert(const unsigned char * bytes, int length) const;
int sequenceLength(const unsigned char * bytes, int length) const;
private:
static const char * _names[];
static const CharacterMap _charMap;
};
} // namespace Poco
#endif // Foundation_Latin2Encoding_INCLUDED

View File

@ -1,55 +0,0 @@
//
// Latin9Encoding.h
//
// Library: Foundation
// Package: Text
// Module: Latin9Encoding
//
// Definition of the Latin9Encoding class.
//
// Copyright (c) 2004-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Latin9Encoding_INCLUDED
#define Foundation_Latin9Encoding_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/TextEncoding.h"
namespace Poco
{
class Foundation_API Latin9Encoding : public TextEncoding
/// ISO Latin-9 (8859-15) text encoding.
///
/// Latin-9 is basically Latin-1 with the EURO sign plus
/// some other minor changes.
{
public:
Latin9Encoding();
~Latin9Encoding();
const char * canonicalName() const;
bool isA(const std::string & encodingName) const;
const CharacterMap & characterMap() const;
int convert(const unsigned char * bytes) const;
int convert(int ch, unsigned char * bytes, int length) const;
int queryConvert(const unsigned char * bytes, int length) const;
int sequenceLength(const unsigned char * bytes, int length) const;
private:
static const char * _names[];
static const CharacterMap _charMap;
};
} // namespace Poco
#endif // Foundation_Latin9Encoding_INCLUDED
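A sketch converting Latin-1 text to UTF-8 with Poco::TextConverter; the other encodings above plug in the same way:
#include <iostream>
#include <string>
#include "Poco/Latin1Encoding.h"
#include "Poco/TextConverter.h"
#include "Poco/UTF8Encoding.h"
int main()
{
    Poco::Latin1Encoding latin1;
    Poco::UTF8Encoding utf8;
    Poco::TextConverter converter(latin1, utf8);
    std::string latin1Text = "caf\xE9";          // "café" in ISO 8859-1
    std::string utf8Text;
    converter.convert(latin1Text, utf8Text);
    std::cout << utf8Text.size() << std::endl;   // 5: the e-acute becomes two UTF-8 bytes
    return 0;
}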

View File

@ -1,96 +0,0 @@
//
// MD4Engine.h
//
// Library: Foundation
// Package: Crypt
// Module: MD4Engine
//
// Definition of class MD4Engine.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
//
// MD4 (RFC 1320) algorithm:
// Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
// rights reserved.
//
// License to copy and use this software is granted provided that it
// is identified as the "RSA Data Security, Inc. MD4 Message-Digest
// Algorithm" in all material mentioning or referencing this software
// or this function.
//
// License is also granted to make and use derivative works provided
// that such works are identified as "derived from the RSA Data
// Security, Inc. MD4 Message-Digest Algorithm" in all material
// mentioning or referencing the derived work.
//
// RSA Data Security, Inc. makes no representations concerning either
// the merchantability of this software or the suitability of this
// software for any particular purpose. It is provided "as is"
// without express or implied warranty of any kind.
//
// These notices must be retained in any copies of any part of this
// documentation and/or software.
//
#ifndef Foundation_MD4Engine_INCLUDED
#define Foundation_MD4Engine_INCLUDED
#include "Poco/DigestEngine.h"
#include "Poco/Foundation.h"
namespace Poco
{
class Foundation_API MD4Engine : public DigestEngine
/// This class implements the MD4 message digest algorithm,
/// described in RFC 1320.
{
public:
enum
{
BLOCK_SIZE = 64,
DIGEST_SIZE = 16
};
MD4Engine();
~MD4Engine();
std::size_t digestLength() const;
void reset();
const DigestEngine::Digest & digest();
protected:
void updateImpl(const void * data, std::size_t length);
private:
static void transform(UInt32 state[4], const unsigned char block[64]);
static void encode(unsigned char * output, const UInt32 * input, std::size_t len);
static void decode(UInt32 * output, const unsigned char * input, std::size_t len);
struct Context
{
UInt32 state[4]; // state (ABCD)
UInt32 count[2]; // number of bits, modulo 2^64 (lsb first)
unsigned char buffer[64]; // input buffer
};
Context _context;
DigestEngine::Digest _digest;
MD4Engine(const MD4Engine &);
MD4Engine & operator=(const MD4Engine &);
};
} // namespace Poco
#endif // Foundation_MD4Engine_INCLUDED
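A usage sketch; the expected output is the RFC 1320 test vector for "abc":
#include <iostream>
#include "Poco/DigestEngine.h"
#include "Poco/MD4Engine.h"
int main()
{
    Poco::MD4Engine md4;
    md4.update("abc");                                   // DigestEngine::update(const std::string &)
    const Poco::DigestEngine::Digest & digest = md4.digest();
    std::cout << Poco::DigestEngine::digestToHex(digest) << std::endl;
    // a448017aaf21d8525fc10ae87aa6729d
    return 0;
}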

View File

@ -1,152 +0,0 @@
//
// Manifest.h
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Definition of the Manifest class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Manifest_INCLUDED
#define Foundation_Manifest_INCLUDED
#include <map>
#include <typeinfo>
#include "Poco/Foundation.h"
#include "Poco/MetaObject.h"
namespace Poco
{
class Foundation_API ManifestBase
/// ManifestBase is a common base class for
/// all instantiations of Manifest.
{
public:
ManifestBase();
virtual ~ManifestBase();
virtual const char * className() const = 0;
/// Returns the type name of the manifest's class.
};
template <class B>
class Manifest : public ManifestBase
/// A Manifest maintains a list of all classes
/// contained in a dynamically loadable class
/// library.
/// Internally, the information is held
/// in a map. An iterator is provided to
/// iterate over all the classes in a Manifest.
{
public:
typedef AbstractMetaObject<B> Meta;
typedef std::map<std::string, const Meta *> MetaMap;
class Iterator
/// The Manifest's very own iterator class.
{
public:
Iterator(const typename MetaMap::const_iterator & it) { _it = it; }
Iterator(const Iterator & it) { _it = it._it; }
~Iterator() { }
Iterator & operator=(const Iterator & it)
{
_it = it._it;
return *this;
}
inline bool operator==(const Iterator & it) const { return _it == it._it; }
inline bool operator!=(const Iterator & it) const { return _it != it._it; }
Iterator & operator++() // prefix
{
++_it;
return *this;
}
Iterator operator++(int) // postfix
{
Iterator result(_it);
++_it;
return result;
}
inline const Meta * operator*() const { return _it->second; }
inline const Meta * operator->() const { return _it->second; }
private:
typename MetaMap::const_iterator _it;
};
Manifest()
/// Creates an empty Manifest.
{
}
virtual ~Manifest()
/// Destroys the Manifest.
{
clear();
}
Iterator find(const std::string & className) const
/// Returns an iterator pointing to the MetaObject
/// for the given class. If the MetaObject cannot
/// be found, the iterator points to end().
{
return Iterator(_metaMap.find(className));
}
Iterator begin() const { return Iterator(_metaMap.begin()); }
Iterator end() const { return Iterator(_metaMap.end()); }
bool insert(const Meta * pMeta)
/// Inserts a MetaObject. Returns true if insertion
/// was successful, false if a class with the same
/// name already exists.
{
return _metaMap.insert(typename MetaMap::value_type(pMeta->name(), pMeta)).second;
}
void clear()
/// Removes all MetaObjects from the manifest.
{
for (typename MetaMap::iterator it = _metaMap.begin(); it != _metaMap.end(); ++it)
{
delete it->second;
}
_metaMap.clear();
}
int size() const
/// Returns the number of MetaObjects in the Manifest.
{
return int(_metaMap.size());
}
bool empty() const
/// Returns true iff the Manifest does not contain any MetaObjects.
{
return _metaMap.empty();
}
const char * className() const { return typeid(*this).name(); }
private:
MetaMap _metaMap;
};
} // namespace Poco
#endif // Foundation_Manifest_INCLUDED

View File

@ -1,50 +0,0 @@
//
// PipeImpl_DUMMY.h
//
// Library: Foundation
// Package: Processes
// Module: PipeImpl
//
// Definition of the PipeImpl_DUMMY class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_PipeImpl_DUMMY_INCLUDED
#define Foundation_PipeImpl_DUMMY_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/RefCountedObject.h"
namespace Poco
{
class Foundation_API PipeImpl : public RefCountedObject
/// A dummy implementation of PipeImpl for platforms
/// that do not support pipes.
{
public:
typedef int Handle;
PipeImpl();
~PipeImpl();
int writeBytes(const void * buffer, int length);
int readBytes(void * buffer, int length);
Handle readHandle() const;
Handle writeHandle() const;
void closeRead();
void closeWrite();
};
} // namespace Poco
#endif // Foundation_PipeImpl_DUMMY_INCLUDED

View File

@ -1,121 +0,0 @@
//
// PipeStream.h
//
// Library: Foundation
// Package: Processes
// Module: PipeStream
//
// Definition of the PipeStream class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_PipeStream_INCLUDED
#define Foundation_PipeStream_INCLUDED
#include <istream>
#include <ostream>
#include "Poco/BufferedStreamBuf.h"
#include "Poco/Foundation.h"
#include "Poco/Pipe.h"
namespace Poco
{
class Foundation_API PipeStreamBuf : public BufferedStreamBuf
/// This is the streambuf class used for reading from and writing to a Pipe.
{
public:
typedef BufferedStreamBuf::openmode openmode;
PipeStreamBuf(const Pipe & pipe, openmode mode);
/// Creates a PipeStreamBuf with the given Pipe.
~PipeStreamBuf();
/// Destroys the PipeStreamBuf.
void close();
/// Closes the pipe.
protected:
int readFromDevice(char * buffer, std::streamsize length);
int writeToDevice(const char * buffer, std::streamsize length);
private:
enum
{
STREAM_BUFFER_SIZE = 1024
};
Pipe _pipe;
};
class Foundation_API PipeIOS : public virtual std::ios
/// The base class for PipeInputStream and
/// PipeOutputStream.
///
/// This class is needed to ensure the correct initialization
/// order of the stream buffer and base classes.
{
public:
PipeIOS(const Pipe & pipe, openmode mode);
/// Creates the PipeIOS with the given Pipe.
~PipeIOS();
/// Destroys the PipeIOS.
///
/// Flushes the buffer, but does not close the pipe.
PipeStreamBuf * rdbuf();
/// Returns a pointer to the internal PipeStreamBuf.
void close();
/// Flushes the stream and closes the pipe.
protected:
PipeStreamBuf _buf;
};
class Foundation_API PipeOutputStream : public PipeIOS, public std::ostream
/// An output stream for writing to a Pipe.
{
public:
PipeOutputStream(const Pipe & pipe);
/// Creates the PipeOutputStream with the given Pipe.
~PipeOutputStream();
/// Destroys the PipeOutputStream.
///
/// Flushes the buffer, but does not close the pipe.
};
class Foundation_API PipeInputStream : public PipeIOS, public std::istream
/// An input stream for reading from a Pipe.
///
/// Using formatted input from a PipeInputStream
/// is not recommended, due to the read-ahead behavior of
/// istream with formatted reads.
{
public:
PipeInputStream(const Pipe & pipe);
/// Creates the PipeInputStream with the given Pipe.
~PipeInputStream();
/// Destroys the PipeInputStream.
};
} // namespace Poco
#endif // Foundation_PipeStream_INCLUDED
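A POSIX-flavoured sketch: capture a child process's stdout through a Pipe and the PipeInputStream above (/bin/echo is just an example command):
#include <iostream>
#include "Poco/Pipe.h"
#include "Poco/PipeStream.h"
#include "Poco/Process.h"
#include "Poco/StreamCopier.h"
int main()
{
    Poco::Pipe outPipe;
    Poco::Process::Args args;
    args.push_back("hello from a child process");
    Poco::ProcessHandle ph = Poco::Process::launch("/bin/echo", args, 0, &outPipe, 0);
    Poco::PipeInputStream istr(outPipe);             // reads the child's stdout
    Poco::StreamCopier::copyStream(istr, std::cout);
    ph.wait();
    return 0;
}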

View File

@ -1,89 +0,0 @@
//
// SharedMemoryImpl.h
//
// Library: Foundation
// Package: Processes
// Module: SharedMemoryImpl
//
// Definition of the SharedMemoryImpl class.
//
// Copyright (c) 2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_SharedMemoryImpl_INCLUDED
#define Foundation_SharedMemoryImpl_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/RefCountedObject.h"
#include "Poco/SharedMemory.h"
namespace Poco
{
class Foundation_API SharedMemoryImpl : public RefCountedObject
/// A dummy implementation of shared memory, for systems
/// that do not have shared memory support.
{
public:
SharedMemoryImpl(const std::string & id, std::size_t size, SharedMemory::AccessMode mode, const void * addr, bool server);
/// Creates or connects to a shared memory object with the given name.
///
/// For maximum portability, name should be a valid Unix filename and not
/// contain any slashes or backslashes.
///
/// An address hint can be passed to the system, specifying the desired
/// start address of the shared memory area. Whether the hint
/// is actually honored is, however, up to the system. Windows platform
/// will generally ignore the hint.
SharedMemoryImpl(const Poco::File & aFile, SharedMemory::AccessMode mode, const void * addr);
/// Maps the entire contents of file into a shared memory segment.
///
/// An address hint can be passed to the system, specifying the desired
/// start address of the shared memory area. Whether the hint
/// is actually honored is, however, up to the system. Windows platform
/// will generally ignore the hint.
char * begin() const;
/// Returns the start address of the shared memory segment.
char * end() const;
/// Returns the one-past-end address of the shared memory segment.
protected:
~SharedMemoryImpl();
/// Destroys the SharedMemoryImpl.
private:
SharedMemoryImpl();
SharedMemoryImpl(const SharedMemoryImpl &);
SharedMemoryImpl & operator=(const SharedMemoryImpl &);
};
//
// inlines
//
inline char * SharedMemoryImpl::begin() const
{
return 0;
}
inline char * SharedMemoryImpl::end() const
{
return 0;
}
} // namespace Poco
#endif // Foundation_SharedMemoryImpl_INCLUDED

View File

@ -1,387 +0,0 @@
//
// SimpleHashTable.h
//
// Library: Foundation
// Package: Hashing
// Module: SimpleHashTable
//
// Definition of the SimpleHashTable class.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_SimpleHashTable_INCLUDED
#define Foundation_SimpleHashTable_INCLUDED
#include <algorithm>
#include <cstddef>
#include <map>
#include <vector>
#include "Poco/Exception.h"
#include "Poco/Foundation.h"
#include "Poco/HashFunction.h"
#include "Poco/HashStatistic.h"
namespace Poco
{
//@ deprecated
template <class Key, class Value, class KeyHashFunction = HashFunction<Key>>
class SimpleHashTable
/// A SimpleHashTable stores a key value pair that can be looked up via a hashed key.
///
/// In comparison to a HashTable, this class handles collisions by sequentially searching the next
/// free location. This also means that the maximum size of this table is limited: if the hash table
/// is full, it will throw an exception. This class also does not support remove operations.
/// On the plus side it is faster than the HashTable.
///
/// This class is NOT thread safe.
{
public:
class HashEntry
{
public:
Key key;
Value value;
HashEntry(const Key k, const Value v) : key(k), value(v) { }
};
typedef std::vector<HashEntry *> HashTableVector;
SimpleHashTable(UInt32 capacity = 251) : _entries(capacity, 0), _size(0), _capacity(capacity)
/// Creates the SimpleHashTable.
{
}
SimpleHashTable(const SimpleHashTable & ht) : _size(ht._size), _capacity(ht._capacity)
{
_entries.reserve(ht._capacity);
for (typename HashTableVector::iterator it = ht._entries.begin(); it != ht._entries.end(); ++it)
{
if (*it)
_entries.push_back(new HashEntry(*it));
else
_entries.push_back(0);
}
}
~SimpleHashTable()
/// Destroys the SimpleHashTable.
{
clear();
}
SimpleHashTable & operator=(const SimpleHashTable & ht)
{
if (this != &ht)
{
SimpleHashTable tmp(ht);
swap(tmp);
}
return *this;
}
void swap(SimpleHashTable & ht)
{
using std::swap;
swap(_entries, ht._entries);
swap(_size, ht._size);
swap(_capacity, ht._capacity);
}
void clear()
{
for (typename HashTableVector::iterator it = _entries.begin(); it != _entries.end(); ++it)
{
delete *it;
*it = 0;
}
_size = 0;
}
UInt32 insert(const Key & key, const Value & value)
/// Returns the hash value of the inserted item.
/// Throws an exception if the entry was already inserted
{
UInt32 hsh = hash(key);
insertRaw(key, hsh, value);
return hsh;
}
Value & insertRaw(const Key & key, UInt32 hsh, const Value & value)
/// Returns a reference to the newly inserted value.
/// Throws an exception if the entry was already inserted.
{
UInt32 pos = hsh;
if (!_entries[pos])
_entries[pos] = new HashEntry(key, value);
else
{
UInt32 origHash = hsh;
while (_entries[hsh % _capacity])
{
if (_entries[hsh % _capacity]->key == key)
throw ExistsException();
if (hsh - origHash > _capacity)
throw PoolOverflowException("SimpleHashTable full");
hsh++;
}
pos = hsh % _capacity;
_entries[pos] = new HashEntry(key, value);
}
_size++;
return _entries[pos]->value;
}
UInt32 update(const Key & key, const Value & value)
/// Returns the hash value of the inserted item.
/// Replaces an existing entry if it finds one
{
UInt32 hsh = hash(key);
updateRaw(key, hsh, value);
return hsh;
}
void updateRaw(const Key & key, UInt32 hsh, const Value & value)
/// Replaces an existing entry if one is found,
/// otherwise inserts a new entry.
{
if (!_entries[hsh])
_entries[hsh] = new HashEntry(key, value);
else
{
UInt32 origHash = hsh;
while (_entries[hsh % _capacity])
{
if (_entries[hsh % _capacity]->key == key)
{
_entries[hsh % _capacity]->value = value;
return;
}
if (hsh - origHash > _capacity)
throw PoolOverflowException("SimpleHashTable full");
hsh++;
}
_entries[hsh % _capacity] = new HashEntry(key, value);
}
_size++;
}
UInt32 hash(const Key & key) const { return _hash(key, _capacity); }
const Value & get(const Key & key) const
/// Throws an exception if the value does not exist
{
UInt32 hsh = hash(key);
return getRaw(key, hsh);
}
const Value & getRaw(const Key & key, UInt32 hsh) const
/// Throws an exception if the value does not exist
{
UInt32 origHash = hsh;
while (true)
{
if (_entries[hsh % _capacity])
{
if (_entries[hsh % _capacity]->key == key)
{
return _entries[hsh % _capacity]->value;
}
}
else
throw InvalidArgumentException("value not found");
if (hsh - origHash > _capacity)
throw InvalidArgumentException("value not found");
hsh++;
}
}
Value & get(const Key & key)
/// Throws an exception if the value does not exist
{
UInt32 hsh = hash(key);
return const_cast<Value &>(getRaw(key, hsh));
}
const Value & operator[](const Key & key) const { return get(key); }
Value & operator[](const Key & key)
{
UInt32 hsh = hash(key);
UInt32 origHash = hsh;
while (true)
{
if (_entries[hsh % _capacity])
{
if (_entries[hsh % _capacity]->key == key)
{
return _entries[hsh % _capacity]->value;
}
}
else
return insertRaw(key, hsh, Value());
if (hsh - origHash > _capacity)
return insertRaw(key, hsh, Value());
hsh++;
}
}
const Key & getKeyRaw(const Key & key, UInt32 hsh)
/// Throws an exception if the key does not exist. Returns a reference to the internally
/// stored key. Useful when someone does an insert and, for performance reasons, only wants
/// to store a pointer to the key in another collection.
{
UInt32 origHash = hsh;
while (true)
{
if (_entries[hsh % _capacity])
{
if (_entries[hsh % _capacity]->key == key)
{
return _entries[hsh % _capacity]->key;
}
}
else
throw InvalidArgumentException("key not found");
if (hsh - origHash > _capacity)
throw InvalidArgumentException("key not found");
hsh++;
}
}
bool get(const Key & key, Value & v) const
/// Sets v to the found value, returns false if no value was found
{
UInt32 hsh = hash(key);
return getRaw(key, hsh, v);
}
bool getRaw(const Key & key, UInt32 hsh, Value & v) const
/// Sets v to the found value, returns false if no value was found
{
UInt32 origHash = hsh;
while (true)
{
if (_entries[hsh % _capacity])
{
if (_entries[hsh % _capacity]->key == key)
{
v = _entries[hsh % _capacity]->value;
return true;
}
}
else
return false;
if (hsh - origHash > _capacity)
return false;
hsh++;
}
}
bool exists(const Key & key) const
{
UInt32 hsh = hash(key);
return existsRaw(key, hsh);
}
bool existsRaw(const Key & key, UInt32 hsh) const
{
UInt32 origHash = hsh;
while (true)
{
if (_entries[hsh % _capacity])
{
if (_entries[hsh % _capacity]->key == key)
{
return true;
}
}
else
return false;
if (hsh - origHash > _capacity)
return false;
hsh++;
}
}
std::size_t size() const
/// Returns the number of elements already inserted into the SimpleHashTable
{
return _size;
}
UInt32 capacity() const { return _capacity; }
void resize(UInt32 newSize)
/// Resizes the hashtable, rehashes all existing entries. Expensive!
{
if (_capacity != newSize)
{
SimpleHashTable tmp(newSize);
swap(tmp);
for (typename HashTableVector::const_iterator it = tmp._entries.begin(); it != tmp._entries.end(); ++it)
{
if (*it)
{
insertRaw((*it)->key, hash((*it)->key), (*it)->value);
}
}
}
}
HashStatistic currentState(bool details = false) const
/// Returns the current internal state
{
UInt32 numberOfEntries = (UInt32)_size;
UInt32 numZeroEntries = 0;
UInt32 maxEntriesPerHash = 0;
std::vector<UInt32> detailedEntriesPerHash;
#ifdef _DEBUG
UInt32 totalSize = 0;
#endif
for (int i = 0; i < _capacity; ++i)
{
if (_entries[i])
{
maxEntriesPerHash = 1;
UInt32 size = 1;
if (details)
detailedEntriesPerHash.push_back(size);
#ifdef _DEBUG
totalSize += size;
#endif
}
else
{
numZeroEntries++;
if (details)
detailedEntriesPerHash.push_back(0);
}
}
#ifdef _DEBUG
poco_assert_dbg(totalSize == numberOfEntries);
#endif
return HashStatistic(_capacity, numberOfEntries, numZeroEntries, maxEntriesPerHash, detailedEntriesPerHash);
}
private:
HashTableVector _entries;
std::size_t _size;
UInt32 _capacity;
KeyHashFunction _hash;
};
} // namespace Poco
#endif // Foundation_SimpleHashTable_INCLUDED
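A short usage sketch (not part of the original header), assuming the default HashFunction from Poco/HashFunction.h works for std::string keys:

#include "Poco/SimpleHashTable.h"
#include <iostream>
#include <string>

int main()
{
    Poco::SimpleHashTable<std::string, int> table(31);   // capacity is fixed up front
    table.insert("one", 1);
    table.insert("two", 2);
    int value = 0;
    if (table.get("two", value))                          // non-throwing lookup
        std::cout << "two -> " << value << '\n';
    std::cout << table.size() << " of " << table.capacity() << " slots used\n";
    return 0;
}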

View File

@ -1,98 +0,0 @@
//
// StreamTokenizer.h
//
// Library: Foundation
// Package: Streams
// Module: StreamTokenizer
//
// Definition of the StreamTokenizer class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_StreamTokenizer_INCLUDED
#define Foundation_StreamTokenizer_INCLUDED
#include <istream>
#include <vector>
#include "Poco/Foundation.h"
#include "Poco/Token.h"
namespace Poco
{
class Foundation_API StreamTokenizer
/// A stream tokenizer splits an input stream
/// into a sequence of tokens of different kinds.
/// Various token kinds can be registered with
/// the tokenizer.
{
public:
StreamTokenizer();
/// Creates a StreamTokenizer with no attached stream.
StreamTokenizer(std::istream & istr);
/// Creates a StreamTokenizer and attaches it to the given input stream.
virtual ~StreamTokenizer();
/// Destroys the StreamTokenizer and deletes all
/// registered tokens.
void attachToStream(std::istream & istr);
/// Attaches the tokenizer to an input stream.
void addToken(Token * pToken);
/// Adds a token class to the tokenizer. The
/// tokenizer takes ownership of the token and
/// deletes it when no longer needed. Comment
/// and whitespace tokens will be marked as
/// ignorable, which means that next() will not
/// return them.
void addToken(Token * pToken, bool ignore);
/// Adds a token class to the tokenizer. The
/// tokenizer takes ownership of the token and
/// deletes it when no longer needed.
/// If ignore is true, the token will be marked
/// as ignorable, which means that next() will
/// not return it.
const Token * next();
/// Extracts the next token from the input stream.
/// Returns a pointer to an EOFToken if there are
/// no more characters to read.
/// Returns a pointer to an InvalidToken if an
/// invalid character is encountered.
/// If a token is marked as ignorable, it will not
/// be returned, and the next token will be
/// examined.
/// Never returns a NULL pointer.
/// You must not delete the token returned by next().
private:
struct TokenInfo
{
Token * pToken;
bool ignore;
};
typedef std::vector<TokenInfo> TokenVec;
TokenVec _tokens;
std::istream * _pIstr;
InvalidToken _invalidToken;
EOFToken _eofToken;
};
} // namespace Poco
#endif // Foundation_StreamTokenizer_INCLUDED

View File

@ -1,132 +0,0 @@
//
// SynchronizedObject.h
//
// Library: Foundation
// Package: Threading
// Module: SynchronizedObject
//
// Definition of the SynchronizedObject class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_SynchronizedObject_INCLUDED
#define Foundation_SynchronizedObject_INCLUDED
#include "Poco/Event.h"
#include "Poco/Foundation.h"
#include "Poco/Mutex.h"
namespace Poco
{
class Foundation_API SynchronizedObject
/// This class aggregates a Mutex and an Event
/// and can act as a base class for all objects
/// requiring synchronization in a multithreaded
/// scenario.
{
public:
typedef Poco::ScopedLock<SynchronizedObject> ScopedLock;
SynchronizedObject();
/// Creates the object.
virtual ~SynchronizedObject();
/// Destroys the object.
void lock() const;
/// Locks the object. Blocks if the object
/// is locked by another thread.
bool tryLock() const;
/// Tries to lock the object. Returns false immediately
/// if the object is already locked by another thread.
/// Returns true if the object was successfully locked.
void unlock() const;
/// Unlocks the object so that it can be locked by
/// other threads.
void notify() const;
/// Signals the object.
/// Exactly one thread waiting for the object
/// can resume execution.
void wait() const;
/// Waits for the object to become signalled.
void wait(long milliseconds) const;
/// Waits for the object to become signalled.
/// Throws a TimeoutException if the object
/// does not become signalled within the specified
/// time interval.
bool tryWait(long milliseconds) const;
/// Waits for the object to become signalled.
/// Returns true if the object
/// became signalled within the specified
/// time interval, false otherwise.
private:
mutable Mutex _mutex;
mutable Event _event;
};
//
// inlines
//
inline void SynchronizedObject::lock() const
{
_mutex.lock();
}
inline bool SynchronizedObject::tryLock() const
{
return _mutex.tryLock();
}
inline void SynchronizedObject::unlock() const
{
_mutex.unlock();
}
inline void SynchronizedObject::notify() const
{
_event.set();
}
inline void SynchronizedObject::wait() const
{
_event.wait();
}
inline void SynchronizedObject::wait(long milliseconds) const
{
_event.wait(milliseconds);
}
inline bool SynchronizedObject::tryWait(long milliseconds) const
{
return _event.tryWait(milliseconds);
}
} // namespace Poco
#endif // Foundation_SynchronizedObject_INCLUDED
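A small sketch (not part of the original header) of the wait/notify and ScopedLock usage described above; Poco::Thread and Poco::Runnable are assumed from the same Foundation library:

#include "Poco/SynchronizedObject.h"
#include "Poco/Thread.h"
#include "Poco/Runnable.h"
#include <iostream>

class Ready : public Poco::SynchronizedObject, public Poco::Runnable
{
public:
    void run() { notify(); }   // worker thread signals the object
};

int main()
{
    Ready ready;
    Poco::Thread thread;
    thread.start(ready);
    if (ready.tryWait(1000))   // wait up to one second for the signal
        std::cout << "signalled\n";
    thread.join();

    Ready::ScopedLock guard(ready);   // locks via the aggregated mutex, unlocks on scope exit
    return 0;
}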

View File

@ -1,135 +0,0 @@
//
// UnWindows.h
//
// Library: Foundation
// Package: Core
// Module: UnWindows
//
// A wrapper around the <windows.h> header file that #undef's some
// of the macros for function names defined by <windows.h> that
// are a frequent source of conflicts (e.g., GetUserName).
//
// Remember that most of the WIN32 API functions come in two variants,
// an ANSI variant (e.g., GetUserNameA) and a Unicode variant (GetUserNameW).
// There is also a macro (GetUserName) that's either defined to be the Unicode
// name or the ANSI name, depending on whether the UNICODE macro is #define'd
// or not. POCO always calls the Unicode or ANSI functions directly (depending
// on whether POCO_WIN32_UTF8 is #define'd or not), so the macros are not needed.
//
// These macro definitions are a frequent cause of problems and naming conflicts,
// especially for C++ programmers. Say, you define a class with a member function named
// GetUserName. Depending on whether "Poco/UnWindows.h" has been included by a particular
// translation unit or not, this might be changed to GetUserNameA/GetUserNameW, or not.
// While, due to naming conventions used, this is less of a problem in POCO, some
// of the users of POCO might use a different naming convention where this can become
// a problem.
//
// To disable the #undef's, compile POCO with the POCO_NO_UNWINDOWS macro #define'd.
//
// Copyright (c) 2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_UnWindows_INCLUDED
#define Foundation_UnWindows_INCLUDED
// Reduce bloat
// Microsoft Visual C++ includes copies of the Windows header files
// that were current at the time Visual C++ was released.
// The Windows header files use macros to indicate which versions
// of Windows support many programming elements. Therefore, you must
// define these macros to use new functionality introduced in each
// major operating system release. (Individual header files may use
// different macros; therefore, if compilation problems occur, check
// the header file that contains the definition for conditional
// definitions.) For more information, see SdkDdkVer.h.
# if defined(_WIN32_WINNT)
# if (_WIN32_WINNT < 0x0502)
# error Unsupported Windows version.
# endif
# elif defined(NTDDI_VERSION)
# if (NTDDI_VERSION < 0x05020000)
# error Unsupported Windows version.
# endif
# elif !defined(_WIN32_WINNT)
// Define minimum supported version.
// This can be changed, if needed.
// If allowed (see POCO_MIN_WINDOWS_OS_SUPPORT
// below), Platform_WIN32.h will do its
// best to determine the appropriate values
// and may redefine these. See Platform_WIN32.h
// for details.
# define _WIN32_WINNT 0x0502
# define NTDDI_VERSION 0x05020000
# endif
// To prevent Platform_WIN32.h from modifying the version defines,
// uncomment the following; otherwise the versions will be
// discovered automatically in Platform_WIN32.h.
// #define POCO_FORCE_MIN_WINDOWS_OS_SUPPORT
#include <windows.h>
#if !defined(POCO_NO_UNWINDOWS)
// A list of annoying macros to #undef.
// Extend as required.
# undef GetBinaryType
# undef GetShortPathName
# undef GetLongPathName
# undef GetEnvironmentStrings
# undef SetEnvironmentStrings
# undef FreeEnvironmentStrings
# undef FormatMessage
# undef EncryptFile
# undef DecryptFile
# undef CreateMutex
# undef OpenMutex
# undef CreateEvent
# undef OpenEvent
# undef CreateSemaphore
# undef OpenSemaphore
# undef LoadLibrary
# undef GetModuleFileName
# undef CreateProcess
# undef GetCommandLine
# undef GetEnvironmentVariable
# undef SetEnvironmentVariable
# undef ExpandEnvironmentStrings
# undef OutputDebugString
# undef FindResource
# undef UpdateResource
# undef FindAtom
# undef AddAtom
# undef GetSystemDirectory
# undef GetTempPath
# undef GetTempFileName
# undef SetCurrentDirectory
# undef GetCurrentDirectory
# undef CreateDirectory
# undef RemoveDirectory
# undef CreateFile
# undef DeleteFile
# undef SearchPath
# undef CopyFile
# undef MoveFile
# undef ReplaceFile
# undef GetComputerName
# undef SetComputerName
# undef GetUserName
# undef LogonUser
# undef GetVersion
# undef GetObject
#endif // POCO_NO_UNWINDOWS
#endif // Foundation_UnWindows_INCLUDED
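An illustrative sketch (not part of the original header, Windows only) of the naming conflict the #undef's avoid: with <windows.h> included directly, the GetUserName macro silently renames the member function below to GetUserNameA or GetUserNameW; with "Poco/UnWindows.h" it keeps its name:

#include "Poco/UnWindows.h"
#include <string>

class Session
{
public:
    std::string GetUserName() const { return _user; }   // not rewritten by the macro
private:
    std::string _user;
};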

View File

@ -1,53 +0,0 @@
//
// Windows1250Encoding.h
//
// Library: Foundation
// Package: Text
// Module: Windows1250Encoding
//
// Definition of the Windows1250Encoding class.
//
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Windows1250Encoding_INCLUDED
#define Foundation_Windows1250Encoding_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/TextEncoding.h"
namespace Poco
{
class Foundation_API Windows1250Encoding : public TextEncoding
/// Windows Codepage 1250 text encoding.
/// Based on: http://msdn.microsoft.com/en-us/goglobal/cc305143
{
public:
Windows1250Encoding();
~Windows1250Encoding();
const char * canonicalName() const;
bool isA(const std::string & encodingName) const;
const CharacterMap & characterMap() const;
int convert(const unsigned char * bytes) const;
int convert(int ch, unsigned char * bytes, int length) const;
int queryConvert(const unsigned char * bytes, int length) const;
int sequenceLength(const unsigned char * bytes, int length) const;
private:
static const char * _names[];
static const CharacterMap _charMap;
};
} // namespace Poco
#endif // Foundation_Windows1250Encoding_INCLUDED

View File

@ -1,53 +0,0 @@
//
// Windows1251Encoding.h
//
// Library: Foundation
// Package: Text
// Module: Windows1251Encoding
//
// Definition of the Windows1251Encoding class.
//
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Windows1251Encoding_INCLUDED
#define Foundation_Windows1251Encoding_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/TextEncoding.h"
namespace Poco
{
class Foundation_API Windows1251Encoding : public TextEncoding
/// Windows Codepage 1251 text encoding.
/// Based on: http://msdn.microsoft.com/en-us/goglobal/cc305144
{
public:
Windows1251Encoding();
~Windows1251Encoding();
const char * canonicalName() const;
bool isA(const std::string & encodingName) const;
const CharacterMap & characterMap() const;
int convert(const unsigned char * bytes) const;
int convert(int ch, unsigned char * bytes, int length) const;
int queryConvert(const unsigned char * bytes, int length) const;
int sequenceLength(const unsigned char * bytes, int length) const;
private:
static const char * _names[];
static const CharacterMap _charMap;
};
} // namespace Poco
#endif // Foundation_Windows1251Encoding_INCLUDED

View File

@ -1,52 +0,0 @@
//
// Windows1252Encoding.h
//
// Library: Foundation
// Package: Text
// Module: Windows1252Encoding
//
// Definition of the Windows1252Encoding class.
//
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_Windows1252Encoding_INCLUDED
#define Foundation_Windows1252Encoding_INCLUDED
#include "Poco/Foundation.h"
#include "Poco/TextEncoding.h"
namespace Poco
{
class Foundation_API Windows1252Encoding : public TextEncoding
/// Windows Codepage 1252 text encoding.
{
public:
Windows1252Encoding();
~Windows1252Encoding();
const char * canonicalName() const;
bool isA(const std::string & encodingName) const;
const CharacterMap & characterMap() const;
int convert(const unsigned char * bytes) const;
int convert(int ch, unsigned char * bytes, int length) const;
int queryConvert(const unsigned char * bytes, int length) const;
int sequenceLength(const unsigned char * bytes, int length) const;
private:
static const char * _names[];
static const CharacterMap _charMap;
};
} // namespace Poco
#endif // Foundation_Windows1252Encoding_INCLUDED

View File

@ -1,184 +0,0 @@
//
// WindowsConsoleChannel.h
//
// Library: Foundation
// Package: Logging
// Module: WindowsConsoleChannel
//
// Definition of the WindowsConsoleChannel class.
//
// Copyright (c) 2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_WindowsConsoleChannel_INCLUDED
#define Foundation_WindowsConsoleChannel_INCLUDED
#include "Poco/Channel.h"
#include "Poco/Foundation.h"
#include "Poco/Mutex.h"
#include "Poco/UnWindows.h"
namespace Poco
{
class Foundation_API WindowsConsoleChannel : public Channel
/// A channel that writes to the Windows console.
///
/// Only the message's text is written, followed
/// by a newline.
///
/// If POCO has been compiled with POCO_WIN32_UTF8,
/// log messages are assumed to be UTF-8 encoded, and
/// are converted to UTF-16 prior to writing them to the
/// console. This is the main difference to the ConsoleChannel
/// class, which cannot handle UTF-8 encoded messages on Windows.
///
/// Chain this channel to a FormattingChannel with an
/// appropriate Formatter to control what is contained
/// in the text.
///
/// Only available on Windows platforms.
{
public:
WindowsConsoleChannel();
/// Creates the WindowsConsoleChannel.
void log(const Message & msg);
/// Logs the given message to the channel's stream.
protected:
~WindowsConsoleChannel();
private:
HANDLE _hConsole;
bool _isFile;
};
class Foundation_API WindowsColorConsoleChannel : public Channel
/// A channel that writes to the Windows console.
///
/// Only the message's text is written, followed
/// by a newline.
///
/// If POCO has been compiled with POCO_WIN32_UTF8,
/// log messages are assumed to be UTF-8 encoded, and
/// are converted to UTF-16 prior to writing them to the
/// console. This is the main difference to the ConsoleChannel
/// class, which cannot handle UTF-8 encoded messages on Windows.
///
/// Messages can be colored depending on priority.
///
/// To enable message coloring, set the "enableColors"
/// property to true (default). Furthermore, colors can be
/// configured by setting the following properties
/// (default values are given in parentheses):
///
/// * traceColor (gray)
/// * debugColor (gray)
/// * informationColor (default)
/// * noticeColor (default)
/// * warningColor (yellow)
/// * errorColor (lightRed)
/// * criticalColor (lightRed)
/// * fatalColor (lightRed)
///
/// The following color values are supported:
///
/// * default
/// * black
/// * red
/// * green
/// * brown
/// * blue
/// * magenta
/// * cyan
/// * gray
/// * darkgray
/// * lightRed
/// * lightGreen
/// * yellow
/// * lightBlue
/// * lightMagenta
/// * lightCyan
/// * white
///
/// Chain this channel to a FormattingChannel with an
/// appropriate Formatter to control what is contained
/// in the text.
///
/// Only available on Windows platforms.
{
public:
WindowsColorConsoleChannel();
/// Creates the WindowsColorConsoleChannel.
void log(const Message & msg);
/// Logs the given message to the channel's stream.
void setProperty(const std::string & name, const std::string & value);
/// Sets the property with the given name.
///
/// The following properties are supported:
/// * enableColors: Enable or disable colors.
/// * traceColor: Specify color for trace messages.
/// * debugColor: Specify color for debug messages.
/// * informationColor: Specify color for information messages.
/// * noticeColor: Specify color for notice messages.
/// * warningColor: Specify color for warning messages.
/// * errorColor: Specify color for error messages.
/// * criticalColor: Specify color for critical messages.
/// * fatalColor: Specify color for fatal messages.
///
/// See the class documentation for a list of supported color values.
std::string getProperty(const std::string & name) const;
/// Returns the value of the property with the given name.
/// See setProperty() for a description of the supported
/// properties.
protected:
enum Color
{
CC_BLACK = 0x0000,
CC_RED = 0x0004,
CC_GREEN = 0x0002,
CC_BROWN = 0x0006,
CC_BLUE = 0x0001,
CC_MAGENTA = 0x0005,
CC_CYAN = 0x0003,
CC_GRAY = 0x0007,
CC_DARKGRAY = 0x0008,
CC_LIGHTRED = 0x000C,
CC_LIGHTGREEN = 0x000A,
CC_YELLOW = 0x000E,
CC_LIGHTBLUE = 0x0009,
CC_LIGHTMAGENTA = 0x000D,
CC_LIGHTCYAN = 0x000B,
CC_WHITE = 0x000F
};
~WindowsColorConsoleChannel();
WORD parseColor(const std::string & color) const;
std::string formatColor(WORD color) const;
void initColors();
private:
bool _enableColors;
HANDLE _hConsole;
bool _isFile;
WORD _colors[9];
};
} // namespace Poco
#endif // Foundation_WindowsConsoleChannel_INCLUDED
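A minimal usage sketch (not part of the original header, Windows only); Poco::AutoPtr and Poco::Message are assumed from the same Foundation library:

#include "Poco/WindowsConsoleChannel.h"
#include "Poco/AutoPtr.h"
#include "Poco/Message.h"

int main()
{
    Poco::AutoPtr<Poco::WindowsColorConsoleChannel> channel(new Poco::WindowsColorConsoleChannel);
    channel->setProperty("warningColor", "yellow");
    channel->setProperty("errorColor", "lightRed");
    channel->log(Poco::Message("app", "disk nearly full", Poco::Message::PRIO_WARNING));
    return 0;
}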

View File

@ -1,160 +0,0 @@
//
// Base32Decoder.cpp
//
// Library: Foundation
// Package: Streams
// Module: Base32
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Base32Decoder.h"
#include "Poco/Base32Encoder.h"
#include "Poco/Exception.h"
#include "Poco/Mutex.h"
#include <cstring>
namespace Poco {
unsigned char Base32DecoderBuf::IN_ENCODING[256];
bool Base32DecoderBuf::IN_ENCODING_INIT = false;
namespace
{
static FastMutex mutex;
}
Base32DecoderBuf::Base32DecoderBuf(std::istream& istr):
_groupLength(0),
_groupIndex(0),
_buf(*istr.rdbuf())
{
FastMutex::ScopedLock lock(mutex);
if (!IN_ENCODING_INIT)
{
for (unsigned i = 0; i < sizeof(IN_ENCODING); i++)
{
IN_ENCODING[i] = 0xFF;
}
for (unsigned i = 0; i < sizeof(Base32EncoderBuf::OUT_ENCODING); i++)
{
IN_ENCODING[Base32EncoderBuf::OUT_ENCODING[i]] = i;
}
IN_ENCODING[static_cast<unsigned char>('=')] = '\0';
IN_ENCODING_INIT = true;
}
}
Base32DecoderBuf::~Base32DecoderBuf()
{
}
int Base32DecoderBuf::readFromDevice()
{
if (_groupIndex < _groupLength)
{
return _group[_groupIndex++];
}
else
{
unsigned char buffer[8];
std::memset(buffer, '=', sizeof(buffer));
int c;
// per RFC-4648, Section 6, permissible block lengths are:
// 2, 4, 5, 7, and 8 bytes. Any other length is malformed.
//
do {
if ((c = readOne()) == -1) return -1;
buffer[0] = (unsigned char) c;
if (IN_ENCODING[buffer[0]] == 0xFF) throw DataFormatException();
if ((c = readOne()) == -1) throw DataFormatException();
buffer[1] = (unsigned char) c;
if (IN_ENCODING[buffer[1]] == 0xFF) throw DataFormatException();
if ((c = readOne()) == -1) break;
buffer[2] = (unsigned char) c;
if (IN_ENCODING[buffer[2]] == 0xFF) throw DataFormatException();
if ((c = readOne()) == -1) throw DataFormatException();
buffer[3] = (unsigned char) c;
if (IN_ENCODING[buffer[3]] == 0xFF) throw DataFormatException();
if ((c = readOne()) == -1) break;
buffer[4] = (unsigned char) c;
if (IN_ENCODING[buffer[4]] == 0xFF) throw DataFormatException();
if ((c = readOne()) == -1) break;
buffer[5] = (unsigned char) c;
if (IN_ENCODING[buffer[5]] == 0xFF) throw DataFormatException();
if ((c = readOne()) == -1) throw DataFormatException();
buffer[6] = (unsigned char) c;
if (IN_ENCODING[buffer[6]] == 0xFF) throw DataFormatException();
if ((c = readOne()) == -1) break;
buffer[7] = (unsigned char) c;
if (IN_ENCODING[buffer[7]] == 0xFF) throw DataFormatException();
} while (false);
_group[0] = (IN_ENCODING[buffer[0]] << 3) | (IN_ENCODING[buffer[1]] >> 2);
_group[1] = ((IN_ENCODING[buffer[1]] & 0x03) << 6) | (IN_ENCODING[buffer[2]] << 1) | (IN_ENCODING[buffer[3]] >> 4);
_group[2] = ((IN_ENCODING[buffer[3]] & 0x0F) << 4) | (IN_ENCODING[buffer[4]] >> 1);
_group[3] = ((IN_ENCODING[buffer[4]] & 0x01) << 7) | (IN_ENCODING[buffer[5]] << 2) | (IN_ENCODING[buffer[6]] >> 3);
_group[4] = ((IN_ENCODING[buffer[6]] & 0x07) << 5) | IN_ENCODING[buffer[7]];
if (buffer[2] == '=')
_groupLength = 1;
else if (buffer[4] == '=')
_groupLength = 2;
else if (buffer[5] == '=')
_groupLength = 3;
else if (buffer[7] == '=')
_groupLength = 4;
else
_groupLength = 5;
_groupIndex = 1;
return _group[0];
}
}
int Base32DecoderBuf::readOne()
{
int ch = _buf.sbumpc();
return ch;
}
Base32DecoderIOS::Base32DecoderIOS(std::istream& istr): _buf(istr)
{
poco_ios_init(&_buf);
}
Base32DecoderIOS::~Base32DecoderIOS()
{
}
Base32DecoderBuf* Base32DecoderIOS::rdbuf()
{
return &_buf;
}
Base32Decoder::Base32Decoder(std::istream& istr): Base32DecoderIOS(istr), std::istream(&_buf)
{
}
Base32Decoder::~Base32Decoder()
{
}
} // namespace Poco

View File

@ -1,202 +0,0 @@
//
// Base32Encoder.cpp
//
// Library: Foundation
// Package: Streams
// Module: Base32
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Base32Encoder.h"
namespace Poco {
const unsigned char Base32EncoderBuf::OUT_ENCODING[32] =
{
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z', '2', '3', '4', '5', '6', '7',
};
Base32EncoderBuf::Base32EncoderBuf(std::ostream& ostr, bool padding):
_groupLength(0),
_buf(*ostr.rdbuf()),
_doPadding(padding)
{
}
Base32EncoderBuf::~Base32EncoderBuf()
{
try
{
close();
}
catch (...)
{
}
}
int Base32EncoderBuf::writeToDevice(char c)
{
static const int eof = std::char_traits<char>::eof();
_group[_groupLength++] = (unsigned char) c;
if (_groupLength == 5)
{
unsigned char idx;
idx = _group[0] >> 3;
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[0] & 0x07) << 2) | (_group[1] >> 6);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x3E) >> 1);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x01) << 4) | (_group[2] >> 4);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[2] & 0x0F) << 1) | (_group[3] >> 7);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[3] & 0x7C) >> 2);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[3] & 0x03) << 3) | (_group[4] >> 5);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = (_group[4] & 0x1F);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
_groupLength = 0;
}
return charToInt(c);
}
int Base32EncoderBuf::close()
{
static const int eof = std::char_traits<char>::eof();
if (sync() == eof) return eof;
if (_groupLength == 1)
{
_group[1] = 0;
unsigned char idx;
idx = _group[0] >> 3;
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[0] & 0x07) << 2);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
if (_doPadding) {
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
}
}
else if (_groupLength == 2)
{
_group[2] = 0;
unsigned char idx;
idx = _group[0] >> 3;
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[0] & 0x07) << 2) | (_group[1] >> 6);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x3E) >> 1);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x01) << 4);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
if (_doPadding) {
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
}
}
else if (_groupLength == 3)
{
_group[3] = 0;
unsigned char idx;
idx = _group[0] >> 3;
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[0] & 0x07) << 2) | (_group[1] >> 6);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x3E) >> 1);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x01) << 4) | (_group[2] >> 4);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[2] & 0x0F) << 1);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
if (_doPadding) {
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
if (_buf.sputc('=') == eof) return eof;
}
}
else if (_groupLength == 4)
{
_group[4] = 0;
unsigned char idx;
idx = _group[0] >> 3;
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[0] & 0x07) << 2) | (_group[1] >> 6);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x3E) >> 1);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[1] & 0x01) << 4) | (_group[2] >> 4);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[2] & 0x0F) << 1) | (_group[3] >> 7);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[3] & 0x7C) >> 2);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
idx = ((_group[3] & 0x03) << 3);
if (_buf.sputc(OUT_ENCODING[idx]) == eof) return eof;
if (_doPadding && _buf.sputc('=') == eof) return eof;
}
_groupLength = 0;
return _buf.pubsync();
}
Base32EncoderIOS::Base32EncoderIOS(std::ostream& ostr, bool padding):
_buf(ostr, padding)
{
poco_ios_init(&_buf);
}
Base32EncoderIOS::~Base32EncoderIOS()
{
}
int Base32EncoderIOS::close()
{
return _buf.close();
}
Base32EncoderBuf* Base32EncoderIOS::rdbuf()
{
return &_buf;
}
Base32Encoder::Base32Encoder(std::ostream& ostr, bool padding):
Base32EncoderIOS(ostr, padding), std::ostream(&_buf)
{
}
Base32Encoder::~Base32Encoder()
{
}
} // namespace Poco
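A round-trip sketch (not part of the original sources) using the two stream classes above; the encoded value for "hello world" follows RFC 4648:

#include "Poco/Base32Encoder.h"
#include "Poco/Base32Decoder.h"
#include <sstream>
#include <iostream>

int main()
{
    std::ostringstream out;
    Poco::Base32Encoder encoder(out);
    encoder << "hello world";
    encoder.close();                    // flushes and pads the final group
    std::cout << out.str() << '\n';     // NBSWY3DPEB3W64TMMQ======

    std::istringstream in(out.str());
    Poco::Base32Decoder decoder(in);
    std::string decoded;
    std::getline(decoder, decoded);
    std::cout << decoded << '\n';       // hello world
    return 0;
}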

View File

@ -1,221 +0,0 @@
//
// EventLogChannel.cpp
//
// Library: Foundation
// Package: Logging
// Module: EventLogChannel
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/EventLogChannel.h"
#include "Poco/Message.h"
#include "Poco/String.h"
#include "pocomsg.h"
namespace Poco {
const std::string EventLogChannel::PROP_NAME = "name";
const std::string EventLogChannel::PROP_HOST = "host";
const std::string EventLogChannel::PROP_LOGHOST = "loghost";
const std::string EventLogChannel::PROP_LOGFILE = "logfile";
EventLogChannel::EventLogChannel():
_logFile("Application"),
_h(0)
{
const DWORD maxPathLen = MAX_PATH + 1;
char name[maxPathLen];
int n = GetModuleFileNameA(NULL, name, maxPathLen);
if (n > 0)
{
char* end = name + n - 1;
while (end > name && *end != '\\') --end;
if (*end == '\\') ++end;
_name = end;
}
}
EventLogChannel::EventLogChannel(const std::string& name):
_name(name),
_logFile("Application"),
_h(0)
{
}
EventLogChannel::EventLogChannel(const std::string& name, const std::string& host):
_name(name),
_host(host),
_logFile("Application"),
_h(0)
{
}
EventLogChannel::~EventLogChannel()
{
try
{
close();
}
catch (...)
{
poco_unexpected();
}
}
void EventLogChannel::open()
{
setUpRegistry();
_h = RegisterEventSource(_host.empty() ? NULL : _host.c_str(), _name.c_str());
if (!_h) throw SystemException("cannot register event source");
}
void EventLogChannel::close()
{
if (_h) DeregisterEventSource(_h);
_h = 0;
}
void EventLogChannel::log(const Message& msg)
{
if (!_h) open();
const char* pMsg = msg.getText().c_str();
ReportEvent(_h, getType(msg), getCategory(msg), POCO_MSG_LOG, NULL, 1, 0, &pMsg, NULL);
}
void EventLogChannel::setProperty(const std::string& name, const std::string& value)
{
if (icompare(name, PROP_NAME) == 0)
_name = value;
else if (icompare(name, PROP_HOST) == 0)
_host = value;
else if (icompare(name, PROP_LOGHOST) == 0)
_host = value;
else if (icompare(name, PROP_LOGFILE) == 0)
_logFile = value;
else
Channel::setProperty(name, value);
}
std::string EventLogChannel::getProperty(const std::string& name) const
{
if (icompare(name, PROP_NAME) == 0)
return _name;
else if (icompare(name, PROP_HOST) == 0)
return _host;
else if (icompare(name, PROP_LOGHOST) == 0)
return _host;
else if (icompare(name, PROP_LOGFILE) == 0)
return _logFile;
else
return Channel::getProperty(name);
}
int EventLogChannel::getType(const Message& msg)
{
switch (msg.getPriority())
{
case Message::PRIO_TRACE:
case Message::PRIO_DEBUG:
case Message::PRIO_INFORMATION:
return EVENTLOG_INFORMATION_TYPE;
case Message::PRIO_NOTICE:
case Message::PRIO_WARNING:
return EVENTLOG_WARNING_TYPE;
default:
return EVENTLOG_ERROR_TYPE;
}
}
int EventLogChannel::getCategory(const Message& msg)
{
switch (msg.getPriority())
{
case Message::PRIO_TRACE:
return POCO_CTG_TRACE;
case Message::PRIO_DEBUG:
return POCO_CTG_DEBUG;
case Message::PRIO_INFORMATION:
return POCO_CTG_INFORMATION;
case Message::PRIO_NOTICE:
return POCO_CTG_NOTICE;
case Message::PRIO_WARNING:
return POCO_CTG_WARNING;
case Message::PRIO_ERROR:
return POCO_CTG_ERROR;
case Message::PRIO_CRITICAL:
return POCO_CTG_CRITICAL;
case Message::PRIO_FATAL:
return POCO_CTG_FATAL;
default:
return 0;
}
}
void EventLogChannel::setUpRegistry() const
{
std::string key = "SYSTEM\\CurrentControlSet\\Services\\EventLog\\";
key.append(_logFile);
key.append("\\");
key.append(_name);
HKEY hKey;
DWORD disp;
DWORD rc = RegCreateKeyEx(HKEY_LOCAL_MACHINE, key.c_str(), 0, NULL, REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, NULL, &hKey, &disp);
if (rc != ERROR_SUCCESS) return;
if (disp == REG_CREATED_NEW_KEY)
{
std::string path = findLibrary("PocoMsg.dll");
if (!path.empty())
{
DWORD count = 8;
DWORD types = 7;
RegSetValueEx(hKey, "CategoryMessageFile", 0, REG_SZ, (const BYTE*) path.c_str(), static_cast<DWORD>(path.size() + 1));
RegSetValueEx(hKey, "EventMessageFile", 0, REG_SZ, (const BYTE*) path.c_str(), static_cast<DWORD>(path.size() + 1));
RegSetValueEx(hKey, "CategoryCount", 0, REG_DWORD, (const BYTE*) &count, static_cast<DWORD>(sizeof(count)));
RegSetValueEx(hKey, "TypesSupported", 0, REG_DWORD, (const BYTE*) &types, static_cast<DWORD>(sizeof(types)));
}
}
RegCloseKey(hKey);
}
std::string EventLogChannel::findLibrary(const char* name)
{
std::string path;
HMODULE dll = LoadLibraryA(name);
if (dll)
{
const DWORD maxPathLen = MAX_PATH + 1;
char buffer[maxPathLen];
int n = GetModuleFileNameA(dll, buffer, maxPathLen);
if (n > 0) path = buffer;
FreeLibrary(dll);
}
return path;
}
} // namespace Poco
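A minimal usage sketch (not part of the original source, Windows only); on the first open() the channel registers itself under the "Application" log as shown in setUpRegistry() above. Poco::AutoPtr and Poco::Message are assumed from the same Foundation library:

#include "Poco/EventLogChannel.h"
#include "Poco/AutoPtr.h"
#include "Poco/Message.h"

int main()
{
    Poco::AutoPtr<Poco::EventLogChannel> channel(new Poco::EventLogChannel("MyService"));
    channel->open();
    channel->log(Poco::Message("MyService", "service started", Poco::Message::PRIO_INFORMATION));
    channel->close();
    return 0;
}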

View File

@ -1,144 +0,0 @@
//
// FPEnvironment_DEC.cpp
//
// Library: Foundation
// Package: Core
// Module: FPEnvironment
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
//
// _XOPEN_SOURCE disables the ieee fp functions
// in <math.h>, therefore we undefine it for this file.
//
#undef _XOPEN_SOURCE
#include <math.h>
#include <fp.h>
#include <fp_class.h>
#include "Poco/FPEnvironment_DEC.h"
namespace Poco {
FPEnvironmentImpl::FPEnvironmentImpl()
{
_env = ieee_get_fp_control();
}
FPEnvironmentImpl::FPEnvironmentImpl(const FPEnvironmentImpl& env)
{
_env = env._env;
}
FPEnvironmentImpl::~FPEnvironmentImpl()
{
ieee_set_fp_control(_env);
}
FPEnvironmentImpl& FPEnvironmentImpl::operator = (const FPEnvironmentImpl& env)
{
_env = env._env;
return *this;
}
bool FPEnvironmentImpl::isInfiniteImpl(float value)
{
int cls = fp_classf(value);
return cls == FP_POS_INF || cls == FP_NEG_INF;
}
bool FPEnvironmentImpl::isInfiniteImpl(double value)
{
int cls = fp_class(value);
return cls == FP_POS_INF || cls == FP_NEG_INF;
}
bool FPEnvironmentImpl::isInfiniteImpl(long double value)
{
int cls = fp_classl(value);
return cls == FP_POS_INF || cls == FP_NEG_INF;
}
bool FPEnvironmentImpl::isNaNImpl(float value)
{
return isnanf(value) != 0;
}
bool FPEnvironmentImpl::isNaNImpl(double value)
{
return isnan(value) != 0;
}
bool FPEnvironmentImpl::isNaNImpl(long double value)
{
return isnanl(value) != 0;
}
float FPEnvironmentImpl::copySignImpl(float target, float source)
{
return copysignf(target, source);
}
double FPEnvironmentImpl::copySignImpl(double target, double source)
{
return copysign(target, source);
}
long double FPEnvironmentImpl::copySignImpl(long double target, long double source)
{
return copysignl(target, source);
}
void FPEnvironmentImpl::keepCurrentImpl()
{
ieee_set_fp_control(_env);
}
void FPEnvironmentImpl::clearFlagsImpl()
{
ieee_set_fp_control(0);
}
bool FPEnvironmentImpl::isFlagImpl(FlagImpl flag)
{
return (ieee_get_fp_control() & flag) != 0;
}
void FPEnvironmentImpl::setRoundingModeImpl(RoundingModeImpl mode)
{
// not supported
}
FPEnvironmentImpl::RoundingModeImpl FPEnvironmentImpl::getRoundingModeImpl()
{
// not supported
return FPEnvironmentImpl::RoundingModeImpl(0);
}
} // namespace Poco

View File

@ -1,79 +0,0 @@
//
// FPEnvironment_C99.cpp
//
// Library: Foundation
// Package: Core
// Module: FPEnvironment
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/FPEnvironment_DUMMY.h"
namespace Poco {
FPEnvironmentImpl::RoundingModeImpl FPEnvironmentImpl::_roundingMode;
FPEnvironmentImpl::FPEnvironmentImpl()
{
}
FPEnvironmentImpl::FPEnvironmentImpl(const FPEnvironmentImpl& env)
{
}
FPEnvironmentImpl::~FPEnvironmentImpl()
{
}
FPEnvironmentImpl& FPEnvironmentImpl::operator = (const FPEnvironmentImpl& env)
{
return *this;
}
void FPEnvironmentImpl::keepCurrentImpl()
{
}
void FPEnvironmentImpl::clearFlagsImpl()
{
}
bool FPEnvironmentImpl::isFlagImpl(FlagImpl flag)
{
return false;
}
void FPEnvironmentImpl::setRoundingModeImpl(RoundingModeImpl mode)
{
_roundingMode = mode;
}
FPEnvironmentImpl::RoundingModeImpl FPEnvironmentImpl::getRoundingModeImpl()
{
return _roundingMode;
}
long double FPEnvironmentImpl::copySignImpl(long double target, long double source)
{
return (source >= 0 && target >= 0) || (source < 0 && target < 0) ? target : -target;
}
} // namespace Poco

View File

@ -1,82 +0,0 @@
//
// FPEnvironment_QNX.cpp
//
// Library: Foundation
// Package: Core
// Module: FPEnvironment
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/FPEnvironment_QNX.h"
namespace Poco {
FPEnvironmentImpl::FPEnvironmentImpl()
{
fegetenv(&_env);
}
FPEnvironmentImpl::FPEnvironmentImpl(const FPEnvironmentImpl& env)
{
_env = env._env;
}
FPEnvironmentImpl::~FPEnvironmentImpl()
{
fesetenv(&_env);
}
FPEnvironmentImpl& FPEnvironmentImpl::operator = (const FPEnvironmentImpl& env)
{
_env = env._env;
return *this;
}
void FPEnvironmentImpl::keepCurrentImpl()
{
fegetenv(&_env);
}
void FPEnvironmentImpl::clearFlagsImpl()
{
feclearexcept(FE_ALL_EXCEPT);
}
bool FPEnvironmentImpl::isFlagImpl(FlagImpl flag)
{
return fetestexcept(flag) != 0;
}
void FPEnvironmentImpl::setRoundingModeImpl(RoundingModeImpl mode)
{
fesetround(mode);
}
FPEnvironmentImpl::RoundingModeImpl FPEnvironmentImpl::getRoundingModeImpl()
{
return (RoundingModeImpl) fegetround();
}
long double FPEnvironmentImpl::copySignImpl(long double target, long double source)
{
return (source >= 0 && target >= 0) || (source < 0 && target < 0) ? target : -target;
}
} // namespace Poco
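A small sketch (not part of the original sources), assuming the public Poco::FPEnvironment facade forwards to the *Impl methods shown in these platform files:

#include "Poco/FPEnvironment.h"
#include <iostream>
#include <limits>

int main()
{
    double inf = std::numeric_limits<double>::infinity();
    std::cout << std::boolalpha
              << Poco::FPEnvironment::isInfinite(inf) << ' '       // true
              << Poco::FPEnvironment::isNaN(inf - inf) << ' '      // true
              << Poco::FPEnvironment::copySign(3.0, -1.0) << '\n'; // -3
    return 0;
}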

View File

@ -1,119 +0,0 @@
//
// Latin1Encoding.cpp
//
// Library: Foundation
// Package: Text
// Module: Latin1Encoding
//
// Copyright (c) 2004-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Latin1Encoding.h"
#include "Poco/String.h"
namespace Poco {
const char* Latin1Encoding::_names[] =
{
"ISO-8859-1",
"Latin1",
"Latin-1",
NULL
};
const TextEncoding::CharacterMap Latin1Encoding::_charMap =
{
/* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f */
/* 00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
/* 10 */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
/* 20 */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
/* 30 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
/* 40 */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
/* 50 */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
/* 60 */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
/* 70 */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
/* 80 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
/* 90 */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
/* a0 */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
/* b0 */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
/* c0 */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
/* d0 */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
/* e0 */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
/* f0 */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
};
Latin1Encoding::Latin1Encoding()
{
}
Latin1Encoding::~Latin1Encoding()
{
}
const char* Latin1Encoding::canonicalName() const
{
return _names[0];
}
bool Latin1Encoding::isA(const std::string& encodingName) const
{
for (const char** name = _names; *name; ++name)
{
if (Poco::icompare(encodingName, *name) == 0)
return true;
}
return false;
}
const TextEncoding::CharacterMap& Latin1Encoding::characterMap() const
{
return _charMap;
}
int Latin1Encoding::convert(const unsigned char* bytes) const
{
return *bytes;
}
int Latin1Encoding::convert(int ch, unsigned char* bytes, int length) const
{
if (ch >= 0 && ch <= 255)
{
if (bytes && length >= 1)
*bytes = (unsigned char) ch;
return 1;
}
else return 0;
}
int Latin1Encoding::queryConvert(const unsigned char* bytes, int length) const
{
if (1 <= length)
return *bytes;
else
return -1;
}
int Latin1Encoding::sequenceLength(const unsigned char* bytes, int length) const
{
return 1;
}
} // namespace Poco

View File

@ -1,179 +0,0 @@
//
// Latin2Encoding.cpp
//
// Library: Foundation
// Package: Text
// Module: Latin2Encoding
//
// Copyright (c) 2004-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Latin2Encoding.h"
#include "Poco/String.h"
namespace Poco {
const char* Latin2Encoding::_names[] =
{
"ISO-8859-2",
"Latin2",
"Latin-2",
NULL
};
const TextEncoding::CharacterMap Latin2Encoding::_charMap =
{
/* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f */
/* 00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
/* 10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
/* 20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
/* 30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
/* 40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
/* 50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
/* 60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
/* 70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
/* 80 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
/* 90 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
/* a0 */ 0x00a0, 0x0104, 0x02d8, 0x0141, 0x00a4, 0x013d, 0x015a, 0x00a7, 0x00a8, 0x0160, 0x015e, 0x0164, 0x0179, 0x00ad, 0x017d, 0x017b,
/* b0 */ 0x00b0, 0x0105, 0x02db, 0x0142, 0x00b4, 0x013e, 0x015b, 0x02c7, 0x00b8, 0x0161, 0x015f, 0x0165, 0x017a, 0x02dd, 0x017e, 0x017c,
/* c0 */ 0x0154, 0x00c1, 0x00c2, 0x0102, 0x00c4, 0x0139, 0x0106, 0x00c7, 0x010c, 0x00c9, 0x0118, 0x00cb, 0x011a, 0x00cd, 0x00ce, 0x010e,
/* d0 */ 0x0110, 0x0143, 0x0147, 0x00d3, 0x00d4, 0x0150, 0x00d6, 0x00d7, 0x0158, 0x016e, 0x00da, 0x0170, 0x00dc, 0x00dd, 0x0162, 0x00df,
/* e0 */ 0x0155, 0x00e1, 0x00e2, 0x0103, 0x00e4, 0x013a, 0x0107, 0x00e7, 0x010d, 0x00e9, 0x0119, 0x00eb, 0x011b, 0x00ed, 0x00ee, 0x010f,
/* f0 */ 0x0111, 0x0144, 0x0148, 0x00f3, 0x00f4, 0x0151, 0x00f6, 0x00f7, 0x0159, 0x016f, 0x00fa, 0x0171, 0x00fc, 0x00fd, 0x0163, 0x02d9,
};
Latin2Encoding::Latin2Encoding()
{
}
Latin2Encoding::~Latin2Encoding()
{
}
const char* Latin2Encoding::canonicalName() const
{
return _names[0];
}
bool Latin2Encoding::isA(const std::string& encodingName) const
{
for (const char** name = _names; *name; ++name)
{
if (Poco::icompare(encodingName, *name) == 0)
return true;
}
return false;
}
const TextEncoding::CharacterMap& Latin2Encoding::characterMap() const
{
return _charMap;
}
int Latin2Encoding::convert(const unsigned char* bytes) const
{
return _charMap[*bytes];
}
int Latin2Encoding::convert(int ch, unsigned char* bytes, int length) const
{
if (ch >= 0 && ch <= 255 && _charMap[ch] == ch)
{
if (bytes && length >= 1)
*bytes = (unsigned char) ch;
return 1;
}
switch(ch)
{
case 0x0104: if (bytes && length >= 1) *bytes = 0xa1; return 1;
case 0x02d8: if (bytes && length >= 1) *bytes = 0xa2; return 1;
case 0x0141: if (bytes && length >= 1) *bytes = 0xa3; return 1;
case 0x013d: if (bytes && length >= 1) *bytes = 0xa5; return 1;
case 0x015a: if (bytes && length >= 1) *bytes = 0xa6; return 1;
case 0x0160: if (bytes && length >= 1) *bytes = 0xa9; return 1;
case 0x015e: if (bytes && length >= 1) *bytes = 0xaa; return 1;
case 0x0164: if (bytes && length >= 1) *bytes = 0xab; return 1;
case 0x0179: if (bytes && length >= 1) *bytes = 0xac; return 1;
case 0x017d: if (bytes && length >= 1) *bytes = 0xae; return 1;
case 0x017b: if (bytes && length >= 1) *bytes = 0xaf; return 1;
case 0x0105: if (bytes && length >= 1) *bytes = 0xb1; return 1;
case 0x02db: if (bytes && length >= 1) *bytes = 0xb2; return 1;
case 0x0142: if (bytes && length >= 1) *bytes = 0xb3; return 1;
case 0x013e: if (bytes && length >= 1) *bytes = 0xb5; return 1;
case 0x015b: if (bytes && length >= 1) *bytes = 0xb6; return 1;
case 0x02c7: if (bytes && length >= 1) *bytes = 0xb7; return 1;
case 0x0161: if (bytes && length >= 1) *bytes = 0xb9; return 1;
case 0x015f: if (bytes && length >= 1) *bytes = 0xba; return 1;
case 0x0165: if (bytes && length >= 1) *bytes = 0xbb; return 1;
case 0x017a: if (bytes && length >= 1) *bytes = 0xbc; return 1;
case 0x02dd: if (bytes && length >= 1) *bytes = 0xbd; return 1;
case 0x017e: if (bytes && length >= 1) *bytes = 0xbe; return 1;
case 0x017c: if (bytes && length >= 1) *bytes = 0xbf; return 1;
case 0x0154: if (bytes && length >= 1) *bytes = 0xc0; return 1;
case 0x0102: if (bytes && length >= 1) *bytes = 0xc3; return 1;
case 0x0139: if (bytes && length >= 1) *bytes = 0xc5; return 1;
case 0x0106: if (bytes && length >= 1) *bytes = 0xc6; return 1;
case 0x010c: if (bytes && length >= 1) *bytes = 0xc8; return 1;
case 0x0118: if (bytes && length >= 1) *bytes = 0xca; return 1;
case 0x011a: if (bytes && length >= 1) *bytes = 0xcc; return 1;
case 0x010e: if (bytes && length >= 1) *bytes = 0xcf; return 1;
case 0x0110: if (bytes && length >= 1) *bytes = 0xd0; return 1;
case 0x0143: if (bytes && length >= 1) *bytes = 0xd1; return 1;
case 0x0147: if (bytes && length >= 1) *bytes = 0xd2; return 1;
case 0x0150: if (bytes && length >= 1) *bytes = 0xd5; return 1;
case 0x0158: if (bytes && length >= 1) *bytes = 0xd8; return 1;
case 0x016e: if (bytes && length >= 1) *bytes = 0xd9; return 1;
case 0x0170: if (bytes && length >= 1) *bytes = 0xdb; return 1;
case 0x0162: if (bytes && length >= 1) *bytes = 0xde; return 1;
case 0x0155: if (bytes && length >= 1) *bytes = 0xe0; return 1;
case 0x0103: if (bytes && length >= 1) *bytes = 0xe3; return 1;
case 0x013a: if (bytes && length >= 1) *bytes = 0xe5; return 1;
case 0x0107: if (bytes && length >= 1) *bytes = 0xe6; return 1;
case 0x010d: if (bytes && length >= 1) *bytes = 0xe8; return 1;
case 0x0119: if (bytes && length >= 1) *bytes = 0xea; return 1;
case 0x011b: if (bytes && length >= 1) *bytes = 0xec; return 1;
case 0x010f: if (bytes && length >= 1) *bytes = 0xef; return 1;
case 0x0111: if (bytes && length >= 1) *bytes = 0xf0; return 1;
case 0x0144: if (bytes && length >= 1) *bytes = 0xf1; return 1;
case 0x0148: if (bytes && length >= 1) *bytes = 0xf2; return 1;
case 0x0151: if (bytes && length >= 1) *bytes = 0xf5; return 1;
case 0x0159: if (bytes && length >= 1) *bytes = 0xf8; return 1;
case 0x016f: if (bytes && length >= 1) *bytes = 0xf9; return 1;
case 0x0171: if (bytes && length >= 1) *bytes = 0xfb; return 1;
case 0x0163: if (bytes && length >= 1) *bytes = 0xfe; return 1;
case 0x02d9: if (bytes && length >= 1) *bytes = 0xff; return 1;
default: return 0;
}
}
int Latin2Encoding::queryConvert(const unsigned char* bytes, int length) const
{
if (1 <= length)
return _charMap[*bytes];
else
return -1;
}
int Latin2Encoding::sequenceLength(const unsigned char* bytes, int length) const
{
return 1;
}
} // namespace Poco

View File

@ -1,130 +0,0 @@
//
// Latin9Encoding.cpp
//
// Library: Foundation
// Package: Text
// Module: Latin9Encoding
//
// Copyright (c) 2004-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Latin9Encoding.h"
#include "Poco/String.h"
namespace Poco {
const char* Latin9Encoding::_names[] =
{
"ISO-8859-15",
"Latin9",
"Latin-9",
NULL
};
const TextEncoding::CharacterMap Latin9Encoding::_charMap =
{
/* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f */
/* 00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
/* 10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
/* 20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
/* 30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
/* 40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
/* 50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
/* 60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
/* 70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
/* 80 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
/* 90 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
/* a0 */ 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x20ac, 0x00a5, 0x0160, 0x00a7, 0x0161, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
/* b0 */ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x017d, 0x00b5, 0x00b6, 0x00b7, 0x017e, 0x00b9, 0x00ba, 0x00bb, 0x0152, 0x0153, 0x0178, 0x00bf,
/* c0 */ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7, 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
/* d0 */ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7, 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
/* e0 */ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7, 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
/* f0 */ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7, 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
};
Latin9Encoding::Latin9Encoding()
{
}
Latin9Encoding::~Latin9Encoding()
{
}
const char* Latin9Encoding::canonicalName() const
{
return _names[0];
}
bool Latin9Encoding::isA(const std::string& encodingName) const
{
for (const char** name = _names; *name; ++name)
{
if (Poco::icompare(encodingName, *name) == 0)
return true;
}
return false;
}
const TextEncoding::CharacterMap& Latin9Encoding::characterMap() const
{
return _charMap;
}
int Latin9Encoding::convert(const unsigned char* bytes) const
{
return _charMap[*bytes];
}
int Latin9Encoding::convert(int ch, unsigned char* bytes, int length) const
{
if (ch >= 0 && ch <= 255 && _charMap[ch] == ch)
{
if (bytes && length >= 1)
*bytes = ch;
return 1;
}
else switch (ch)
{
case 0x0152: if (bytes && length >= 1) *bytes = 0xbc; return 1;
case 0x0153: if (bytes && length >= 1) *bytes = 0xbd; return 1;
case 0x0160: if (bytes && length >= 1) *bytes = 0xa6; return 1;
case 0x0161: if (bytes && length >= 1) *bytes = 0xa8; return 1;
case 0x017d: if (bytes && length >= 1) *bytes = 0xb4; return 1;
case 0x017e: if (bytes && length >= 1) *bytes = 0xb8; return 1;
case 0x0178: if (bytes && length >= 1) *bytes = 0xbe; return 1;
case 0x20ac: if (bytes && length >= 1) *bytes = 0xa4; return 1;
default: return 0;
}
}
int Latin9Encoding::queryConvert(const unsigned char* bytes, int length) const
{
if (1 <= length)
return _charMap[*bytes];
else
return -1;
}
int Latin9Encoding::sequenceLength(const unsigned char* bytes, int length) const
{
return 1;
}
} // namespace Poco
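A small sketch (not part of the original source) exercising the Euro-sign mapping handled by the switch above; the same pattern applies to the Latin1 and Latin2 encodings:

#include "Poco/Latin9Encoding.h"
#include <iostream>

int main()
{
    Poco::Latin9Encoding latin9;
    unsigned char bytes[4];
    int n = latin9.convert(0x20ac, bytes, sizeof(bytes));   // Euro sign -> single byte 0xa4
    int ch = latin9.convert(bytes);                          // back to the Unicode code point
    std::cout << n << ' ' << std::hex << ch << '\n';         // prints: 1 20ac
    return 0;
}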

View File

@ -1,278 +0,0 @@
//
// MD4Engine.cpp
//
// Library: Foundation
// Package: Crypt
// Module: MD4Engine
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
//
// MD4 (RFC 1320) algorithm:
// Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
// rights reserved.
//
// License to copy and use this software is granted provided that it
// is identified as the "RSA Data Security, Inc. MD4 Message-Digest
// Algorithm" in all material mentioning or referencing this software
// or this function.
//
// License is also granted to make and use derivative works provided
// that such works are identified as "derived from the RSA Data
// Security, Inc. MD4 Message-Digest Algorithm" in all material
// mentioning or referencing the derived work.
//
// RSA Data Security, Inc. makes no representations concerning either
// the merchantability of this software or the suitability of this
// software for any particular purpose. It is provided "as is"
// without express or implied warranty of any kind.
//
// These notices must be retained in any copies of any part of this
// documentation and/or software.
//
#include "Poco/MD4Engine.h"
#include <cstring>
namespace Poco {
MD4Engine::MD4Engine()
{
_digest.reserve(16);
reset();
}
MD4Engine::~MD4Engine()
{
reset();
}
void MD4Engine::updateImpl(const void* input_, std::size_t inputLen)
{
const unsigned char* input = (const unsigned char*) input_;
unsigned int i, index, partLen;
/* Compute number of bytes mod 64 */
index = (unsigned int)((_context.count[0] >> 3) & 0x3F);
/* Update number of bits */
if ((_context.count[0] += ((UInt32) inputLen << 3)) < ((UInt32) inputLen << 3))
_context.count[1]++;
_context.count[1] += ((UInt32) inputLen >> 29);
partLen = 64 - index;
/* Transform as many times as possible. */
if (inputLen >= partLen)
{
std::memcpy(&_context.buffer[index], input, partLen);
transform(_context.state, _context.buffer);
for (i = partLen; i + 63 < inputLen; i += 64)
transform(_context.state, &input[i]);
index = 0;
}
else i = 0;
/* Buffer remaining input */
std::memcpy(&_context.buffer[index], &input[i], inputLen-i);
}
std::size_t MD4Engine::digestLength() const
{
return DIGEST_SIZE;
}
void MD4Engine::reset()
{
std::memset(&_context, 0, sizeof(_context));
_context.count[0] = _context.count[1] = 0;
_context.state[0] = 0x67452301;
_context.state[1] = 0xefcdab89;
_context.state[2] = 0x98badcfe;
_context.state[3] = 0x10325476;
}
const DigestEngine::Digest& MD4Engine::digest()
{
static const unsigned char PADDING[64] =
{
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
unsigned char bits[8];
unsigned int index, padLen;
/* Save number of bits */
encode(bits, _context.count, 8);
/* Pad out to 56 mod 64. */
index = (unsigned int)((_context.count[0] >> 3) & 0x3f);
padLen = (index < 56) ? (56 - index) : (120 - index);
update(PADDING, padLen);
/* Append length (before padding) */
update(bits, 8);
/* Store state in digest */
unsigned char digest[16];
encode(digest, _context.state, 16);
_digest.clear();
_digest.insert(_digest.begin(), digest, digest + sizeof(digest));
/* Zeroize sensitive information. */
std::memset(&_context, 0, sizeof (_context));
reset();
return _digest;
}
/* Constants for MD4Transform routine. */
#define S11 3
#define S12 7
#define S13 11
#define S14 19
#define S21 3
#define S22 5
#define S23 9
#define S24 13
#define S31 3
#define S32 9
#define S33 11
#define S34 15
/* F, G and H are basic MD4 functions. */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
/* ROTATE_LEFT rotates x left n bits. */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG and HH are transformations for rounds 1, 2 and 3 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s) { \
(a) += F ((b), (c), (d)) + (x); \
(a) = ROTATE_LEFT ((a), (s)); \
}
#define GG(a, b, c, d, x, s) { \
(a) += G ((b), (c), (d)) + (x) + (UInt32)0x5a827999; \
(a) = ROTATE_LEFT ((a), (s)); \
}
#define HH(a, b, c, d, x, s) { \
(a) += H ((b), (c), (d)) + (x) + (UInt32)0x6ed9eba1; \
(a) = ROTATE_LEFT ((a), (s)); \
}
void MD4Engine::transform (UInt32 state[4], const unsigned char block[64])
{
UInt32 a = state[0], b = state[1], c = state[2], d = state[3], x[16];
decode(x, block, 64);
/* Round 1 */
FF (a, b, c, d, x[ 0], S11); /* 1 */
FF (d, a, b, c, x[ 1], S12); /* 2 */
FF (c, d, a, b, x[ 2], S13); /* 3 */
FF (b, c, d, a, x[ 3], S14); /* 4 */
FF (a, b, c, d, x[ 4], S11); /* 5 */
FF (d, a, b, c, x[ 5], S12); /* 6 */
FF (c, d, a, b, x[ 6], S13); /* 7 */
FF (b, c, d, a, x[ 7], S14); /* 8 */
FF (a, b, c, d, x[ 8], S11); /* 9 */
FF (d, a, b, c, x[ 9], S12); /* 10 */
FF (c, d, a, b, x[10], S13); /* 11 */
FF (b, c, d, a, x[11], S14); /* 12 */
FF (a, b, c, d, x[12], S11); /* 13 */
FF (d, a, b, c, x[13], S12); /* 14 */
FF (c, d, a, b, x[14], S13); /* 15 */
FF (b, c, d, a, x[15], S14); /* 16 */
/* Round 2 */
GG (a, b, c, d, x[ 0], S21); /* 17 */
GG (d, a, b, c, x[ 4], S22); /* 18 */
GG (c, d, a, b, x[ 8], S23); /* 19 */
GG (b, c, d, a, x[12], S24); /* 20 */
GG (a, b, c, d, x[ 1], S21); /* 21 */
GG (d, a, b, c, x[ 5], S22); /* 22 */
GG (c, d, a, b, x[ 9], S23); /* 23 */
GG (b, c, d, a, x[13], S24); /* 24 */
GG (a, b, c, d, x[ 2], S21); /* 25 */
GG (d, a, b, c, x[ 6], S22); /* 26 */
GG (c, d, a, b, x[10], S23); /* 27 */
GG (b, c, d, a, x[14], S24); /* 28 */
GG (a, b, c, d, x[ 3], S21); /* 29 */
GG (d, a, b, c, x[ 7], S22); /* 30 */
GG (c, d, a, b, x[11], S23); /* 31 */
GG (b, c, d, a, x[15], S24); /* 32 */
/* Round 3 */
HH (a, b, c, d, x[ 0], S31); /* 33 */
HH (d, a, b, c, x[ 8], S32); /* 34 */
HH (c, d, a, b, x[ 4], S33); /* 35 */
HH (b, c, d, a, x[12], S34); /* 36 */
HH (a, b, c, d, x[ 2], S31); /* 37 */
HH (d, a, b, c, x[10], S32); /* 38 */
HH (c, d, a, b, x[ 6], S33); /* 39 */
HH (b, c, d, a, x[14], S34); /* 40 */
HH (a, b, c, d, x[ 1], S31); /* 41 */
HH (d, a, b, c, x[ 9], S32); /* 42 */
HH (c, d, a, b, x[ 5], S33); /* 43 */
HH (b, c, d, a, x[13], S34); /* 44 */
HH (a, b, c, d, x[ 3], S31); /* 45 */
HH (d, a, b, c, x[11], S32); /* 46 */
HH (c, d, a, b, x[ 7], S33); /* 47 */
HH (b, c, d, a, x[15], S34); /* 48 */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
/* Zeroize sensitive information. */
std::memset(x, 0, sizeof(x));
}
void MD4Engine::encode(unsigned char* output, const UInt32* input, std::size_t len)
{
unsigned int i, j;
for (i = 0, j = 0; j < len; i++, j += 4)
{
output[j] = (unsigned char)(input[i] & 0xff);
output[j+1] = (unsigned char)((input[i] >> 8) & 0xff);
output[j+2] = (unsigned char)((input[i] >> 16) & 0xff);
output[j+3] = (unsigned char)((input[i] >> 24) & 0xff);
}
}
void MD4Engine::decode(UInt32* output, const unsigned char* input, std::size_t len)
{
unsigned int i, j;
for (i = 0, j = 0; j < len; i++, j += 4)
output[i] = ((UInt32)input[j]) | (((UInt32)input[j+1]) << 8) |
(((UInt32)input[j+2]) << 16) | (((UInt32)input[j+3]) << 24);
}
} // namespace Poco
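A hedged usage sketch (not from the file above): driving MD4Engine through the DigestEngine interface and checking the result against the RFC 1320 test vector for "abc".
#include "Poco/MD4Engine.h"
#include "Poco/DigestEngine.h"
#include <cassert>
#include <string>
int main()
{
    Poco::MD4Engine md4;
    md4.update("abc", 3);                                        // feeds updateImpl() above
    const std::string hex = Poco::DigestEngine::digestToHex(md4.digest());
    assert(hex == "a448017aaf21d8525fc10ae87aa6729d");           // RFC 1320 test vector for "abc"
    return 0;
}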

View File

@ -1,31 +0,0 @@
//
// Manifest.cpp
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Manifest.h"
namespace Poco {
ManifestBase::ManifestBase()
{
}
ManifestBase::~ManifestBase()
{
}
} // namespace Poco

View File

@ -14,23 +14,9 @@
#include "Poco/Bugcheck.h"
// +++ double conversion +++
#define double_conversion poco_double_conversion // don't collide with standalone double_conversion library
#define UNREACHABLE poco_bugcheck
#define UNIMPLEMENTED poco_bugcheck
#include "diy-fp.cc"
#include "cached-powers.cc"
#include "bignum-dtoa.cc"
#include "bignum.cc"
#include "fast-dtoa.cc"
#include "fixed-dtoa.cc"
#include "strtod.cc"
#include "double-conversion.cc"
// --- double conversion ---
#include <double-conversion/double-conversion.h>
#include "Poco/NumericString.h"
poco_static_assert(POCO_MAX_FLT_STRING_LEN == double_conversion::kMaxSignificantDecimalDigits);
#include "Poco/String.h"
#include <memory>
#include <cctype>
@ -263,7 +249,7 @@ float strToFloat(const char* str)
int processed;
int flags = StringToDoubleConverter::ALLOW_LEADING_SPACES |
StringToDoubleConverter::ALLOW_TRAILING_SPACES;
StringToDoubleConverter converter(flags, 0.0, Single::NaN(), POCO_FLT_INF, POCO_FLT_NAN);
StringToDoubleConverter converter(flags, 0.0, std::numeric_limits<float>::quiet_NaN(), POCO_FLT_INF, POCO_FLT_NAN);
float result = converter.StringToFloat(str, static_cast<int>(strlen(str)), &processed);
return result;
}
@ -275,7 +261,7 @@ double strToDouble(const char* str)
int processed;
int flags = StringToDoubleConverter::ALLOW_LEADING_SPACES |
StringToDoubleConverter::ALLOW_TRAILING_SPACES;
StringToDoubleConverter converter(flags, 0.0, Double::NaN(), POCO_FLT_INF, POCO_FLT_NAN);
StringToDoubleConverter converter(flags, 0.0, std::numeric_limits<double>::quiet_NaN(), POCO_FLT_INF, POCO_FLT_NAN);
double result = converter.StringToDouble(str, static_cast<int>(strlen(str)), &processed);
return result;
}
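A small sketch of calling the converter defined in this hunk (assuming the matching declaration in Poco/NumericString.h): the ALLOW_*_SPACES flags above make surrounding whitespace acceptable.
#include "Poco/NumericString.h"
#include <cassert>
int main()
{
    // spaces are tolerated because of the ALLOW_LEADING/TRAILING_SPACES flags above
    assert(Poco::strToDouble(" 3.25 ") == 3.25);   // 3.25 is exactly representable
    return 0;
}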

View File

@ -1,65 +0,0 @@
//
// PipeImpl_DUMMY.cpp
//
// Library: Foundation
// Package: Processes
// Module: PipeImpl
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/PipeImpl_DUMMY.h"
namespace Poco {
PipeImpl::PipeImpl()
{
}
PipeImpl::~PipeImpl()
{
}
int PipeImpl::writeBytes(const void* buffer, int length)
{
return 0;
}
int PipeImpl::readBytes(void* buffer, int length)
{
return 0;
}
PipeImpl::Handle PipeImpl::readHandle() const
{
return 0;
}
PipeImpl::Handle PipeImpl::writeHandle() const
{
return 0;
}
void PipeImpl::closeRead()
{
}
void PipeImpl::closeWrite()
{
}
} // namespace Poco

View File

@ -1,127 +0,0 @@
//
// PipeStream.cpp
//
// Library: Foundation
// Package: Processes
// Module: PipeStream
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/PipeStream.h"
namespace Poco {
//
// PipeStreamBuf
//
PipeStreamBuf::PipeStreamBuf(const Pipe& pipe, openmode mode):
BufferedStreamBuf(STREAM_BUFFER_SIZE, mode),
_pipe(pipe)
{
}
PipeStreamBuf::~PipeStreamBuf()
{
}
int PipeStreamBuf::readFromDevice(char* buffer, std::streamsize length)
{
return _pipe.readBytes(buffer, (int) length);
}
int PipeStreamBuf::writeToDevice(const char* buffer, std::streamsize length)
{
return _pipe.writeBytes(buffer, (int) length);
}
void PipeStreamBuf::close()
{
_pipe.close(Pipe::CLOSE_BOTH);
}
//
// PipeIOS
//
PipeIOS::PipeIOS(const Pipe& pipe, openmode mode):
_buf(pipe, mode)
{
poco_ios_init(&_buf);
}
PipeIOS::~PipeIOS()
{
try
{
_buf.sync();
}
catch (...)
{
}
}
PipeStreamBuf* PipeIOS::rdbuf()
{
return &_buf;
}
void PipeIOS::close()
{
_buf.sync();
_buf.close();
}
//
// PipeOutputStream
//
PipeOutputStream::PipeOutputStream(const Pipe& pipe):
PipeIOS(pipe, std::ios::out),
std::ostream(&_buf)
{
}
PipeOutputStream::~PipeOutputStream()
{
}
//
// PipeInputStream
//
PipeInputStream::PipeInputStream(const Pipe& pipe):
PipeIOS(pipe, std::ios::in),
std::istream(&_buf)
{
}
PipeInputStream::~PipeInputStream()
{
}
} // namespace Poco
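Usage sketch (editor's illustration): wiring the stream classes above to an anonymous Poco::Pipe. A short write fits in the OS pipe buffer, so the read-back below should not block.
#include "Poco/Pipe.h"
#include "Poco/PipeStream.h"
#include <cassert>
#include <string>
int main()
{
    Poco::Pipe pipe;
    Poco::PipeOutputStream ostr(pipe);
    Poco::PipeInputStream istr(pipe);
    ostr << "hello" << std::flush;   // buffered by PipeStreamBuf, written via Pipe::writeBytes()
    std::string word;
    istr >> word;                    // read back through PipeStreamBuf::readFromDevice()
    assert(word == "hello");
    return 0;
}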

View File

@ -1,52 +0,0 @@
//
// Semaphore_VX.cpp
//
// Library: Foundation
// Package: Threading
// Module: Semaphore
//
// Copyright (c) 2004-2011, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Semaphore_VX.h"
#include <sysLib.h>
namespace Poco {
SemaphoreImpl::SemaphoreImpl(int n, int max)
{
poco_assert (n >= 0 && max > 0 && n <= max);
_sem = semCCreate(SEM_Q_PRIORITY, n);
if (_sem == 0)
throw Poco::SystemException("cannot create semaphore");
}
SemaphoreImpl::~SemaphoreImpl()
{
semDelete(_sem);
}
void SemaphoreImpl::waitImpl()
{
if (semTake(_sem, WAIT_FOREVER) != OK)
throw SystemException("cannot wait for semaphore");
}
bool SemaphoreImpl::waitImpl(long milliseconds)
{
int ticks = milliseconds*sysClkRateGet()/1000;
return semTake(_sem, ticks) == OK;
}
} // namespace Poco

View File

@ -1,65 +0,0 @@
//
// Semaphore_WIN32.cpp
//
// Library: Foundation
// Package: Threading
// Module: Semaphore
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Semaphore_WIN32.h"
namespace Poco {
SemaphoreImpl::SemaphoreImpl(int n, int max)
{
poco_assert (n >= 0 && max > 0 && n <= max);
_sema = CreateSemaphoreW(NULL, n, max, NULL);
if (!_sema)
{
throw SystemException("cannot create semaphore");
}
}
SemaphoreImpl::~SemaphoreImpl()
{
CloseHandle(_sema);
}
void SemaphoreImpl::waitImpl()
{
switch (WaitForSingleObject(_sema, INFINITE))
{
case WAIT_OBJECT_0:
return;
default:
throw SystemException("wait for semaphore failed");
}
}
bool SemaphoreImpl::waitImpl(long milliseconds)
{
switch (WaitForSingleObject(_sema, milliseconds + 1))
{
case WAIT_TIMEOUT:
return false;
case WAIT_OBJECT_0:
return true;
default:
throw SystemException("wait for semaphore failed");
}
}
} // namespace Poco

View File

@ -1,36 +0,0 @@
//
// SharedMemoryImpl.cpp
//
// Library: Foundation
// Package: Processes
// Module: SharedMemoryImpl
//
// Copyright (c) 2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/SharedMemory_DUMMY.h"
namespace Poco {
SharedMemoryImpl::SharedMemoryImpl(const std::string&, std::size_t, SharedMemory::AccessMode, const void*, bool)
{
}
SharedMemoryImpl::SharedMemoryImpl(const Poco::File&, SharedMemory::AccessMode, const void*)
{
}
SharedMemoryImpl::~SharedMemoryImpl()
{
}
} // namespace Poco

View File

@ -1,105 +0,0 @@
//
// StreamTokenizer.cpp
//
// Library: Foundation
// Package: Streams
// Module: StreamTokenizer
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/StreamTokenizer.h"
namespace Poco {
StreamTokenizer::StreamTokenizer():
_pIstr(0)
{
}
StreamTokenizer::StreamTokenizer(std::istream& istr):
_pIstr(&istr)
{
}
StreamTokenizer::~StreamTokenizer()
{
for (TokenVec::iterator it = _tokens.begin(); it != _tokens.end(); ++it)
{
delete it->pToken;
}
}
void StreamTokenizer::attachToStream(std::istream& istr)
{
_pIstr = &istr;
}
void StreamTokenizer::addToken(Token* pToken)
{
poco_check_ptr (pToken);
TokenInfo ti;
ti.pToken = pToken;
ti.ignore = (pToken->tokenClass() == Token::COMMENT_TOKEN || pToken->tokenClass() == Token::WHITESPACE_TOKEN);
_tokens.push_back(ti);
}
void StreamTokenizer::addToken(Token* pToken, bool ignore)
{
poco_check_ptr (pToken);
TokenInfo ti;
ti.pToken = pToken;
ti.ignore = ignore;
_tokens.push_back(ti);
}
const Token* StreamTokenizer::next()
{
poco_check_ptr (_pIstr);
static const int eof = std::char_traits<char>::eof();
int first = _pIstr->get();
TokenVec::const_iterator it = _tokens.begin();
while (first != eof && it != _tokens.end())
{
const TokenInfo& ti = *it;
if (ti.pToken->start((char) first, *_pIstr))
{
ti.pToken->finish(*_pIstr);
if (ti.ignore)
{
first = _pIstr->get();
it = _tokens.begin();
}
else return ti.pToken;
}
else ++it;
}
if (first == eof)
{
return &_eofToken;
}
else
{
_invalidToken.start((char) first, *_pIstr);
return &_invalidToken;
}
}
} // namespace Poco

View File

@ -1,31 +0,0 @@
//
// SynchronizedObject.cpp
//
// Library: Foundation
// Package: Threading
// Module: SynchronizedObject
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/SynchronizedObject.h"
namespace Poco {
SynchronizedObject::SynchronizedObject()
{
}
SynchronizedObject::~SynchronizedObject()
{
}
} // namespace Poco

View File

@ -16,15 +16,9 @@
#include "Poco/Exception.h"
#include "Poco/String.h"
#include "Poco/ASCIIEncoding.h"
#include "Poco/Latin1Encoding.h"
#include "Poco/Latin2Encoding.h"
#include "Poco/Latin9Encoding.h"
#include "Poco/UTF32Encoding.h"
#include "Poco/UTF16Encoding.h"
#include "Poco/UTF8Encoding.h"
#include "Poco/Windows1250Encoding.h"
#include "Poco/Windows1251Encoding.h"
#include "Poco/Windows1252Encoding.h"
#include "Poco/RWLock.h"
#include "Poco/SingletonHolder.h"
#include <map>
@ -47,15 +41,9 @@ public:
add(pUtf8Encoding, TextEncoding::GLOBAL);
add(new ASCIIEncoding);
add(new Latin1Encoding);
add(new Latin2Encoding);
add(new Latin9Encoding);
add(pUtf8Encoding);
add(new UTF16Encoding);
add(new UTF32Encoding);
add(new Windows1250Encoding);
add(new Windows1251Encoding);
add(new Windows1252Encoding);
}
~TextEncodingManager()

View File

@ -1,78 +0,0 @@
//
// Timezone_VXX.cpp
//
// Library: Foundation
// Package: DateTime
// Module: Timezone
//
// Copyright (c) 2004-2011, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Timezone.h"
#include "Poco/Exception.h"
#include "Poco/Environment.h"
#include <ctime>
namespace Poco {
int Timezone::utcOffset()
{
std::time_t now = std::time(NULL);
struct std::tm t;
gmtime_r(&now, &t);
std::time_t utc = std::mktime(&t);
return now - utc;
}
int Timezone::dst()
{
std::time_t now = std::time(NULL);
struct std::tm t;
if (localtime_r(&now, &t) != OK)
throw Poco::SystemException("cannot get local time DST offset");
return t.tm_isdst == 1 ? 3600 : 0;
}
bool Timezone::isDst(const Timestamp& timestamp)
{
std::time_t time = timestamp.epochTime();
struct std::tm* tms = std::localtime(&time);
if (!tms) throw Poco::SystemException("cannot get local time DST flag");
return tms->tm_isdst > 0;
}
std::string Timezone::name()
{
// format of TIMEZONE environment variable:
// name_of_zone:<(unused)>:time_in_minutes_from_UTC:daylight_start:daylight_end
std::string tz = Environment::get("TIMEZONE", "UTC");
std::string::size_type pos = tz.find(':');
if (pos != std::string::npos)
return tz.substr(0, pos);
else
return tz;
}
std::string Timezone::standardName()
{
return name();
}
std::string Timezone::dstName()
{
return name();
}
} // namespace Poco
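Worked example for the TIMEZONE format parsed by name() above (a sketch that only applies to this VxWorks implementation; the daylight fields below are hypothetical placeholders).
#include "Poco/Environment.h"
#include "Poco/Timezone.h"
#include <cassert>
int main()
{
    // name_of_zone : (unused) : minutes_from_UTC : daylight_start : daylight_end
    Poco::Environment::set("TIMEZONE", "CET::-60:032800:102800");
    assert(Poco::Timezone::name() == "CET");   // everything before the first ':'
    return 0;
}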

View File

@ -1,237 +0,0 @@
//
// Windows1251Encoding.cpp
//
// Library: Foundation
// Package: Text
// Module: Windows1251Encoding
//
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Windows1251Encoding.h"
#include "Poco/String.h"
namespace Poco {
const char* Windows1251Encoding::_names[] =
{
"windows-1251",
"Windows-1251",
"cp1251",
"CP1251",
NULL
};
const TextEncoding::CharacterMap Windows1251Encoding::_charMap =
{
/* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f */
/* 00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
/* 10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
/* 20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
/* 30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
/* 40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
/* 50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
/* 60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
/* 70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
/* 80 */ 0x0402, 0x0403, 0x201a, 0x0453, 0x201e, 0x2026, 0x2020, 0x2021, 0x20ac, 0x2030, 0x0409, 0x2039, 0x040a, 0x040c, 0x040b, 0x040f,
/* 90 */ 0x0452, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0xfffe, 0x2122, 0x0459, 0x203a, 0x045a, 0x045c, 0x045b, 0x045f,
/* a0 */ 0x00a0, 0x040e, 0x045e, 0x0408, 0x00a4, 0x0490, 0x00a6, 0x00a7, 0x0401, 0x00a9, 0x0404, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x0407,
/* b0 */ 0x00b0, 0x00b1, 0x0406, 0x0456, 0x0491, 0x00b5, 0x00b6, 0x00b7, 0x0451, 0x2116, 0x0454, 0x00bb, 0x0458, 0x0405, 0x0455, 0x0457,
/* c0 */ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f,
/* d0 */ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f,
/* e0 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f,
/* f0 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f,
};
Windows1251Encoding::Windows1251Encoding()
{
}
Windows1251Encoding::~Windows1251Encoding()
{
}
const char* Windows1251Encoding::canonicalName() const
{
return _names[0];
}
bool Windows1251Encoding::isA(const std::string& encodingName) const
{
for (const char** name = _names; *name; ++name)
{
if (Poco::icompare(encodingName, *name) == 0)
return true;
}
return false;
}
const TextEncoding::CharacterMap& Windows1251Encoding::characterMap() const
{
return _charMap;
}
int Windows1251Encoding::convert(const unsigned char* bytes) const
{
return _charMap[*bytes];
}
int Windows1251Encoding::convert(int ch, unsigned char* bytes, int length) const
{
if (ch >= 0 && ch <= 255 && _charMap[ch] == ch)
{
if (bytes && length >= 1)
*bytes = (unsigned char) ch;
return 1;
}
else switch(ch)
{
case 0x0402: if (bytes && length >= 1) *bytes = 0x80; return 1;
case 0x0403: if (bytes && length >= 1) *bytes = 0x81; return 1;
case 0x201a: if (bytes && length >= 1) *bytes = 0x82; return 1;
case 0x0453: if (bytes && length >= 1) *bytes = 0x83; return 1;
case 0x201e: if (bytes && length >= 1) *bytes = 0x84; return 1;
case 0x2026: if (bytes && length >= 1) *bytes = 0x85; return 1;
case 0x2020: if (bytes && length >= 1) *bytes = 0x86; return 1;
case 0x2021: if (bytes && length >= 1) *bytes = 0x87; return 1;
case 0x20ac: if (bytes && length >= 1) *bytes = 0x88; return 1;
case 0x2030: if (bytes && length >= 1) *bytes = 0x89; return 1;
case 0x0409: if (bytes && length >= 1) *bytes = 0x8a; return 1;
case 0x2039: if (bytes && length >= 1) *bytes = 0x8b; return 1;
case 0x040a: if (bytes && length >= 1) *bytes = 0x8c; return 1;
case 0x040c: if (bytes && length >= 1) *bytes = 0x8d; return 1;
case 0x040b: if (bytes && length >= 1) *bytes = 0x8e; return 1;
case 0x040f: if (bytes && length >= 1) *bytes = 0x8f; return 1;
case 0x0452: if (bytes && length >= 1) *bytes = 0x90; return 1;
case 0x2018: if (bytes && length >= 1) *bytes = 0x91; return 1;
case 0x2019: if (bytes && length >= 1) *bytes = 0x92; return 1;
case 0x201c: if (bytes && length >= 1) *bytes = 0x93; return 1;
case 0x201d: if (bytes && length >= 1) *bytes = 0x94; return 1;
case 0x2022: if (bytes && length >= 1) *bytes = 0x95; return 1;
case 0x2013: if (bytes && length >= 1) *bytes = 0x96; return 1;
case 0x2014: if (bytes && length >= 1) *bytes = 0x97; return 1;
case 0xfffe: if (bytes && length >= 1) *bytes = 0x98; return 1;
case 0x2122: if (bytes && length >= 1) *bytes = 0x99; return 1;
case 0x0459: if (bytes && length >= 1) *bytes = 0x9a; return 1;
case 0x203a: if (bytes && length >= 1) *bytes = 0x9b; return 1;
case 0x045a: if (bytes && length >= 1) *bytes = 0x9c; return 1;
case 0x045c: if (bytes && length >= 1) *bytes = 0x9d; return 1;
case 0x045b: if (bytes && length >= 1) *bytes = 0x9e; return 1;
case 0x045f: if (bytes && length >= 1) *bytes = 0x9f; return 1;
case 0x040e: if (bytes && length >= 1) *bytes = 0xa1; return 1;
case 0x045e: if (bytes && length >= 1) *bytes = 0xa2; return 1;
case 0x0408: if (bytes && length >= 1) *bytes = 0xa3; return 1;
case 0x0490: if (bytes && length >= 1) *bytes = 0xa5; return 1;
case 0x0401: if (bytes && length >= 1) *bytes = 0xa8; return 1;
case 0x0404: if (bytes && length >= 1) *bytes = 0xaa; return 1;
case 0x0407: if (bytes && length >= 1) *bytes = 0xaf; return 1;
case 0x0406: if (bytes && length >= 1) *bytes = 0xb2; return 1;
case 0x0456: if (bytes && length >= 1) *bytes = 0xb3; return 1;
case 0x0491: if (bytes && length >= 1) *bytes = 0xb4; return 1;
case 0x0451: if (bytes && length >= 1) *bytes = 0xb8; return 1;
case 0x2116: if (bytes && length >= 1) *bytes = 0xb9; return 1;
case 0x0454: if (bytes && length >= 1) *bytes = 0xba; return 1;
case 0x0458: if (bytes && length >= 1) *bytes = 0xbc; return 1;
case 0x0405: if (bytes && length >= 1) *bytes = 0xbd; return 1;
case 0x0455: if (bytes && length >= 1) *bytes = 0xbe; return 1;
case 0x0457: if (bytes && length >= 1) *bytes = 0xbf; return 1;
case 0x0410: if (bytes && length >= 1) *bytes = 0xc0; return 1;
case 0x0411: if (bytes && length >= 1) *bytes = 0xc1; return 1;
case 0x0412: if (bytes && length >= 1) *bytes = 0xc2; return 1;
case 0x0413: if (bytes && length >= 1) *bytes = 0xc3; return 1;
case 0x0414: if (bytes && length >= 1) *bytes = 0xc4; return 1;
case 0x0415: if (bytes && length >= 1) *bytes = 0xc5; return 1;
case 0x0416: if (bytes && length >= 1) *bytes = 0xc6; return 1;
case 0x0417: if (bytes && length >= 1) *bytes = 0xc7; return 1;
case 0x0418: if (bytes && length >= 1) *bytes = 0xc8; return 1;
case 0x0419: if (bytes && length >= 1) *bytes = 0xc9; return 1;
case 0x041a: if (bytes && length >= 1) *bytes = 0xca; return 1;
case 0x041b: if (bytes && length >= 1) *bytes = 0xcb; return 1;
case 0x041c: if (bytes && length >= 1) *bytes = 0xcc; return 1;
case 0x041d: if (bytes && length >= 1) *bytes = 0xcd; return 1;
case 0x041e: if (bytes && length >= 1) *bytes = 0xce; return 1;
case 0x041f: if (bytes && length >= 1) *bytes = 0xcf; return 1;
case 0x0420: if (bytes && length >= 1) *bytes = 0xd0; return 1;
case 0x0421: if (bytes && length >= 1) *bytes = 0xd1; return 1;
case 0x0422: if (bytes && length >= 1) *bytes = 0xd2; return 1;
case 0x0423: if (bytes && length >= 1) *bytes = 0xd3; return 1;
case 0x0424: if (bytes && length >= 1) *bytes = 0xd4; return 1;
case 0x0425: if (bytes && length >= 1) *bytes = 0xd5; return 1;
case 0x0426: if (bytes && length >= 1) *bytes = 0xd6; return 1;
case 0x0427: if (bytes && length >= 1) *bytes = 0xd7; return 1;
case 0x0428: if (bytes && length >= 1) *bytes = 0xd8; return 1;
case 0x0429: if (bytes && length >= 1) *bytes = 0xd9; return 1;
case 0x042a: if (bytes && length >= 1) *bytes = 0xda; return 1;
case 0x042b: if (bytes && length >= 1) *bytes = 0xdb; return 1;
case 0x042c: if (bytes && length >= 1) *bytes = 0xdc; return 1;
case 0x042d: if (bytes && length >= 1) *bytes = 0xdd; return 1;
case 0x042e: if (bytes && length >= 1) *bytes = 0xde; return 1;
case 0x042f: if (bytes && length >= 1) *bytes = 0xdf; return 1;
case 0x0430: if (bytes && length >= 1) *bytes = 0xe0; return 1;
case 0x0431: if (bytes && length >= 1) *bytes = 0xe1; return 1;
case 0x0432: if (bytes && length >= 1) *bytes = 0xe2; return 1;
case 0x0433: if (bytes && length >= 1) *bytes = 0xe3; return 1;
case 0x0434: if (bytes && length >= 1) *bytes = 0xe4; return 1;
case 0x0435: if (bytes && length >= 1) *bytes = 0xe5; return 1;
case 0x0436: if (bytes && length >= 1) *bytes = 0xe6; return 1;
case 0x0437: if (bytes && length >= 1) *bytes = 0xe7; return 1;
case 0x0438: if (bytes && length >= 1) *bytes = 0xe8; return 1;
case 0x0439: if (bytes && length >= 1) *bytes = 0xe9; return 1;
case 0x043a: if (bytes && length >= 1) *bytes = 0xea; return 1;
case 0x043b: if (bytes && length >= 1) *bytes = 0xeb; return 1;
case 0x043c: if (bytes && length >= 1) *bytes = 0xec; return 1;
case 0x043d: if (bytes && length >= 1) *bytes = 0xed; return 1;
case 0x043e: if (bytes && length >= 1) *bytes = 0xee; return 1;
case 0x043f: if (bytes && length >= 1) *bytes = 0xef; return 1;
case 0x0440: if (bytes && length >= 1) *bytes = 0xf0; return 1;
case 0x0441: if (bytes && length >= 1) *bytes = 0xf1; return 1;
case 0x0442: if (bytes && length >= 1) *bytes = 0xf2; return 1;
case 0x0443: if (bytes && length >= 1) *bytes = 0xf3; return 1;
case 0x0444: if (bytes && length >= 1) *bytes = 0xf4; return 1;
case 0x0445: if (bytes && length >= 1) *bytes = 0xf5; return 1;
case 0x0446: if (bytes && length >= 1) *bytes = 0xf6; return 1;
case 0x0447: if (bytes && length >= 1) *bytes = 0xf7; return 1;
case 0x0448: if (bytes && length >= 1) *bytes = 0xf8; return 1;
case 0x0449: if (bytes && length >= 1) *bytes = 0xf9; return 1;
case 0x044a: if (bytes && length >= 1) *bytes = 0xfa; return 1;
case 0x044b: if (bytes && length >= 1) *bytes = 0xfb; return 1;
case 0x044c: if (bytes && length >= 1) *bytes = 0xfc; return 1;
case 0x044d: if (bytes && length >= 1) *bytes = 0xfd; return 1;
case 0x044e: if (bytes && length >= 1) *bytes = 0xfe; return 1;
case 0x044f: if (bytes && length >= 1) *bytes = 0xff; return 1;
default: return 0;
}
}
int Windows1251Encoding::queryConvert(const unsigned char* bytes, int length) const
{
if (1 <= length)
return _charMap[*bytes];
else
return -1;
}
int Windows1251Encoding::sequenceLength(const unsigned char* bytes, int length) const
{
return 1;
}
} // namespace Poco
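Round-trip sketch for the table above (illustrative, not from the original file): Cyrillic U+0410 encodes to byte 0xc0 and decodes back.
#include "Poco/Windows1251Encoding.h"
#include <cassert>
int main()
{
    Poco::Windows1251Encoding enc;
    unsigned char b = 0;
    assert(enc.convert(0x0410, &b, 1) == 1 && b == 0xc0);   // U+0410 -> 0xc0, see the switch above
    assert(enc.convert(&b) == 0x0410);                      // 0xc0 -> U+0410, see _charMap row /* c0 */
    return 0;
}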

View File

@ -1,151 +0,0 @@
//
// Windows1252Encoding.cpp
//
// Library: Foundation
// Package: Text
// Module: Windows1252Encoding
//
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/Windows1252Encoding.h"
#include "Poco/String.h"
#include <map>
namespace Poco {
const char* Windows1252Encoding::_names[] =
{
"windows-1252",
"Windows-1252",
"cp1252",
"CP1252",
NULL
};
const TextEncoding::CharacterMap Windows1252Encoding::_charMap =
{
/* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f */
/* 00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
/* 10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
/* 20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
/* 30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
/* 40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
/* 50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
/* 60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
/* 70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
/* 80 */ 0x20ac, 0x0081, 0x201a, 0x0192, 0x201e, 0x2026, 0x2020, 0x2021, 0x02c6, 0x2030, 0x0160, 0x2039, 0x0152, 0x008d, 0x017d, 0x008f,
/* 90 */ 0x0090, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x02dc, 0x2122, 0x0161, 0x203a, 0x0153, 0x009d, 0x017e, 0x0178,
/* a0 */ 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7, 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
/* b0 */ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7, 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
/* c0 */ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7, 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
/* d0 */ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7, 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
/* e0 */ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7, 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
/* f0 */ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7, 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
};
Windows1252Encoding::Windows1252Encoding()
{
}
Windows1252Encoding::~Windows1252Encoding()
{
}
const char* Windows1252Encoding::canonicalName() const
{
return _names[0];
}
bool Windows1252Encoding::isA(const std::string& encodingName) const
{
for (const char** name = _names; *name; ++name)
{
if (Poco::icompare(encodingName, *name) == 0)
return true;
}
return false;
}
const TextEncoding::CharacterMap& Windows1252Encoding::characterMap() const
{
return _charMap;
}
int Windows1252Encoding::convert(const unsigned char* bytes) const
{
return _charMap[*bytes];
}
int Windows1252Encoding::convert(int ch, unsigned char* bytes, int length) const
{
if (ch >= 0 && ch <= 255 && _charMap[ch] == ch)
{
if (bytes && length >= 1)
*bytes = ch;
return 1;
}
else switch (ch)
{
case 0x20ac: if (bytes && length >= 1) *bytes = 0x80; return 1;
case 0x201a: if (bytes && length >= 1) *bytes = 0x82; return 1;
case 0x0192: if (bytes && length >= 1) *bytes = 0x83; return 1;
case 0x201e: if (bytes && length >= 1) *bytes = 0x84; return 1;
case 0x2026: if (bytes && length >= 1) *bytes = 0x85; return 1;
case 0x2020: if (bytes && length >= 1) *bytes = 0x86; return 1;
case 0x2021: if (bytes && length >= 1) *bytes = 0x87; return 1;
case 0x02c6: if (bytes && length >= 1) *bytes = 0x88; return 1;
case 0x2030: if (bytes && length >= 1) *bytes = 0x89; return 1;
case 0x0160: if (bytes && length >= 1) *bytes = 0x8a; return 1;
case 0x2039: if (bytes && length >= 1) *bytes = 0x8b; return 1;
case 0x0152: if (bytes && length >= 1) *bytes = 0x8c; return 1;
case 0x017d: if (bytes && length >= 1) *bytes = 0x8e; return 1;
case 0x2018: if (bytes && length >= 1) *bytes = 0x91; return 1;
case 0x2019: if (bytes && length >= 1) *bytes = 0x92; return 1;
case 0x201c: if (bytes && length >= 1) *bytes = 0x93; return 1;
case 0x201d: if (bytes && length >= 1) *bytes = 0x94; return 1;
case 0x2022: if (bytes && length >= 1) *bytes = 0x95; return 1;
case 0x2013: if (bytes && length >= 1) *bytes = 0x96; return 1;
case 0x2014: if (bytes && length >= 1) *bytes = 0x97; return 1;
case 0x02dc: if (bytes && length >= 1) *bytes = 0x98; return 1;
case 0x2122: if (bytes && length >= 1) *bytes = 0x99; return 1;
case 0x0161: if (bytes && length >= 1) *bytes = 0x9a; return 1;
case 0x203a: if (bytes && length >= 1) *bytes = 0x9b; return 1;
case 0x0153: if (bytes && length >= 1) *bytes = 0x9c; return 1;
case 0x017e: if (bytes && length >= 1) *bytes = 0x9e; return 1;
case 0x0178: if (bytes && length >= 1) *bytes = 0x9f; return 1;
default: return 0;
}
}
int Windows1252Encoding::queryConvert(const unsigned char* bytes, int length) const
{
if (1 <= length)
return _charMap[*bytes];
else
return -1;
}
int Windows1252Encoding::sequenceLength(const unsigned char* bytes, int length) const
{
return 1;
}
} // namespace Poco

View File

@ -1,269 +0,0 @@
//
// WindowsConsoleChannel.cpp
//
// Library: Foundation
// Package: Logging
// Module: WindowsConsoleChannel
//
// Copyright (c) 2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/WindowsConsoleChannel.h"
#include "Poco/Message.h"
#include "Poco/String.h"
#include "Poco/Exception.h"
namespace Poco {
WindowsConsoleChannel::WindowsConsoleChannel():
_isFile(false),
_hConsole(INVALID_HANDLE_VALUE)
{
_hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
// check whether the console has been redirected
DWORD mode;
_isFile = (GetConsoleMode(_hConsole, &mode) == 0);
}
WindowsConsoleChannel::~WindowsConsoleChannel()
{
}
void WindowsConsoleChannel::log(const Message& msg)
{
std::string text = msg.getText();
text += "\r\n";
DWORD written;
WriteFile(_hConsole, text.data(), text.size(), &written, NULL);
}
WindowsColorConsoleChannel::WindowsColorConsoleChannel():
_enableColors(true),
_isFile(false),
_hConsole(INVALID_HANDLE_VALUE)
{
_hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
// check whether the console has been redirected
DWORD mode;
_isFile = (GetConsoleMode(_hConsole, &mode) == 0);
initColors();
}
WindowsColorConsoleChannel::~WindowsColorConsoleChannel()
{
}
void WindowsColorConsoleChannel::log(const Message& msg)
{
std::string text = msg.getText();
text += "\r\n";
if (_enableColors && !_isFile)
{
WORD attr = _colors[0];
attr &= 0xFFF0;
attr |= _colors[msg.getPriority()];
SetConsoleTextAttribute(_hConsole, attr);
}
DWORD written;
WriteFile(_hConsole, text.data(), text.size(), &written, NULL);
if (_enableColors && !_isFile)
{
SetConsoleTextAttribute(_hConsole, _colors[0]);
}
}
void WindowsColorConsoleChannel::setProperty(const std::string& name, const std::string& value)
{
if (name == "enableColors")
{
_enableColors = icompare(value, "true") == 0;
}
else if (name == "traceColor")
{
_colors[Message::PRIO_TRACE] = parseColor(value);
}
else if (name == "debugColor")
{
_colors[Message::PRIO_DEBUG] = parseColor(value);
}
else if (name == "informationColor")
{
_colors[Message::PRIO_INFORMATION] = parseColor(value);
}
else if (name == "noticeColor")
{
_colors[Message::PRIO_NOTICE] = parseColor(value);
}
else if (name == "warningColor")
{
_colors[Message::PRIO_WARNING] = parseColor(value);
}
else if (name == "errorColor")
{
_colors[Message::PRIO_ERROR] = parseColor(value);
}
else if (name == "criticalColor")
{
_colors[Message::PRIO_CRITICAL] = parseColor(value);
}
else if (name == "fatalColor")
{
_colors[Message::PRIO_FATAL] = parseColor(value);
}
else
{
Channel::setProperty(name, value);
}
}
std::string WindowsColorConsoleChannel::getProperty(const std::string& name) const
{
if (name == "enableColors")
{
return _enableColors ? "true" : "false";
}
else if (name == "traceColor")
{
return formatColor(_colors[Message::PRIO_TRACE]);
}
else if (name == "debugColor")
{
return formatColor(_colors[Message::PRIO_DEBUG]);
}
else if (name == "informationColor")
{
return formatColor(_colors[Message::PRIO_INFORMATION]);
}
else if (name == "noticeColor")
{
return formatColor(_colors[Message::PRIO_NOTICE]);
}
else if (name == "warningColor")
{
return formatColor(_colors[Message::PRIO_WARNING]);
}
else if (name == "errorColor")
{
return formatColor(_colors[Message::PRIO_ERROR]);
}
else if (name == "criticalColor")
{
return formatColor(_colors[Message::PRIO_CRITICAL]);
}
else if (name == "fatalColor")
{
return formatColor(_colors[Message::PRIO_FATAL]);
}
else
{
return Channel::getProperty(name);
}
}
WORD WindowsColorConsoleChannel::parseColor(const std::string& color) const
{
if (icompare(color, "default") == 0)
return _colors[0];
else if (icompare(color, "black") == 0)
return CC_BLACK;
else if (icompare(color, "red") == 0)
return CC_RED;
else if (icompare(color, "green") == 0)
return CC_GREEN;
else if (icompare(color, "brown") == 0)
return CC_BROWN;
else if (icompare(color, "blue") == 0)
return CC_BLUE;
else if (icompare(color, "magenta") == 0)
return CC_MAGENTA;
else if (icompare(color, "cyan") == 0)
return CC_CYAN;
else if (icompare(color, "gray") == 0)
return CC_GRAY;
else if (icompare(color, "darkGray") == 0)
return CC_DARKGRAY;
else if (icompare(color, "lightRed") == 0)
return CC_LIGHTRED;
else if (icompare(color, "lightGreen") == 0)
return CC_LIGHTGREEN;
else if (icompare(color, "yellow") == 0)
return CC_YELLOW;
else if (icompare(color, "lightBlue") == 0)
return CC_LIGHTBLUE;
else if (icompare(color, "lightMagenta") == 0)
return CC_LIGHTMAGENTA;
else if (icompare(color, "lightCyan") == 0)
return CC_LIGHTCYAN;
else if (icompare(color, "white") == 0)
return CC_WHITE;
else throw InvalidArgumentException("Invalid color value", color);
}
std::string WindowsColorConsoleChannel::formatColor(WORD color) const
{
switch (color)
{
case CC_BLACK: return "black";
case CC_RED: return "red";
case CC_GREEN: return "green";
case CC_BROWN: return "brown";
case CC_BLUE: return "blue";
case CC_MAGENTA: return "magenta";
case CC_CYAN: return "cyan";
case CC_GRAY: return "gray";
case CC_DARKGRAY: return "darkGray";
case CC_LIGHTRED: return "lightRed";
case CC_LIGHTGREEN: return "lightGreen";
case CC_YELLOW: return "yellow";
case CC_LIGHTBLUE: return "lightBlue";
case CC_LIGHTMAGENTA: return "lightMagenta";
case CC_LIGHTCYAN: return "lightCyan";
case CC_WHITE: return "white";
default: return "invalid";
}
}
void WindowsColorConsoleChannel::initColors()
{
if (!_isFile)
{
CONSOLE_SCREEN_BUFFER_INFO csbi;
GetConsoleScreenBufferInfo(_hConsole, &csbi);
_colors[0] = csbi.wAttributes;
}
else
{
_colors[0] = CC_WHITE;
}
_colors[Message::PRIO_FATAL] = CC_LIGHTRED;
_colors[Message::PRIO_CRITICAL] = CC_LIGHTRED;
_colors[Message::PRIO_ERROR] = CC_LIGHTRED;
_colors[Message::PRIO_WARNING] = CC_YELLOW;
_colors[Message::PRIO_NOTICE] = _colors[0];
_colors[Message::PRIO_INFORMATION] = _colors[0];
_colors[Message::PRIO_DEBUG] = CC_GRAY;
_colors[Message::PRIO_TRACE] = CC_GRAY;
}
} // namespace Poco
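A usage sketch for the channel above (illustrative and Windows-only; the property name is one of those handled in setProperty()).
#include "Poco/WindowsConsoleChannel.h"
#include "Poco/Message.h"
int main()
{
    Poco::WindowsColorConsoleChannel channel;
    channel.setProperty("warningColor", "yellow");   // parsed by parseColor() above
    Poco::Message msg("demo", "disk nearly full", Poco::Message::PRIO_WARNING);
    channel.log(msg);                                // printed in yellow unless output is redirected to a file
    return 0;
}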

View File

@ -1,188 +0,0 @@
/* adler32.c -- compute the Adler-32 checksum of a data stream
* Copyright (C) 1995-2011, 2016 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* @(#) $Id$ */
#include "zutil.h"
#define local static
local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2));
#define BASE 65521U /* largest prime smaller than 65536 */
#define NMAX 5552
/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
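For concreteness (editor's arithmetic, not in the original source), the bound quoted above holds for NMAX = 5552 and fails for 5553:
/*
   n = 5552:  255*5552*5553/2 + 5553*(BASE-1) = 3930857640 + 363832560 = 4294690200 <= 4294967295 (2^32-1)
   n = 5553:  255*5553*5554/2 + 5554*(BASE-1) = 3932273655 + 363898080 = 4296171735  > 4294967295
*/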
#define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;}
#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
#define DO16(buf) DO8(buf,0); DO8(buf,8);
/* use NO_DIVIDE if your processor does not do division in hardware --
try it both ways to see which is faster */
#ifdef NO_DIVIDE
/* note that this assumes BASE is 65521, where 65536 % 65521 == 15
(thank you to John Reiser for pointing this out) */
# define CHOP(a) \
do { \
unsigned long tmp = a >> 16; \
a &= 0xffffUL; \
a += (tmp << 4) - tmp; \
} while (0)
# define MOD28(a) \
do { \
CHOP(a); \
if (a >= BASE) a -= BASE; \
} while (0)
# define MOD(a) \
do { \
CHOP(a); \
MOD28(a); \
} while (0)
# define MOD63(a) \
do { /* this assumes a is not negative */ \
z_off64_t tmp = a >> 32; \
a &= 0xffffffffL; \
a += (tmp << 8) - (tmp << 5) + tmp; \
tmp = a >> 16; \
a &= 0xffffL; \
a += (tmp << 4) - tmp; \
tmp = a >> 16; \
a &= 0xffffL; \
a += (tmp << 4) - tmp; \
if (a >= BASE) a -= BASE; \
} while (0)
#else
# define MOD(a) a %= BASE
# define MOD28(a) a %= BASE
# define MOD63(a) a %= BASE
#endif
/* ========================================================================= */
uLong ZEXPORT adler32_z(adler, buf, len)
uLong adler;
const Bytef *buf;
z_size_t len;
{
unsigned long sum2;
unsigned n;
/* split Adler-32 into component sums */
sum2 = (adler >> 16) & 0xffff;
adler &= 0xffff;
/* in case user likes doing a byte at a time, keep it fast */
if (len == 1) {
adler += buf[0];
if (adler >= BASE)
adler -= BASE;
sum2 += adler;
if (sum2 >= BASE)
sum2 -= BASE;
return adler | (sum2 << 16);
}
/* initial Adler-32 value (deferred check for len == 1 speed) */
if (buf == Z_NULL)
return 1L;
/* in case short lengths are provided, keep it somewhat fast */
if (len < 16) {
while (len--) {
adler += *buf++;
sum2 += adler;
}
if (adler >= BASE)
adler -= BASE;
MOD28(sum2); /* only added so many BASE's */
return adler | (sum2 << 16);
}
/* do length NMAX blocks -- requires just one modulo operation */
while (len >= NMAX) {
len -= NMAX;
n = NMAX / 16; /* NMAX is divisible by 16 */
do {
DO16(buf); /* 16 sums unrolled */
buf += 16;
} while (--n);
MOD(adler);
MOD(sum2);
}
/* do remaining bytes (less than NMAX, still just one modulo) */
if (len) { /* avoid modulos if none remaining */
while (len >= 16) {
len -= 16;
DO16(buf);
buf += 16;
}
while (len--) {
adler += *buf++;
sum2 += adler;
}
MOD(adler);
MOD(sum2);
}
/* return recombined sums */
return adler | (sum2 << 16);
}
/* ========================================================================= */
uLong ZEXPORT adler32(adler, buf, len)
uLong adler;
const Bytef *buf;
uInt len;
{
return adler32_z(adler, buf, len);
}
/* ========================================================================= */
local uLong adler32_combine_(adler1, adler2, len2)
uLong adler1;
uLong adler2;
z_off64_t len2;
{
unsigned long sum1;
unsigned long sum2;
unsigned rem;
/* for negative len, return invalid adler32 as a clue for debugging */
if (len2 < 0)
return 0xffffffffUL;
/* the derivation of this formula is left as an exercise for the reader */
MOD63(len2); /* assumes len2 >= 0 */
rem = (unsigned)len2;
sum1 = adler1 & 0xffff;
sum2 = rem * sum1;
MOD(sum2);
sum1 += (adler2 & 0xffff) + BASE - 1;
sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem;
if (sum1 >= BASE) sum1 -= BASE;
if (sum1 >= BASE) sum1 -= BASE;
if (sum2 >= ((unsigned long)BASE << 1)) sum2 -= ((unsigned long)BASE << 1);
if (sum2 >= BASE) sum2 -= BASE;
return sum1 | (sum2 << 16);
}
/* ========================================================================= */
uLong ZEXPORT adler32_combine(adler1, adler2, len2)
uLong adler1;
uLong adler2;
z_off_t len2;
{
return adler32_combine_(adler1, adler2, len2);
}
uLong ZEXPORT adler32_combine64(adler1, adler2, len2)
uLong adler1;
uLong adler2;
z_off64_t len2;
{
return adler32_combine_(adler1, adler2, len2);
}
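Typical use of the functions above (editor's sketch; the expected value is the widely quoted Adler-32 of "Wikipedia").
#include <zlib.h>
#include <cassert>
int main()
{
    uLong a = adler32(0L, Z_NULL, 0);                                 // initial value: 1
    a = adler32(a, reinterpret_cast<const Bytef*>("Wikipedia"), 9);
    assert(a == 0x11E60398UL);                                        // s1 = 0x0398, s2 = 0x11E6
    return 0;
}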

View File

@ -1,641 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>
#include "bignum-dtoa.h"
#include "bignum.h"
#include "ieee.h"
namespace double_conversion {
static int NormalizedExponent(uint64_t significand, int exponent) {
ASSERT(significand != 0);
while ((significand & Double::kHiddenBit) == 0) {
significand = significand << 1;
exponent = exponent - 1;
}
return exponent;
}
// Forward declarations:
// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
static int EstimatePower(int exponent);
// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
// and denominator.
static void InitialScaledStartValues(uint64_t significand,
int exponent,
bool lower_boundary_is_closer,
int estimated_power,
bool need_boundary_deltas,
Bignum* numerator,
Bignum* denominator,
Bignum* delta_minus,
Bignum* delta_plus);
// Multiplies numerator/denominator so that its value lies in the range 1-10.
// Returns decimal_point s.t.
// v = numerator'/denominator' * 10^(decimal_point-1)
// where numerator' and denominator' are the values of numerator and
// denominator after the call to this function.
static void FixupMultiply10(int estimated_power, bool is_even,
int* decimal_point,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus);
// Generates digits from the left to the right and stops when the generated
// digits yield the shortest decimal representation of v.
static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus,
bool is_even,
Vector<char> buffer, int* length);
// Generates 'requested_digits' after the decimal point.
static void BignumToFixed(int requested_digits, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length);
// Generates 'count' digits of numerator/denominator.
// Once 'count' digits have been produced rounds the result depending on the
// remainder (remainders of exactly .5 round upwards). Might update the
// decimal_point when rounding up (for example for 0.9999).
static void GenerateCountedDigits(int count, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length);
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* decimal_point) {
ASSERT(v > 0);
ASSERT(!Double(v).IsSpecial());
uint64_t significand;
int exponent;
bool lower_boundary_is_closer;
if (mode == BIGNUM_DTOA_SHORTEST_SINGLE) {
float f = static_cast<float>(v);
ASSERT(f == v);
significand = Single(f).Significand();
exponent = Single(f).Exponent();
lower_boundary_is_closer = Single(f).LowerBoundaryIsCloser();
} else {
significand = Double(v).Significand();
exponent = Double(v).Exponent();
lower_boundary_is_closer = Double(v).LowerBoundaryIsCloser();
}
bool need_boundary_deltas =
(mode == BIGNUM_DTOA_SHORTEST || mode == BIGNUM_DTOA_SHORTEST_SINGLE);
bool is_even = (significand & 1) == 0;
int normalized_exponent = NormalizedExponent(significand, exponent);
// estimated_power might be too low by 1.
int estimated_power = EstimatePower(normalized_exponent);
// Shortcut for Fixed.
// The requested digits correspond to the digits after the point. If the
// number is much too small, then there is no need in trying to get any
// digits.
if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) {
buffer[0] = '\0';
*length = 0;
// Set decimal-point to -requested_digits. This is what Gay does.
// Note that it should not have any effect anyways since the string is
// empty.
*decimal_point = -requested_digits;
return;
}
Bignum numerator;
Bignum denominator;
Bignum delta_minus;
Bignum delta_plus;
// Make sure the bignum can grow large enough. The smallest double equals
// 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
// The maximum double is 1.7976931348623157e308 which needs fewer than
// 308*4 binary digits.
ASSERT(Bignum::kMaxSignificantBits >= 324*4);
InitialScaledStartValues(significand, exponent, lower_boundary_is_closer,
estimated_power, need_boundary_deltas,
&numerator, &denominator,
&delta_minus, &delta_plus);
// We now have v = (numerator / denominator) * 10^estimated_power.
FixupMultiply10(estimated_power, is_even, decimal_point,
&numerator, &denominator,
&delta_minus, &delta_plus);
// We now have v = (numerator / denominator) * 10^(decimal_point-1), and
// 1 <= (numerator + delta_plus) / denominator < 10
switch (mode) {
case BIGNUM_DTOA_SHORTEST:
case BIGNUM_DTOA_SHORTEST_SINGLE:
GenerateShortestDigits(&numerator, &denominator,
&delta_minus, &delta_plus,
is_even, buffer, length);
break;
case BIGNUM_DTOA_FIXED:
BignumToFixed(requested_digits, decimal_point,
&numerator, &denominator,
buffer, length);
break;
case BIGNUM_DTOA_PRECISION:
GenerateCountedDigits(requested_digits, decimal_point,
&numerator, &denominator,
buffer, length);
break;
default:
UNREACHABLE();
}
buffer[*length] = '\0';
}
// The procedure starts generating digits from the left to the right and stops
// when the generated digits yield the shortest decimal representation of v. A
// decimal representation of v is a number lying closer to v than to any other
// double, so it converts to v when read.
//
// This is true if d, the decimal representation, is between m- and m+, the
// upper and lower boundaries. d must be strictly between them if !is_even.
// m- := (numerator - delta_minus) / denominator
// m+ := (numerator + delta_plus) / denominator
//
// Precondition: 0 <= (numerator+delta_plus) / denominator < 10.
// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit
// will be produced. This should be the standard precondition.
static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus,
bool is_even,
Vector<char> buffer, int* length) {
// Small optimization: if delta_minus and delta_plus are the same just reuse
// one of the two bignums.
if (Bignum::Equal(*delta_minus, *delta_plus)) {
delta_plus = delta_minus;
}
*length = 0;
for (;;) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[(*length)++] = static_cast<char>(digit + '0');
// Can we stop already?
// If the remainder of the division is less than the distance to the lower
// boundary we can stop. In this case we simply round down (discarding the
// remainder).
// Similarly we test if we can round up (using the upper boundary).
bool in_delta_room_minus;
bool in_delta_room_plus;
if (is_even) {
in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus);
} else {
in_delta_room_minus = Bignum::Less(*numerator, *delta_minus);
}
if (is_even) {
in_delta_room_plus =
Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
} else {
in_delta_room_plus =
Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
}
if (!in_delta_room_minus && !in_delta_room_plus) {
// Prepare for next iteration.
numerator->Times10();
delta_minus->Times10();
// We optimized delta_plus to be equal to delta_minus (if they share the
// same value). So don't multiply delta_plus if they point to the same
// object.
if (delta_minus != delta_plus) {
delta_plus->Times10();
}
} else if (in_delta_room_minus && in_delta_room_plus) {
// Let's see if 2*numerator < denominator.
// If yes, then the next digit would be < 5 and we can round down.
int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator);
if (compare < 0) {
// Remaining digits are less than .5. -> Round down (== do nothing).
} else if (compare > 0) {
// Remaining digits are more than .5 of denominator. -> Round up.
// Note that the last digit could not be a '9' as otherwise the whole
// loop would have stopped earlier.
// We still have an assert here in case the preconditions were not
// satisfied.
ASSERT(buffer[(*length) - 1] != '9');
buffer[(*length) - 1]++;
} else {
// Halfway case.
// TODO(floitsch): need a way to solve half-way cases.
// For now let's round towards even (since this is what Gay seems to
// do).
if ((buffer[(*length) - 1] - '0') % 2 == 0) {
// Round down => Do nothing.
} else {
ASSERT(buffer[(*length) - 1] != '9');
buffer[(*length) - 1]++;
}
}
return;
} else if (in_delta_room_minus) {
// Round down (== do nothing).
return;
} else { // in_delta_room_plus
// Round up.
// Note again that the last digit could not be '9' since this would have
// stopped the loop earlier.
// We still have an ASSERT here, in case the preconditions were not
// satisfied.
ASSERT(buffer[(*length) -1] != '9');
buffer[(*length) - 1]++;
return;
}
}
}
// Let v = numerator / denominator < 10.
// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
// from left to right. Once 'count' digits have been produced we decide whether
// to round up or down. Remainders of exactly .5 round upwards. Numbers such
// as 9.999999 propagate a carry all the way, and change the
// exponent (decimal_point), when rounding upwards.
static void GenerateCountedDigits(int count, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char> buffer, int* length) {
ASSERT(count >= 0);
for (int i = 0; i < count - 1; ++i) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[i] = static_cast<char>(digit + '0');
// Prepare for next iteration.
numerator->Times10();
}
// Generate the last digit.
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
digit++;
}
ASSERT(digit <= 10);
buffer[count - 1] = static_cast<char>(digit + '0');
// Correct bad digits (in case we had a sequence of '9's). Propagate the
// carry until we hit a non-'9' or until we reach the first digit.
for (int i = count - 1; i > 0; --i) {
if (buffer[i] != '0' + 10) break;
buffer[i] = '0';
buffer[i - 1]++;
}
if (buffer[0] == '0' + 10) {
// Propagate a carry past the top place.
buffer[0] = '1';
(*decimal_point)++;
}
*length = count;
}
// Generates 'requested_digits' after the decimal point. It might omit
// trailing '0's. If the input number is too small then no digits at all are
// generated (ex.: 2 fixed digits for 0.00001).
//
// Input verifies: 1 <= (numerator + delta) / denominator < 10.
static void BignumToFixed(int requested_digits, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length) {
// Note that we have to look at more than just the requested_digits, since
// a number could be rounded up. Example: v=0.5 with requested_digits=0.
// Even though the power of v equals 0 we can't just stop here.
if (-(*decimal_point) > requested_digits) {
// The number is definitively too small.
// Ex: 0.001 with requested_digits == 1.
// Set decimal-point to -requested_digits. This is what Gay does.
// Note that it should not have any effect anyways since the string is
// empty.
*decimal_point = -requested_digits;
*length = 0;
return;
} else if (-(*decimal_point) == requested_digits) {
// We only need to verify if the number rounds down or up.
// Ex: 0.04 and 0.06 with requested_digits == 1.
ASSERT(*decimal_point == -requested_digits);
// Initially the fraction lies in range (1, 10]. Multiply the denominator
// by 10 so that we can compare more easily.
denominator->Times10();
if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
// If the fraction is >= 0.5 then we have to include the rounded
// digit.
buffer[0] = '1';
*length = 1;
(*decimal_point)++;
} else {
// Note that we caught most of similar cases earlier.
*length = 0;
}
return;
} else {
// The requested digits correspond to the digits after the point.
// The variable 'needed_digits' includes the digits before the point.
int needed_digits = (*decimal_point) + requested_digits;
GenerateCountedDigits(needed_digits, decimal_point,
numerator, denominator,
buffer, length);
}
}
// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
// v = f * 2^exponent and 2^52 <= f < 2^53.
// v is hence a normalized double with the given exponent. The output is an
// approximation for the exponent of the decimal approximation .digits * 10^k.
//
// The result might undershoot by 1 in which case 10^k <= v < 10^(k+1).
// Note: this property holds for v's upper boundary m+ too.
//    10^k <= m+ < 10^(k+1).
// (see explanation below).
//
// Examples:
// EstimatePower(0) => 16
// EstimatePower(-52) => 0
//
// Note: e >= 0 => EstimatePower(e) > 0. No similar claim can be made for e < 0.
static int EstimatePower(int exponent) {
// This function estimates log10 of v where v = f*2^e (with e == exponent).
// Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)).
// Note that f is bounded by its container size. Let p = 53 (the double's
// significand size). Then 2^(p-1) <= f < 2^p.
//
// Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close
// to log2(v) the function is simplified to (e+(len(f)-1))/log2(10).
// The computed number undershoots by less than 0.631 (when we compute log3
// and not log10).
//
// Optimization: since we only need an approximated result this computation
// can be performed on 64 bit integers. On x86/x64 architecture the speedup is
// not really measurable, though.
//
// Since we want to avoid overshooting we decrement by 1e-10 so that
// floating-point imprecisions don't affect us.
//
// Explanation for v's boundary m+: the computation takes advantage of
// the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement
// (even for denormals where the delta can be much larger).
const double k1Log10 = 0.30102999566398114; // 1/lg(10)
// For doubles len(f) == 53 (don't forget the hidden bit).
const int kSignificandSize = Double::kSignificandSize;
double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
return static_cast<int>(estimate);
}
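// Illustrative check only (worked out by hand for this note, not upstream
// code): plugging the documented examples into the formula above gives
//   exponent == 0:   ceil((0 + 53 - 1) * 0.30102999566398114 - 1e-10) == 16
//   exponent == -52: ceil((-52 + 53 - 1) * 0.30102999566398114 - 1e-10) == 0
#if 0
static void EstimatePowerExamples() {
  ASSERT(EstimatePower(0) == 16);
  ASSERT(EstimatePower(-52) == 0);
}
#endif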
// See comments for InitialScaledStartValues.
static void InitialScaledStartValuesPositiveExponent(
uint64_t significand, int exponent,
int estimated_power, bool need_boundary_deltas,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// A positive exponent implies a positive power.
ASSERT(estimated_power >= 0);
// Since the estimated_power is positive we simply multiply the denominator
// by 10^estimated_power.
// numerator = v.
numerator->AssignUInt64(significand);
numerator->ShiftLeft(exponent);
// denominator = 10^estimated_power.
denominator->AssignPowerUInt16(10, estimated_power);
if (need_boundary_deltas) {
// Introduce a common denominator so that the deltas to the boundaries are
// integers.
denominator->ShiftLeft(1);
numerator->ShiftLeft(1);
// Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
// denominator (of 2) delta_plus equals 2^e.
delta_plus->AssignUInt16(1);
delta_plus->ShiftLeft(exponent);
// Same for delta_minus. The adjustments if f == 2^p-1 are done later.
delta_minus->AssignUInt16(1);
delta_minus->ShiftLeft(exponent);
}
}
// See comments for InitialScaledStartValues
static void InitialScaledStartValuesNegativeExponentPositivePower(
uint64_t significand, int exponent,
int estimated_power, bool need_boundary_deltas,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// v = f * 2^e with e < 0, and with estimated_power >= 0.
// This means that e is close to 0 (have a look at how estimated_power is
// computed).
// numerator = significand
// since v = significand * 2^exponent this is equivalent to
// numerator = v * 2^-exponent
numerator->AssignUInt64(significand);
// denominator = 10^estimated_power * 2^-exponent (with exponent < 0)
denominator->AssignPowerUInt16(10, estimated_power);
denominator->ShiftLeft(-exponent);
if (need_boundary_deltas) {
// Introduce a common denominator so that the deltas to the boundaries are
// integers.
denominator->ShiftLeft(1);
numerator->ShiftLeft(1);
// Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
// denominator (of 2) delta_plus equals 2^e.
// Given that the denominator already includes v's exponent the distance
// to the boundaries is simply 1.
delta_plus->AssignUInt16(1);
// Same for delta_minus. The adjustments if f == 2^p-1 are done later.
delta_minus->AssignUInt16(1);
}
}
// See comments for InitialScaledStartValues
static void InitialScaledStartValuesNegativeExponentNegativePower(
uint64_t significand, int exponent,
int estimated_power, bool need_boundary_deltas,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// Instead of multiplying the denominator with 10^estimated_power we
// multiply all values (numerator and deltas) by 10^-estimated_power.
// Use numerator as temporary container for power_ten.
Bignum* power_ten = numerator;
power_ten->AssignPowerUInt16(10, -estimated_power);
if (need_boundary_deltas) {
// Since power_ten == numerator we must make a copy of 10^estimated_power
// before we complete the computation of the numerator.
// delta_plus = delta_minus = 10^estimated_power
delta_plus->AssignBignum(*power_ten);
delta_minus->AssignBignum(*power_ten);
}
// numerator = significand * 2 * 10^-estimated_power
// since v = significand * 2^exponent this is equivalent to
// numerator = v * 10^-estimated_power * 2 * 2^-exponent.
// Remember: numerator has been abused as power_ten. So no need to assign it
// to itself.
ASSERT(numerator == power_ten);
numerator->MultiplyByUInt64(significand);
// denominator = 2 * 2^-exponent with exponent < 0.
denominator->AssignUInt16(1);
denominator->ShiftLeft(-exponent);
if (need_boundary_deltas) {
// Introduce a common denominator so that the deltas to the boundaries are
// integers.
numerator->ShiftLeft(1);
denominator->ShiftLeft(1);
// With this shift the boundaries have their correct value, since
// delta_plus = 10^-estimated_power, and
// delta_minus = 10^-estimated_power.
// These assignments have been done earlier.
// The adjustments if f == 2^p-1 (lower boundary is closer) are done later.
}
}
// Let v = significand * 2^exponent.
// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
// and denominator. The functions GenerateShortestDigits and
// GenerateCountedDigits will then convert this ratio to its decimal
// representation d, with the required accuracy.
// Then d * 10^estimated_power is the representation of v.
// (Note: the fraction and the estimated_power might get adjusted before
// generating the decimal representation.)
//
// The initial start values consist of:
// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power.
// - a scaled (common) denominator.
// optionally (used by GenerateShortestDigits to decide if it has the shortest
// decimal converting back to v):
// - v - m-: the distance to the lower boundary.
// - m+ - v: the distance to the upper boundary.
//
// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator.
//
// Let ep == estimated_power, then the returned values will satisfy:
// v / 10^ep = numerator / denominator.
// v's boundaries m- and m+:
// m- / 10^ep == v / 10^ep - delta_minus / denominator
// m+ / 10^ep == v / 10^ep + delta_plus / denominator
// Or in other words:
// m- == v - delta_minus * 10^ep / denominator;
// m+ == v + delta_plus * 10^ep / denominator;
//
// Since 10^(k-1) <= v < 10^k (with k == estimated_power)
// or 10^k <= v < 10^(k+1)
// we then have 0.1 <= numerator/denominator < 1
// or 1 <= numerator/denominator < 10
//
// It is then easy to kickstart the digit-generation routine.
//
// The boundary-deltas are only filled if the mode equals BIGNUM_DTOA_SHORTEST
// or BIGNUM_DTOA_SHORTEST_SINGLE.
static void InitialScaledStartValues(uint64_t significand,
int exponent,
bool lower_boundary_is_closer,
int estimated_power,
bool need_boundary_deltas,
Bignum* numerator,
Bignum* denominator,
Bignum* delta_minus,
Bignum* delta_plus) {
if (exponent >= 0) {
InitialScaledStartValuesPositiveExponent(
significand, exponent, estimated_power, need_boundary_deltas,
numerator, denominator, delta_minus, delta_plus);
} else if (estimated_power >= 0) {
InitialScaledStartValuesNegativeExponentPositivePower(
significand, exponent, estimated_power, need_boundary_deltas,
numerator, denominator, delta_minus, delta_plus);
} else {
InitialScaledStartValuesNegativeExponentNegativePower(
significand, exponent, estimated_power, need_boundary_deltas,
numerator, denominator, delta_minus, delta_plus);
}
if (need_boundary_deltas && lower_boundary_is_closer) {
// The lower boundary is closer at half the distance of "normal" numbers.
// Increase the common denominator and adapt all but the delta_minus.
denominator->ShiftLeft(1); // *2
numerator->ShiftLeft(1); // *2
delta_plus->ShiftLeft(1); // *2
}
}
// This routine multiplies numerator/denominator so that its value lies in the
// range 1-10. That is, after a call to this function we have:
//    1 <= (numerator + delta_plus) / denominator < 10.
// Let numerator be the input before modification and numerator' the argument
// after modification, then the output-parameter decimal_point is such that
// numerator / denominator * 10^estimated_power ==
// numerator' / denominator' * 10^(decimal_point - 1)
// In some cases estimated_power was too low and the condition above already
// holds. We then simply adjust the power so that 10^(k-1) <= v < 10^k (with
// k == estimated_power) but do not touch the numerator or denominator.
// Otherwise the routine multiplies the numerator and the deltas by 10.
static void FixupMultiply10(int estimated_power, bool is_even,
int* decimal_point,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
bool in_range;
if (is_even) {
// For IEEE doubles half-way cases (in decimal system numbers ending with 5)
// are rounded to the closest floating-point number with even significand.
in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
} else {
in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
}
if (in_range) {
// Since numerator + delta_plus >= denominator we already have
// 1 <= numerator/denominator < 10. Simply update the estimated_power.
*decimal_point = estimated_power + 1;
} else {
*decimal_point = estimated_power;
numerator->Times10();
if (Bignum::Equal(*delta_minus, *delta_plus)) {
delta_minus->Times10();
delta_plus->AssignBignum(*delta_minus);
} else {
delta_minus->Times10();
delta_plus->Times10();
}
}
}
} // namespace double_conversion

View File

@ -1,85 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_
#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_
#include "utils.h"
namespace double_conversion
{
enum BignumDtoaMode
{
// Return the shortest correct representation.
// For example the output of 0.299999999999999988897 is (the less accurate but
// correct) 0.3.
BIGNUM_DTOA_SHORTEST,
// Same as BIGNUM_DTOA_SHORTEST but for single-precision floats.
BIGNUM_DTOA_SHORTEST_SINGLE,
// Return a fixed number of digits after the decimal point.
// For instance fixed(0.1, 4) becomes 0.1000
// If the input number is big, the output will be big.
BIGNUM_DTOA_FIXED,
// Return a fixed number of digits, no matter what the exponent is.
BIGNUM_DTOA_PRECISION
};
// Converts the given double 'v' to ascii.
// The result should be interpreted as buffer * 10^(point-length).
// The buffer will be null-terminated.
//
// The input v must be > 0 and different from NaN, and Infinity.
//
// The output depends on the given mode:
// - SHORTEST: produce the least amount of digits for which the internal
// identity requirement is still satisfied. If the digits are printed
// (together with the correct exponent) then reading this number will give
// 'v' again. The buffer will choose the representation that is closest to
// 'v'. If there are two at the same distance, then the number is rounded up.
// In this mode the 'requested_digits' parameter is ignored.
// - FIXED: produces digits necessary to print a given number with
// 'requested_digits' digits after the decimal point. The produced digits
// might be too short in which case the caller has to fill the gaps with '0's.
// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
// buffer="2", point=0.
// Note: the length of the returned buffer has no meaning wrt the significance
// of its digits. That is, just because it contains '0's does not mean that
// any other digit would not satisfy the internal identity requirement.
// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
// Even though the length of produced digits usually equals
// 'requested_digits', the function is allowed to return fewer digits, in
// which case the caller has to fill the missing digits with '0's.
// Halfway cases are again rounded up.
// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
// and a terminating null-character.
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, Vector<char> buffer, int * length, int * point);
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
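// A minimal usage sketch (illustrative only; the buffer size of 128 is an
// arbitrary choice for this note, not an upstream constant):
#if 0
void ExampleShortest()
{
    char digits[128];
    int length = 0;
    int point = 0;
    double_conversion::BignumDtoa(
        0.1, double_conversion::BIGNUM_DTOA_SHORTEST, 0,
        double_conversion::Vector<char>(digits, 128), &length, &point);
    // digits == "1", length == 1, point == 0, i.e. 1 * 10^(0 - 1) == 0.1
}
#endif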

View File

@ -1,766 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "bignum.h"
#include "utils.h"
namespace double_conversion {
Bignum::Bignum()
: bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
for (int i = 0; i < kBigitCapacity; ++i) {
bigits_[i] = 0;
}
}
template<typename S>
static int BitSize(S value) {
(void) value; // Mark variable as used.
return 8 * sizeof(value);
}
// Guaranteed to lie in one Bigit.
void Bignum::AssignUInt16(uint16_t value) {
ASSERT(kBigitSize >= BitSize(value));
Zero();
if (value == 0) return;
EnsureCapacity(1);
bigits_[0] = value;
used_digits_ = 1;
}
void Bignum::AssignUInt64(uint64_t value) {
const int kUInt64Size = 64;
Zero();
if (value == 0) return;
int needed_bigits = kUInt64Size / kBigitSize + 1;
EnsureCapacity(needed_bigits);
for (int i = 0; i < needed_bigits; ++i) {
bigits_[i] = value & kBigitMask;
value = value >> kBigitSize;
}
used_digits_ = needed_bigits;
Clamp();
}
void Bignum::AssignBignum(const Bignum& other) {
exponent_ = other.exponent_;
for (int i = 0; i < other.used_digits_; ++i) {
bigits_[i] = other.bigits_[i];
}
// Clear the excess digits (if there were any).
for (int i = other.used_digits_; i < used_digits_; ++i) {
bigits_[i] = 0;
}
used_digits_ = other.used_digits_;
}
static uint64_t ReadUInt64(Vector<const char> buffer,
int from,
int digits_to_read) {
uint64_t result = 0;
for (int i = from; i < from + digits_to_read; ++i) {
int digit = buffer[i] - '0';
ASSERT(0 <= digit && digit <= 9);
result = result * 10 + digit;
}
return result;
}
void Bignum::AssignDecimalString(Vector<const char> value) {
// 2^64 = 18446744073709551616 > 10^19
const int kMaxUint64DecimalDigits = 19;
Zero();
int length = value.length();
int pos = 0;
// Let's just say that each digit needs 4 bits.
while (length >= kMaxUint64DecimalDigits) {
uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
pos += kMaxUint64DecimalDigits;
length -= kMaxUint64DecimalDigits;
MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
AddUInt64(digits);
}
uint64_t digits = ReadUInt64(value, pos, length);
MultiplyByPowerOfTen(length);
AddUInt64(digits);
Clamp();
}
static int HexCharValue(char c) {
if ('0' <= c && c <= '9') return c - '0';
if ('a' <= c && c <= 'f') return 10 + c - 'a';
ASSERT('A' <= c && c <= 'F');
return 10 + c - 'A';
}
void Bignum::AssignHexString(Vector<const char> value) {
Zero();
int length = value.length();
int needed_bigits = length * 4 / kBigitSize + 1;
EnsureCapacity(needed_bigits);
int string_index = length - 1;
for (int i = 0; i < needed_bigits - 1; ++i) {
// These bigits are guaranteed to be "full".
Chunk current_bigit = 0;
for (int j = 0; j < kBigitSize / 4; j++) {
current_bigit += HexCharValue(value[string_index--]) << (j * 4);
}
bigits_[i] = current_bigit;
}
used_digits_ = needed_bigits - 1;
Chunk most_significant_bigit = 0; // Could be = 0;
for (int j = 0; j <= string_index; ++j) {
most_significant_bigit <<= 4;
most_significant_bigit += HexCharValue(value[j]);
}
if (most_significant_bigit != 0) {
bigits_[used_digits_] = most_significant_bigit;
used_digits_++;
}
Clamp();
}
void Bignum::AddUInt64(uint64_t operand) {
if (operand == 0) return;
Bignum other;
other.AssignUInt64(operand);
AddBignum(other);
}
void Bignum::AddBignum(const Bignum& other) {
ASSERT(IsClamped());
ASSERT(other.IsClamped());
// If this has a greater exponent than other append zero-bigits to this.
// After this call exponent_ <= other.exponent_.
Align(other);
// There are two possibilities:
// aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
// bbbbb 00000000
// ----------------
// ccccccccccc 0000
// or
// aaaaaaaaaa 0000
// bbbbbbbbb 0000000
// -----------------
// cccccccccccc 0000
// In both cases we might need a carry bigit.
EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
Chunk carry = 0;
int bigit_pos = other.exponent_ - exponent_;
ASSERT(bigit_pos >= 0);
for (int i = 0; i < other.used_digits_; ++i) {
Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
bigits_[bigit_pos] = sum & kBigitMask;
carry = sum >> kBigitSize;
bigit_pos++;
}
while (carry != 0) {
Chunk sum = bigits_[bigit_pos] + carry;
bigits_[bigit_pos] = sum & kBigitMask;
carry = sum >> kBigitSize;
bigit_pos++;
}
used_digits_ = Max(bigit_pos, used_digits_);
ASSERT(IsClamped());
}
void Bignum::SubtractBignum(const Bignum& other) {
ASSERT(IsClamped());
ASSERT(other.IsClamped());
// We require this to be bigger than other.
ASSERT(LessEqual(other, *this));
Align(other);
int offset = other.exponent_ - exponent_;
Chunk borrow = 0;
int i;
for (i = 0; i < other.used_digits_; ++i) {
ASSERT((borrow == 0) || (borrow == 1));
Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
bigits_[i + offset] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
}
while (borrow != 0) {
Chunk difference = bigits_[i + offset] - borrow;
bigits_[i + offset] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
++i;
}
Clamp();
}
void Bignum::ShiftLeft(int shift_amount) {
if (used_digits_ == 0) return;
exponent_ += shift_amount / kBigitSize;
int local_shift = shift_amount % kBigitSize;
EnsureCapacity(used_digits_ + 1);
BigitsShiftLeft(local_shift);
}
void Bignum::MultiplyByUInt32(uint32_t factor) {
if (factor == 1) return;
if (factor == 0) {
Zero();
return;
}
if (used_digits_ == 0) return;
// The product of a bigit with the factor is of size kBigitSize + 32.
// Assert that this number + 1 (for the carry) fits into double chunk.
ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
DoubleChunk carry = 0;
for (int i = 0; i < used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
bigits_[i] = static_cast<Chunk>(product & kBigitMask);
carry = (product >> kBigitSize);
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
used_digits_++;
carry >>= kBigitSize;
}
}
void Bignum::MultiplyByUInt64(uint64_t factor) {
if (factor == 1) return;
if (factor == 0) {
Zero();
return;
}
ASSERT(kBigitSize < 32);
uint64_t carry = 0;
uint64_t low = factor & 0xFFFFFFFF;
uint64_t high = factor >> 32;
for (int i = 0; i < used_digits_; ++i) {
uint64_t product_low = low * bigits_[i];
uint64_t product_high = high * bigits_[i];
uint64_t tmp = (carry & kBigitMask) + product_low;
bigits_[i] = tmp & kBigitMask;
carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
(product_high << (32 - kBigitSize));
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
used_digits_++;
carry >>= kBigitSize;
}
}
void Bignum::MultiplyByPowerOfTen(int exponent) {
const uint64_t kFive27 = UINT64_2PART_C(0x6765c793, fa10079d);
const uint16_t kFive1 = 5;
const uint16_t kFive2 = kFive1 * 5;
const uint16_t kFive3 = kFive2 * 5;
const uint16_t kFive4 = kFive3 * 5;
const uint16_t kFive5 = kFive4 * 5;
const uint16_t kFive6 = kFive5 * 5;
const uint32_t kFive7 = kFive6 * 5;
const uint32_t kFive8 = kFive7 * 5;
const uint32_t kFive9 = kFive8 * 5;
const uint32_t kFive10 = kFive9 * 5;
const uint32_t kFive11 = kFive10 * 5;
const uint32_t kFive12 = kFive11 * 5;
const uint32_t kFive13 = kFive12 * 5;
const uint32_t kFive1_to_12[] =
{ kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
ASSERT(exponent >= 0);
if (exponent == 0) return;
if (used_digits_ == 0) return;
// We shift by exponent at the end just before returning.
int remaining_exponent = exponent;
while (remaining_exponent >= 27) {
MultiplyByUInt64(kFive27);
remaining_exponent -= 27;
}
while (remaining_exponent >= 13) {
MultiplyByUInt32(kFive13);
remaining_exponent -= 13;
}
if (remaining_exponent > 0) {
MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
}
ShiftLeft(exponent);
}
void Bignum::Square() {
ASSERT(IsClamped());
int product_length = 2 * used_digits_;
EnsureCapacity(product_length);
// Comba multiplication: compute each column separately.
// Example: r = a2a1a0 * b2b1b0.
// r = 1 * a0b0 +
// 10 * (a1b0 + a0b1) +
// 100 * (a2b0 + a1b1 + a0b2) +
// 1000 * (a2b1 + a1b2) +
// 10000 * a2b2
//
// In the worst case we have to accumulate nb-digits products of digit*digit.
//
// Assert that the additional number of bits in a DoubleChunk are enough to
// sum up used_digits of Bigit*Bigit.
if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) {
UNIMPLEMENTED();
}
DoubleChunk accumulator = 0;
// First shift the digits so we don't overwrite them.
int copy_offset = used_digits_;
for (int i = 0; i < used_digits_; ++i) {
bigits_[copy_offset + i] = bigits_[i];
}
// We have two loops to avoid some 'if's in the loop.
for (int i = 0; i < used_digits_; ++i) {
// Process temporary digit i with power i.
// The sum of the two indices must be equal to i.
int bigit_index1 = i;
int bigit_index2 = 0;
// Sum all of the sub-products.
while (bigit_index1 >= 0) {
Chunk chunk1 = bigits_[copy_offset + bigit_index1];
Chunk chunk2 = bigits_[copy_offset + bigit_index2];
accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
bigit_index1--;
bigit_index2++;
}
bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
accumulator >>= kBigitSize;
}
for (int i = used_digits_; i < product_length; ++i) {
int bigit_index1 = used_digits_ - 1;
int bigit_index2 = i - bigit_index1;
// Invariant: sum of both indices is again equal to i.
// Inner loop runs 0 times on last iteration, emptying accumulator.
while (bigit_index2 < used_digits_) {
Chunk chunk1 = bigits_[copy_offset + bigit_index1];
Chunk chunk2 = bigits_[copy_offset + bigit_index2];
accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
bigit_index1--;
bigit_index2++;
}
// The overwritten bigits_[i] will never be read in further loop iterations,
// because bigit_index1 and bigit_index2 are always greater
// than i - used_digits_.
bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
accumulator >>= kBigitSize;
}
// Since the result was guaranteed to lie inside the number the
// accumulator must be 0 now.
ASSERT(accumulator == 0);
// Don't forget to update the used_digits and the exponent.
used_digits_ = product_length;
exponent_ *= 2;
Clamp();
}
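// Illustrative sketch only (not upstream code): the same column-wise (Comba)
// accumulation on base-10 digits. Squaring 123, stored least-significant
// digit first as {3, 2, 1}, accumulates the columns 3*3, 3*2+2*3,
// 3*1+2*2+1*3, 2*1+1*2 and 1*1, propagates the carries, and yields 15129.
#if 0
static int SquareThreeDecimalDigits(const int digits[3]) {
  int result_digits[6] = {0, 0, 0, 0, 0, 0};
  long long accumulator = 0;
  for (int i = 0; i < 6; ++i) {
    for (int j = 0; j <= i; ++j) {
      int k = i - j;
      if (j < 3 && k < 3) accumulator += (long long)digits[j] * digits[k];
    }
    result_digits[i] = (int)(accumulator % 10);
    accumulator /= 10;
  }
  int value = 0;
  for (int i = 5; i >= 0; --i) value = value * 10 + result_digits[i];
  return value;  // 15129 for {3, 2, 1}
}
#endif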
void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
ASSERT(base != 0);
ASSERT(power_exponent >= 0);
if (power_exponent == 0) {
AssignUInt16(1);
return;
}
Zero();
int shifts = 0;
// We expect base to be in range 2-32, and most often to be 10.
// It does not make much sense to implement different algorithms for counting
// the bits.
while ((base & 1) == 0) {
base >>= 1;
shifts++;
}
int bit_size = 0;
int tmp_base = base;
while (tmp_base != 0) {
tmp_base >>= 1;
bit_size++;
}
int final_size = bit_size * power_exponent;
// 1 extra bigit for the shifting, and one for rounded final_size.
EnsureCapacity(final_size / kBigitSize + 2);
// Left to Right exponentiation.
int mask = 1;
while (power_exponent >= mask) mask <<= 1;
// The mask is now pointing to the bit above the most significant 1-bit of
// power_exponent.
// Get rid of the first 1-bit.
mask >>= 2;
uint64_t this_value = base;
bool delayed_multiplication = false;
const uint64_t max_32bits = 0xFFFFFFFF;
while (mask != 0 && this_value <= max_32bits) {
this_value = this_value * this_value;
// Verify that there is enough space in this_value to perform the
// multiplication. The first bit_size bits must be 0.
if ((power_exponent & mask) != 0) {
uint64_t base_bits_mask =
~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
bool high_bits_zero = (this_value & base_bits_mask) == 0;
if (high_bits_zero) {
this_value *= base;
} else {
delayed_multiplication = true;
}
}
mask >>= 1;
}
AssignUInt64(this_value);
if (delayed_multiplication) {
MultiplyByUInt32(base);
}
// Now do the same thing as a bignum.
while (mask != 0) {
Square();
if ((power_exponent & mask) != 0) {
MultiplyByUInt32(base);
}
mask >>= 1;
}
// And finally add the saved shifts.
ShiftLeft(shifts * power_exponent);
}
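// Illustrative sketch only (not upstream code): the same left-to-right binary
// exponentiation on a plain integer, for power_exponent >= 1 and a result that
// fits into 64 bits. For base == 10 and power_exponent == 13 (binary 1101) the
// loop squares once per remaining bit and multiplies by the base for every set
// bit, yielding 10^13.
#if 0
static uint64_t PowerLeftToRight(uint64_t base, int power_exponent) {
  int mask = 1;
  while (power_exponent >= mask) mask <<= 1;
  mask >>= 2;  // skip the leading 1-bit; it is covered by the initial value
  uint64_t result = base;
  while (mask != 0) {
    result = result * result;
    if ((power_exponent & mask) != 0) result *= base;
    mask >>= 1;
  }
  return result;
}
#endif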
// Precondition: this/other < 16bit.
uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
ASSERT(IsClamped());
ASSERT(other.IsClamped());
ASSERT(other.used_digits_ > 0);
// Easy case: if we have fewer digits than the divisor then the result is 0.
// Note: this handles the case where this == 0, too.
if (BigitLength() < other.BigitLength()) {
return 0;
}
Align(other);
uint16_t result = 0;
// Start by removing multiples of 'other' until both numbers have the same
// number of digits.
while (BigitLength() > other.BigitLength()) {
// This naive approach is extremely inefficient if `this` divided by other
// is big. This function is implemented for doubleToString where
// the result should be small (less than 10).
ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
ASSERT(bigits_[used_digits_ - 1] < 0x10000);
// Remove the multiples of the first digit.
// Example this = 23 and other equals 9. -> Remove 2 multiples.
result += static_cast<uint16_t>(bigits_[used_digits_ - 1]);
SubtractTimes(other, bigits_[used_digits_ - 1]);
}
ASSERT(BigitLength() == other.BigitLength());
// Both bignums are at the same length now.
// Since other has more than 0 digits we know that the access to
// bigits_[used_digits_ - 1] is safe.
Chunk this_bigit = bigits_[used_digits_ - 1];
Chunk other_bigit = other.bigits_[other.used_digits_ - 1];
if (other.used_digits_ == 1) {
// Shortcut for easy (and common) case.
int quotient = this_bigit / other_bigit;
bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient;
ASSERT(quotient < 0x10000);
result += static_cast<uint16_t>(quotient);
Clamp();
return result;
}
int division_estimate = this_bigit / (other_bigit + 1);
ASSERT(division_estimate < 0x10000);
result += static_cast<uint16_t>(division_estimate);
SubtractTimes(other, division_estimate);
if (other_bigit * (division_estimate + 1) > this_bigit) {
// No need to even try to subtract. Even if other's remaining digits were 0
// another subtraction would be too much.
return result;
}
while (LessEqual(other, *this)) {
SubtractBignum(other);
result++;
}
return result;
}
template<typename S>
static int SizeInHexChars(S number) {
ASSERT(number > 0);
int result = 0;
while (number != 0) {
number >>= 4;
result++;
}
return result;
}
static char HexCharOfValue(int value) {
ASSERT(0 <= value && value <= 16);
if (value < 10) return static_cast<char>(value + '0');
return static_cast<char>(value - 10 + 'A');
}
bool Bignum::ToHexString(char* buffer, int buffer_size) const {
ASSERT(IsClamped());
// Each bigit must be printable as separate hex-character.
ASSERT(kBigitSize % 4 == 0);
const int kHexCharsPerBigit = kBigitSize / 4;
if (used_digits_ == 0) {
if (buffer_size < 2) return false;
buffer[0] = '0';
buffer[1] = '\0';
return true;
}
// We add 1 for the terminating '\0' character.
int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
if (needed_chars > buffer_size) return false;
int string_index = needed_chars - 1;
buffer[string_index--] = '\0';
for (int i = 0; i < exponent_; ++i) {
for (int j = 0; j < kHexCharsPerBigit; ++j) {
buffer[string_index--] = '0';
}
}
for (int i = 0; i < used_digits_ - 1; ++i) {
Chunk current_bigit = bigits_[i];
for (int j = 0; j < kHexCharsPerBigit; ++j) {
buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
current_bigit >>= 4;
}
}
// And finally the last bigit.
Chunk most_significant_bigit = bigits_[used_digits_ - 1];
while (most_significant_bigit != 0) {
buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
most_significant_bigit >>= 4;
}
return true;
}
Bignum::Chunk Bignum::BigitAt(int index) const {
if (index >= BigitLength()) return 0;
if (index < exponent_) return 0;
return bigits_[index - exponent_];
}
int Bignum::Compare(const Bignum& a, const Bignum& b) {
ASSERT(a.IsClamped());
ASSERT(b.IsClamped());
int bigit_length_a = a.BigitLength();
int bigit_length_b = b.BigitLength();
if (bigit_length_a < bigit_length_b) return -1;
if (bigit_length_a > bigit_length_b) return +1;
for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) {
Chunk bigit_a = a.BigitAt(i);
Chunk bigit_b = b.BigitAt(i);
if (bigit_a < bigit_b) return -1;
if (bigit_a > bigit_b) return +1;
// Otherwise they are equal up to this digit. Try the next digit.
}
return 0;
}
int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
ASSERT(a.IsClamped());
ASSERT(b.IsClamped());
ASSERT(c.IsClamped());
if (a.BigitLength() < b.BigitLength()) {
return PlusCompare(b, a, c);
}
if (a.BigitLength() + 1 < c.BigitLength()) return -1;
if (a.BigitLength() > c.BigitLength()) return +1;
// The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than
// 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one
// of 'a'.
if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
return -1;
}
Chunk borrow = 0;
// Starting at min_exponent all digits are == 0. So no need to compare them.
int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_);
for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
Chunk chunk_a = a.BigitAt(i);
Chunk chunk_b = b.BigitAt(i);
Chunk chunk_c = c.BigitAt(i);
Chunk sum = chunk_a + chunk_b;
if (sum > chunk_c + borrow) {
return +1;
} else {
borrow = chunk_c + borrow - sum;
if (borrow > 1) return -1;
borrow <<= kBigitSize;
}
}
if (borrow == 0) return 0;
return -1;
}
void Bignum::Clamp() {
while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
used_digits_--;
}
if (used_digits_ == 0) {
// Zero.
exponent_ = 0;
}
}
bool Bignum::IsClamped() const {
return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
}
void Bignum::Zero() {
for (int i = 0; i < used_digits_; ++i) {
bigits_[i] = 0;
}
used_digits_ = 0;
exponent_ = 0;
}
void Bignum::Align(const Bignum& other) {
if (exponent_ > other.exponent_) {
// If "X" represents a "hidden" digit (by the exponent) then we are in the
// following case (a == this, b == other):
// a: aaaaaaXXXX or a: aaaaaXXX
// b: bbbbbbX b: bbbbbbbbXX
// We replace some of the hidden digits (X) of a with 0 digits.
// a: aaaaaa000X or a: aaaaa0XX
int zero_digits = exponent_ - other.exponent_;
EnsureCapacity(used_digits_ + zero_digits);
for (int i = used_digits_ - 1; i >= 0; --i) {
bigits_[i + zero_digits] = bigits_[i];
}
for (int i = 0; i < zero_digits; ++i) {
bigits_[i] = 0;
}
used_digits_ += zero_digits;
exponent_ -= zero_digits;
ASSERT(used_digits_ >= 0);
ASSERT(exponent_ >= 0);
}
}
void Bignum::BigitsShiftLeft(int shift_amount) {
ASSERT(shift_amount < kBigitSize);
ASSERT(shift_amount >= 0);
Chunk carry = 0;
for (int i = 0; i < used_digits_; ++i) {
Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask;
carry = new_carry;
}
if (carry != 0) {
bigits_[used_digits_] = carry;
used_digits_++;
}
}
void Bignum::SubtractTimes(const Bignum& other, int factor) {
ASSERT(exponent_ <= other.exponent_);
if (factor < 3) {
for (int i = 0; i < factor; ++i) {
SubtractBignum(other);
}
return;
}
Chunk borrow = 0;
int exponent_diff = other.exponent_ - exponent_;
for (int i = 0; i < other.used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
DoubleChunk remove = borrow + product;
Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
bigits_[i + exponent_diff] = difference & kBigitMask;
borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
(remove >> kBigitSize));
}
for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) {
if (borrow == 0) return;
Chunk difference = bigits_[i] - borrow;
bigits_[i] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
}
Clamp();
}
} // namespace double_conversion

View File

@ -1,138 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_BIGNUM_H_
#define DOUBLE_CONVERSION_BIGNUM_H_
#include "utils.h"
namespace double_conversion
{
class Bignum
{
public:
// 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
// This bignum can encode much bigger numbers, since it contains an
// exponent.
static const int kMaxSignificantBits = 3584;
Bignum();
void AssignUInt16(uint16_t value);
void AssignUInt64(uint64_t value);
void AssignBignum(const Bignum & other);
void AssignDecimalString(Vector<const char> value);
void AssignHexString(Vector<const char> value);
void AssignPowerUInt16(uint16_t base, int exponent);
void AddUInt16(uint16_t operand);
void AddUInt64(uint64_t operand);
void AddBignum(const Bignum & other);
// Precondition: this >= other.
void SubtractBignum(const Bignum & other);
void Square();
void ShiftLeft(int shift_amount);
void MultiplyByUInt32(uint32_t factor);
void MultiplyByUInt64(uint64_t factor);
void MultiplyByPowerOfTen(int exponent);
void Times10() { return MultiplyByUInt32(10); }
// Pseudocode:
// int result = this / other;
// this = this % other;
// In the worst case this function is in O(this/other).
uint16_t DivideModuloIntBignum(const Bignum & other);
bool ToHexString(char * buffer, int buffer_size) const;
// Returns
// -1 if a < b,
// 0 if a == b, and
// +1 if a > b.
static int Compare(const Bignum & a, const Bignum & b);
static bool Equal(const Bignum & a, const Bignum & b) { return Compare(a, b) == 0; }
static bool LessEqual(const Bignum & a, const Bignum & b) { return Compare(a, b) <= 0; }
static bool Less(const Bignum & a, const Bignum & b) { return Compare(a, b) < 0; }
// Returns Compare(a + b, c);
static int PlusCompare(const Bignum & a, const Bignum & b, const Bignum & c);
// Returns a + b == c
static bool PlusEqual(const Bignum & a, const Bignum & b, const Bignum & c) { return PlusCompare(a, b, c) == 0; }
// Returns a + b <= c
static bool PlusLessEqual(const Bignum & a, const Bignum & b, const Bignum & c) { return PlusCompare(a, b, c) <= 0; }
// Returns a + b < c
static bool PlusLess(const Bignum & a, const Bignum & b, const Bignum & c) { return PlusCompare(a, b, c) < 0; }
private:
typedef uint32_t Chunk;
typedef uint64_t DoubleChunk;
static const int kChunkSize = sizeof(Chunk) * 8;
static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
// With a bigit size of 28 we lose some bits, but a double still fits easily
// into two chunks, and more importantly we can use the Comba multiplication.
static const int kBigitSize = 28;
static const Chunk kBigitMask = (1 << kBigitSize) - 1;
// Every instance allocates kBigitLength chunks on the stack. Bignums cannot
// grow. There are no checks if the stack-allocated space is sufficient.
static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
void EnsureCapacity(int size)
{
if (size > kBigitCapacity)
{
UNREACHABLE();
}
}
void Align(const Bignum & other);
void Clamp();
bool IsClamped() const;
void Zero();
// Requires this to have enough capacity (no tests done).
// Updates used_digits_ if necessary.
// shift_amount must be < kBigitSize.
void BigitsShiftLeft(int shift_amount);
// BigitLength includes the "hidden" digits encoded in the exponent.
int BigitLength() const { return used_digits_ + exponent_; }
Chunk BigitAt(int index) const;
void SubtractTimes(const Bignum & other, int factor);
Chunk bigits_buffer_[kBigitCapacity];
// A vector backed by bigits_buffer_. This way accesses to the array are
// checked for out-of-bounds errors.
Vector<Chunk> bigits_;
int used_digits_;
// The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize).
int exponent_;
DISALLOW_COPY_AND_ASSIGN(Bignum);
};
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_BIGNUM_H_
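// A minimal usage sketch (illustrative only; the buffer size is an arbitrary
// choice for this note):
#if 0
void ExampleBignum()
{
    double_conversion::Bignum b;
    b.AssignUInt16(1);
    b.MultiplyByPowerOfTen(3);       // b now holds 1000
    char hex[16];
    b.ToHexString(hex, sizeof(hex)); // hex == "3E8"
}
#endif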

View File

@ -1,176 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
#include <limits.h>
#include <math.h>
#include "utils.h"
#include "cached-powers.h"
namespace double_conversion {
struct CachedPower {
uint64_t significand;
int16_t binary_exponent;
int16_t decimal_exponent;
};
static const CachedPower kCachedPowers[] = {
{UINT64_2PART_C(0xfa8fd5a0, 081c0288), -1220, -348},
{UINT64_2PART_C(0xbaaee17f, a23ebf76), -1193, -340},
{UINT64_2PART_C(0x8b16fb20, 3055ac76), -1166, -332},
{UINT64_2PART_C(0xcf42894a, 5dce35ea), -1140, -324},
{UINT64_2PART_C(0x9a6bb0aa, 55653b2d), -1113, -316},
{UINT64_2PART_C(0xe61acf03, 3d1a45df), -1087, -308},
{UINT64_2PART_C(0xab70fe17, c79ac6ca), -1060, -300},
{UINT64_2PART_C(0xff77b1fc, bebcdc4f), -1034, -292},
{UINT64_2PART_C(0xbe5691ef, 416bd60c), -1007, -284},
{UINT64_2PART_C(0x8dd01fad, 907ffc3c), -980, -276},
{UINT64_2PART_C(0xd3515c28, 31559a83), -954, -268},
{UINT64_2PART_C(0x9d71ac8f, ada6c9b5), -927, -260},
{UINT64_2PART_C(0xea9c2277, 23ee8bcb), -901, -252},
{UINT64_2PART_C(0xaecc4991, 4078536d), -874, -244},
{UINT64_2PART_C(0x823c1279, 5db6ce57), -847, -236},
{UINT64_2PART_C(0xc2109436, 4dfb5637), -821, -228},
{UINT64_2PART_C(0x9096ea6f, 3848984f), -794, -220},
{UINT64_2PART_C(0xd77485cb, 25823ac7), -768, -212},
{UINT64_2PART_C(0xa086cfcd, 97bf97f4), -741, -204},
{UINT64_2PART_C(0xef340a98, 172aace5), -715, -196},
{UINT64_2PART_C(0xb23867fb, 2a35b28e), -688, -188},
{UINT64_2PART_C(0x84c8d4df, d2c63f3b), -661, -180},
{UINT64_2PART_C(0xc5dd4427, 1ad3cdba), -635, -172},
{UINT64_2PART_C(0x936b9fce, bb25c996), -608, -164},
{UINT64_2PART_C(0xdbac6c24, 7d62a584), -582, -156},
{UINT64_2PART_C(0xa3ab6658, 0d5fdaf6), -555, -148},
{UINT64_2PART_C(0xf3e2f893, dec3f126), -529, -140},
{UINT64_2PART_C(0xb5b5ada8, aaff80b8), -502, -132},
{UINT64_2PART_C(0x87625f05, 6c7c4a8b), -475, -124},
{UINT64_2PART_C(0xc9bcff60, 34c13053), -449, -116},
{UINT64_2PART_C(0x964e858c, 91ba2655), -422, -108},
{UINT64_2PART_C(0xdff97724, 70297ebd), -396, -100},
{UINT64_2PART_C(0xa6dfbd9f, b8e5b88f), -369, -92},
{UINT64_2PART_C(0xf8a95fcf, 88747d94), -343, -84},
{UINT64_2PART_C(0xb9447093, 8fa89bcf), -316, -76},
{UINT64_2PART_C(0x8a08f0f8, bf0f156b), -289, -68},
{UINT64_2PART_C(0xcdb02555, 653131b6), -263, -60},
{UINT64_2PART_C(0x993fe2c6, d07b7fac), -236, -52},
{UINT64_2PART_C(0xe45c10c4, 2a2b3b06), -210, -44},
{UINT64_2PART_C(0xaa242499, 697392d3), -183, -36},
{UINT64_2PART_C(0xfd87b5f2, 8300ca0e), -157, -28},
{UINT64_2PART_C(0xbce50864, 92111aeb), -130, -20},
{UINT64_2PART_C(0x8cbccc09, 6f5088cc), -103, -12},
{UINT64_2PART_C(0xd1b71758, e219652c), -77, -4},
{UINT64_2PART_C(0x9c400000, 00000000), -50, 4},
{UINT64_2PART_C(0xe8d4a510, 00000000), -24, 12},
{UINT64_2PART_C(0xad78ebc5, ac620000), 3, 20},
{UINT64_2PART_C(0x813f3978, f8940984), 30, 28},
{UINT64_2PART_C(0xc097ce7b, c90715b3), 56, 36},
{UINT64_2PART_C(0x8f7e32ce, 7bea5c70), 83, 44},
{UINT64_2PART_C(0xd5d238a4, abe98068), 109, 52},
{UINT64_2PART_C(0x9f4f2726, 179a2245), 136, 60},
{UINT64_2PART_C(0xed63a231, d4c4fb27), 162, 68},
{UINT64_2PART_C(0xb0de6538, 8cc8ada8), 189, 76},
{UINT64_2PART_C(0x83c7088e, 1aab65db), 216, 84},
{UINT64_2PART_C(0xc45d1df9, 42711d9a), 242, 92},
{UINT64_2PART_C(0x924d692c, a61be758), 269, 100},
{UINT64_2PART_C(0xda01ee64, 1a708dea), 295, 108},
{UINT64_2PART_C(0xa26da399, 9aef774a), 322, 116},
{UINT64_2PART_C(0xf209787b, b47d6b85), 348, 124},
{UINT64_2PART_C(0xb454e4a1, 79dd1877), 375, 132},
{UINT64_2PART_C(0x865b8692, 5b9bc5c2), 402, 140},
{UINT64_2PART_C(0xc83553c5, c8965d3d), 428, 148},
{UINT64_2PART_C(0x952ab45c, fa97a0b3), 455, 156},
{UINT64_2PART_C(0xde469fbd, 99a05fe3), 481, 164},
{UINT64_2PART_C(0xa59bc234, db398c25), 508, 172},
{UINT64_2PART_C(0xf6c69a72, a3989f5c), 534, 180},
{UINT64_2PART_C(0xb7dcbf53, 54e9bece), 561, 188},
{UINT64_2PART_C(0x88fcf317, f22241e2), 588, 196},
{UINT64_2PART_C(0xcc20ce9b, d35c78a5), 614, 204},
{UINT64_2PART_C(0x98165af3, 7b2153df), 641, 212},
{UINT64_2PART_C(0xe2a0b5dc, 971f303a), 667, 220},
{UINT64_2PART_C(0xa8d9d153, 5ce3b396), 694, 228},
{UINT64_2PART_C(0xfb9b7cd9, a4a7443c), 720, 236},
{UINT64_2PART_C(0xbb764c4c, a7a44410), 747, 244},
{UINT64_2PART_C(0x8bab8eef, b6409c1a), 774, 252},
{UINT64_2PART_C(0xd01fef10, a657842c), 800, 260},
{UINT64_2PART_C(0x9b10a4e5, e9913129), 827, 268},
{UINT64_2PART_C(0xe7109bfb, a19c0c9d), 853, 276},
{UINT64_2PART_C(0xac2820d9, 623bf429), 880, 284},
{UINT64_2PART_C(0x80444b5e, 7aa7cf85), 907, 292},
{UINT64_2PART_C(0xbf21e440, 03acdd2d), 933, 300},
{UINT64_2PART_C(0x8e679c2f, 5e44ff8f), 960, 308},
{UINT64_2PART_C(0xd433179d, 9c8cb841), 986, 316},
{UINT64_2PART_C(0x9e19db92, b4e31ba9), 1013, 324},
{UINT64_2PART_C(0xeb96bf6e, badf77d9), 1039, 332},
{UINT64_2PART_C(0xaf87023b, 9bf0ee6b), 1066, 340},
};
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// Difference between the decimal exponents in the table above.
const int PowersOfTenCache::kDecimalExponentDistance = 8;
const int PowersOfTenCache::kMinDecimalExponent = -348;
const int PowersOfTenCache::kMaxDecimalExponent = 340;
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int min_exponent,
int max_exponent,
DiyFp* power,
int* decimal_exponent) {
int kQ = DiyFp::kSignificandSize;
double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
int foo = kCachedPowersOffset;
int index =
(foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
ASSERT(0 <= index && index < kCachedPowersLength);
CachedPower cached_power = kCachedPowers[index];
ASSERT(min_exponent <= cached_power.binary_exponent);
(void) max_exponent; // Mark variable as used.
ASSERT(cached_power.binary_exponent <= max_exponent);
*decimal_exponent = cached_power.decimal_exponent;
*power = DiyFp(cached_power.significand, cached_power.binary_exponent);
}
void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
DiyFp* power,
int* found_exponent) {
ASSERT(kMinDecimalExponent <= requested_exponent);
ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
int index =
(requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
CachedPower cached_power = kCachedPowers[index];
*power = DiyFp(cached_power.significand, cached_power.binary_exponent);
*found_exponent = cached_power.decimal_exponent;
ASSERT(*found_exponent <= requested_exponent);
ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
}
} // namespace double_conversion

View File

@ -1,60 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_
#define DOUBLE_CONVERSION_CACHED_POWERS_H_
#include "diy-fp.h"
namespace double_conversion
{
class PowersOfTenCache
{
public:
// Not all powers of ten are cached. The decimal exponent of two neighboring
// cached numbers will differ by kDecimalExponentDistance.
static const int kDecimalExponentDistance;
static const int kMinDecimalExponent;
static const int kMaxDecimalExponent;
// Returns a cached power-of-ten with a binary exponent in the range
// [min_exponent; max_exponent] (boundaries included).
static void GetCachedPowerForBinaryExponentRange(int min_exponent, int max_exponent, DiyFp * power, int * decimal_exponent);
// Returns a cached power of ten x ~= 10^k such that
// k <= requested_exponent < k + kDecimalExponentDistance.
// The given requested_exponent must satisfy
// kMinDecimalExponent <= requested_exponent, and
// requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
static void GetCachedPowerForDecimalExponent(int requested_exponent, DiyFp * power, int * found_exponent);
};
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_
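// A minimal usage sketch (illustrative only; the expected value follows from
// the table in cached-powers.cc):
#if 0
void ExampleCachedPower()
{
    double_conversion::DiyFp power;
    int found_exponent = 0;
    double_conversion::PowersOfTenCache::GetCachedPowerForDecimalExponent(
        0, &power, &found_exponent);
    // found_exponent == -4: the cache hands out 10^-4 and the caller is
    // expected to account for the remaining factor of 10^4 itself.
}
#endif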

View File

@ -1,86 +0,0 @@
/* compress.c -- compress a memory buffer
* Copyright (C) 1995-2005, 2014, 2016 Jean-loup Gailly, Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* @(#) $Id$ */
#define ZLIB_INTERNAL
#include "zlib.h"
/* ===========================================================================
Compresses the source buffer into the destination buffer. The level
parameter has the same meaning as in deflateInit. sourceLen is the byte
length of the source buffer. Upon entry, destLen is the total size of the
destination buffer, which must be at least 0.1% larger than sourceLen plus
12 bytes. Upon exit, destLen is the actual size of the compressed buffer.
compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_BUF_ERROR if there was not enough room in the output buffer,
Z_STREAM_ERROR if the level parameter is invalid.
*/
int ZEXPORT compress2 (dest, destLen, source, sourceLen, level)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
uLong sourceLen;
int level;
{
z_stream stream;
int err;
const uInt max = (uInt)-1;
uLong left;
left = *destLen;
*destLen = 0;
stream.zalloc = (alloc_func)0;
stream.zfree = (free_func)0;
stream.opaque = (voidpf)0;
err = deflateInit(&stream, level);
if (err != Z_OK) return err;
stream.next_out = dest;
stream.avail_out = 0;
stream.next_in = (z_const Bytef *)source;
stream.avail_in = 0;
do {
if (stream.avail_out == 0) {
stream.avail_out = left > (uLong)max ? max : (uInt)left;
left -= stream.avail_out;
}
if (stream.avail_in == 0) {
stream.avail_in = sourceLen > (uLong)max ? max : (uInt)sourceLen;
sourceLen -= stream.avail_in;
}
err = deflate(&stream, sourceLen ? Z_NO_FLUSH : Z_FINISH);
} while (err == Z_OK);
*destLen = stream.total_out;
deflateEnd(&stream);
return err == Z_STREAM_END ? Z_OK : err;
}
/* ===========================================================================
*/
int ZEXPORT compress (dest, destLen, source, sourceLen)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
uLong sourceLen;
{
return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION);
}
/* ===========================================================================
If the default memLevel or windowBits for deflateInit() is changed, then
this function needs to be updated.
*/
uLong ZEXPORT compressBound (sourceLen)
uLong sourceLen;
{
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13;
}
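/* A minimal usage sketch (illustrative only; assumes <stdlib.h> for malloc and
   free, and trims error handling): size the destination buffer with
   compressBound() and let compress2() report the compressed length back
   through destLen. */
#if 0
int example_compress(const Bytef *source, uLong sourceLen)
{
    uLongf destLen = compressBound(sourceLen);
    Bytef *dest = (Bytef *)malloc(destLen);
    int err;
    if (dest == NULL) return Z_MEM_ERROR;
    err = compress2(dest, &destLen, source, sourceLen, Z_DEFAULT_COMPRESSION);
    /* on Z_OK, destLen now holds the size of the compressed data */
    free(dest);
    return err;
}
#endif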

View File

@ -1,444 +0,0 @@
/* crc32.c -- compute the CRC-32 of a data stream
* Copyright (C) 1995-2006, 2010, 2011, 2012 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Thanks to Rodney Brown <rbrown64@csc.com.au> for his contribution of faster
* CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing
* tables for updating the shift register in one step with three exclusive-ors
* instead of four steps with four exclusive-ors. This results in about a
* factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3.
*/
/* @(#) $Id$ */
/*
Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore
protection on the static variables used to control the first-use generation
of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should
first call get_crc_table() to initialize the tables before allowing more than
one thread to use crc32().
DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h.
*/
#ifdef MAKECRCH
# include <stdio.h>
# ifndef DYNAMIC_CRC_TABLE
# define DYNAMIC_CRC_TABLE
# endif /* !DYNAMIC_CRC_TABLE */
#endif /* MAKECRCH */
#include "zutil.h" /* for STDC and FAR definitions */
#define local static
/* Definitions for doing the crc four data bytes at a time. */
#if !defined(NOBYFOUR) && defined(Z_U4)
# define BYFOUR
#endif
#ifdef BYFOUR
local unsigned long crc32_little OF((unsigned long,
const unsigned char FAR *, z_size_t));
local unsigned long crc32_big OF((unsigned long,
const unsigned char FAR *, z_size_t));
# define TBLS 8
#else
# define TBLS 1
#endif /* BYFOUR */
/* Local functions for crc concatenation */
local unsigned long gf2_matrix_times OF((unsigned long *mat,
unsigned long vec));
local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat));
local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2));
#ifdef DYNAMIC_CRC_TABLE
local volatile int crc_table_empty = 1;
local z_crc_t FAR crc_table[TBLS][256];
local void make_crc_table OF((void));
#ifdef MAKECRCH
local void write_table OF((FILE *, const z_crc_t FAR *));
#endif /* MAKECRCH */
/*
Generate tables for a byte-wise 32-bit CRC calculation on the polynomial:
x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1.
Polynomials over GF(2) are represented in binary, one bit per coefficient,
with the lowest powers in the most significant bit. Then adding polynomials
is just exclusive-or, and multiplying a polynomial by x is a right shift by
one. If we call the above polynomial p, and represent a byte as the
polynomial q, also with the lowest power in the most significant bit (so the
byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p,
where a mod b means the remainder after dividing a by b.
This calculation is done using the shift-register method of multiplying and
taking the remainder. The register is initialized to zero, and for each
incoming bit, x^32 is added mod p to the register if the bit is a one (where
x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by
x (which is shifting right by one and adding x^32 mod p if the bit shifted
out is a one). We start with the highest power (least significant bit) of
q and repeat for all eight bits of q.
The first table is simply the CRC of all possible eight bit values. This is
all the information needed to generate CRCs on data a byte at a time for all
combinations of CRC register values and incoming bytes. The remaining tables
allow for word-at-a-time CRC calculation for both big-endian and little-
endian machines, where a word is four bytes.
*/
local void make_crc_table()
{
z_crc_t c;
int n, k;
z_crc_t poly; /* polynomial exclusive-or pattern */
/* terms of polynomial defining this crc (except x^32): */
static volatile int first = 1; /* flag to limit concurrent making */
static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
/* See if another task is already doing this (not thread-safe, but better
than nothing -- significantly reduces duration of vulnerability in
case the advice about DYNAMIC_CRC_TABLE is ignored) */
if (first) {
first = 0;
/* make exclusive-or pattern from polynomial (0xedb88320UL) */
poly = 0;
for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++)
poly |= (z_crc_t)1 << (31 - p[n]);
/* generate a crc for every 8-bit value */
for (n = 0; n < 256; n++) {
c = (z_crc_t)n;
for (k = 0; k < 8; k++)
c = c & 1 ? poly ^ (c >> 1) : c >> 1;
crc_table[0][n] = c;
}
#ifdef BYFOUR
/* generate crc for each value followed by one, two, and three zeros,
and then the byte reversal of those as well as the first table */
for (n = 0; n < 256; n++) {
c = crc_table[0][n];
crc_table[4][n] = ZSWAP32(c);
for (k = 1; k < 4; k++) {
c = crc_table[0][c & 0xff] ^ (c >> 8);
crc_table[k][n] = c;
crc_table[k + 4][n] = ZSWAP32(c);
}
}
#endif /* BYFOUR */
crc_table_empty = 0;
}
else { /* not first */
/* wait for the other guy to finish (not efficient, but rare) */
while (crc_table_empty)
;
}
#ifdef MAKECRCH
/* write out CRC tables to crc32.h */
{
FILE *out;
out = fopen("crc32.h", "w");
if (out == NULL) return;
fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n");
fprintf(out, " * Generated automatically by crc32.c\n */\n\n");
fprintf(out, "local const z_crc_t FAR ");
fprintf(out, "crc_table[TBLS][256] =\n{\n {\n");
write_table(out, crc_table[0]);
# ifdef BYFOUR
fprintf(out, "#ifdef BYFOUR\n");
for (k = 1; k < 8; k++) {
fprintf(out, " },\n {\n");
write_table(out, crc_table[k]);
}
fprintf(out, "#endif\n");
# endif /* BYFOUR */
fprintf(out, " }\n};\n");
fclose(out);
}
#endif /* MAKECRCH */
}
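/* Illustrative sketch (not part of the original zlib sources): the table-free,
   bit-at-a-time form of the shift-register calculation described above.  It
   returns the same value as crc32() for any input, only much more slowly.
   The function name is an assumption made for the example; the pre/post
   inversion and the reflected polynomial 0xedb88320UL mirror crc32_z() and
   make_crc_table() in this file.
 */
#if 0
unsigned long crc32_bitwise(unsigned long crc, const unsigned char *buf,
                            z_size_t len)
{
    int k;

    crc = ~crc & 0xffffffffUL;
    while (len--) {
        crc ^= *buf++;
        for (k = 0; k < 8; k++)
            crc = crc & 1 ? (crc >> 1) ^ 0xedb88320UL : crc >> 1;
    }
    return ~crc & 0xffffffffUL;
}
#endif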
#ifdef MAKECRCH
local void write_table(out, table)
FILE *out;
const z_crc_t FAR *table;
{
int n;
for (n = 0; n < 256; n++)
fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ",
(unsigned long)(table[n]),
n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", "));
}
#endif /* MAKECRCH */
#else /* !DYNAMIC_CRC_TABLE */
/* ========================================================================
* Tables of CRC-32s of all single-byte values, made by make_crc_table().
*/
#include "crc32.h"
#endif /* DYNAMIC_CRC_TABLE */
/* =========================================================================
* This function can be used by asm versions of crc32()
*/
const z_crc_t FAR * ZEXPORT get_crc_table()
{
#ifdef DYNAMIC_CRC_TABLE
if (crc_table_empty)
make_crc_table();
#endif /* DYNAMIC_CRC_TABLE */
return (const z_crc_t FAR *)crc_table;
}
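/* Illustrative sketch (not part of the original zlib sources) of the
   initialization discipline recommended in the DYNAMIC_CRC_TABLE note near
   the top of this file: force table generation once, from a single thread,
   before any worker threads call crc32().  The surrounding function is
   hypothetical.
 */
#if 0
#include <zlib.h>

void before_spawning_threads(void)
{
    (void)get_crc_table();      /* builds the CRC tables exactly once, here */
    /* ... now it is safe for multiple threads to call crc32() ... */
}
#endif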
/* ========================================================================= */
#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8)
#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1
/* ========================================================================= */
unsigned long ZEXPORT crc32_z(crc, buf, len)
unsigned long crc;
const unsigned char FAR *buf;
z_size_t len;
{
if (buf == Z_NULL) return 0UL;
#ifdef DYNAMIC_CRC_TABLE
if (crc_table_empty)
make_crc_table();
#endif /* DYNAMIC_CRC_TABLE */
#ifdef BYFOUR
if (sizeof(void *) == sizeof(ptrdiff_t)) {
z_crc_t endian;
endian = 1;
if (*((unsigned char *)(&endian)))
return crc32_little(crc, buf, len);
else
return crc32_big(crc, buf, len);
}
#endif /* BYFOUR */
crc = crc ^ 0xffffffffUL;
while (len >= 8) {
DO8;
len -= 8;
}
if (len) do {
DO1;
} while (--len);
return crc ^ 0xffffffffUL;
}
/* ========================================================================= */
unsigned long ZEXPORT crc32(crc, buf, len)
unsigned long crc;
const unsigned char FAR *buf;
uInt len;
{
return crc32_z(crc, buf, len);
}
#ifdef BYFOUR
/*
This BYFOUR code accesses the passed unsigned char * buffer with a 32-bit
integer pointer type. This violates the strict aliasing rule, where a
compiler can assume, for optimization purposes, that two pointers to
fundamentally different types won't ever point to the same memory. This can
manifest as a problem only if one of the pointers is written to. This code
only reads from those pointers. So long as this code remains isolated in
this compilation unit, there won't be a problem. For this reason, this code
should not be copied and pasted into a compilation unit in which other code
writes to the buffer that is passed to these routines.
*/
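/* Illustrative sketch (not part of the original zlib sources): the usual
   strict-aliasing-safe alternative to the pointer cast used below is to load
   each word with memcpy(), which compilers generally turn into a single load.
   This file intentionally does not take that route, for the reasons given
   above; the sketch only makes the trade-off concrete.
 */
#if 0
#include <string.h>

static z_crc_t load_word(const unsigned char FAR *p)
{
    z_crc_t w;
    memcpy(&w, p, sizeof(w));   /* defined behaviour, no aliasing violation */
    return w;
}
#endif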
/* ========================================================================= */
#define DOLIT4 c ^= *buf4++; \
c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \
crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]
#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4
/* ========================================================================= */
local unsigned long crc32_little(crc, buf, len)
unsigned long crc;
const unsigned char FAR *buf;
z_size_t len;
{
register z_crc_t c;
register const z_crc_t FAR *buf4;
c = (z_crc_t)crc;
c = ~c;
while (len && ((ptrdiff_t)buf & 3)) {
c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
len--;
}
buf4 = (const z_crc_t FAR *)(const void FAR *)buf;
while (len >= 32) {
DOLIT32;
len -= 32;
}
while (len >= 4) {
DOLIT4;
len -= 4;
}
buf = (const unsigned char FAR *)buf4;
if (len) do {
c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
} while (--len);
c = ~c;
return (unsigned long)c;
}
/* ========================================================================= */
#define DOBIG4 c ^= *buf4++; \
c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
/* ========================================================================= */
local unsigned long crc32_big(crc, buf, len)
unsigned long crc;
const unsigned char FAR *buf;
z_size_t len;
{
register z_crc_t c;
register const z_crc_t FAR *buf4;
c = ZSWAP32((z_crc_t)crc);
c = ~c;
while (len && ((ptrdiff_t)buf & 3)) {
c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
len--;
}
buf4 = (const z_crc_t FAR *)(const void FAR *)buf;
while (len >= 32) {
DOBIG32;
len -= 32;
}
while (len >= 4) {
DOBIG4;
len -= 4;
}
buf = (const unsigned char FAR *)buf4;
if (len) do {
c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
} while (--len);
c = ~c;
return (unsigned long)(ZSWAP32(c));
}
#endif /* BYFOUR */
#define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */
/* ========================================================================= */
local unsigned long gf2_matrix_times(mat, vec)
unsigned long *mat;
unsigned long vec;
{
unsigned long sum;
sum = 0;
while (vec) {
if (vec & 1)
sum ^= *mat;
vec >>= 1;
mat++;
}
return sum;
}
/* ========================================================================= */
local void gf2_matrix_square(square, mat)
unsigned long *square;
unsigned long *mat;
{
int n;
for (n = 0; n < GF2_DIM; n++)
square[n] = gf2_matrix_times(mat, mat[n]);
}
/* ========================================================================= */
local uLong crc32_combine_(crc1, crc2, len2)
uLong crc1;
uLong crc2;
z_off64_t len2;
{
int n;
unsigned long row;
unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */
unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */
/* degenerate case (also disallow negative lengths) */
if (len2 <= 0)
return crc1;
/* put operator for one zero bit in odd */
odd[0] = 0xedb88320UL; /* CRC-32 polynomial */
row = 1;
for (n = 1; n < GF2_DIM; n++) {
odd[n] = row;
row <<= 1;
}
/* put operator for two zero bits in even */
gf2_matrix_square(even, odd);
/* put operator for four zero bits in odd */
gf2_matrix_square(odd, even);
/* apply len2 zeros to crc1 (first square will put the operator for one
zero byte, eight zero bits, in even) */
do {
/* apply zeros operator for this bit of len2 */
gf2_matrix_square(even, odd);
if (len2 & 1)
crc1 = gf2_matrix_times(even, crc1);
len2 >>= 1;
/* if no more bits set, then done */
if (len2 == 0)
break;
/* another iteration of the loop with odd and even swapped */
gf2_matrix_square(odd, even);
if (len2 & 1)
crc1 = gf2_matrix_times(odd, crc1);
len2 >>= 1;
/* if no more bits set, then done */
} while (len2 != 0);
/* return combined crc */
crc1 ^= crc2;
return crc1;
}
/* ========================================================================= */
uLong ZEXPORT crc32_combine(crc1, crc2, len2)
uLong crc1;
uLong crc2;
z_off_t len2;
{
return crc32_combine_(crc1, crc2, len2);
}
uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
uLong crc1;
uLong crc2;
z_off64_t len2;
{
return crc32_combine_(crc1, crc2, len2);
}
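/* Illustrative sketch (not part of the original zlib sources): the identity
   that crc32_combine() provides.  For two buffers a (length la) and b
   (length lb), combining their independently computed CRCs equals the CRC of
   their concatenation; the function and variable names are assumptions made
   for the example.
 */
#if 0
#include <assert.h>
#include <zlib.h>

void combine_example(const unsigned char *a, uInt la,
                     const unsigned char *b, uInt lb)
{
    uLong crc_a  = crc32(crc32(0L, Z_NULL, 0), a, la);
    uLong crc_b  = crc32(crc32(0L, Z_NULL, 0), b, lb);
    uLong crc_ab = crc32(crc_a, b, lb);          /* CRC of a followed by b */

    assert(crc32_combine(crc_a, crc_b, (z_off_t)lb) == crc_ab);
}
#endif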

@ -1,241 +0,0 @@
/* crc32.h -- tables for rapid CRC calculation
* Generated automatically by crc32.c
*/
local const z_crc_t FAR crc_table[TBLS][256]
= {{0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL,
0x79dcb8a4UL, 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL,
0xf3b97148UL, 0x84be41deUL, 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, 0x646ba8c0UL, 0xfd62f97aUL,
0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, 0xa2677172UL,
0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL,
0xcfba9599UL, 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL,
0xb6662d3dUL, 0x76dc4190UL, 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, 0x9fbfe4a5UL, 0xe8b8d433UL,
0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, 0x6b6b51f4UL,
0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL,
0xd4bb30e2UL, 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL,
0x44042d73UL, 0x33031de5UL, 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, 0xc90c2086UL, 0x5768b525UL,
0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, 0x2eb40d81UL,
0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL,
0xf00f9344UL, 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL,
0x89d32be0UL, 0x10da7a5aUL, 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, 0xd6d6a3e8UL, 0xa1d1937eUL,
0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, 0x36034af6UL,
0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL,
0xb5d0cf31UL, 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL,
0x72076785UL, 0x05005713UL, 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, 0xe5d5be0dUL, 0x7cdcefb7UL,
0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, 0x18b74777UL,
0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL,
0x40df0b66UL, 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL,
0x24b4a3a6UL, 0xbad03605UL, 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, 0x5d681b02UL, 0x2a6f2b94UL,
0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, 0x2d02ef8dUL
#ifdef BYFOUR
},
{0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL, 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL,
0xd1c2bb49UL, 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL, 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL,
0x78f470d3UL, 0x61ef4192UL, 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL, 0x9b00a918UL, 0xb02dfadbUL,
0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL, 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL, 0xbea97761UL,
0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL, 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL, 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL,
0x891c9175UL, 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL, 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL,
0x58de2a3cUL, 0xf0794f05UL, 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL, 0xa623e883UL, 0xbf38d9c2UL,
0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL, 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL, 0xbabb5d54UL,
0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL, 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL, 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL,
0x4ed03864UL, 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL, 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL,
0xc94824abUL, 0xd05315eaUL, 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL, 0x04122a35UL, 0x4b53bcf2UL,
0x52488db3UL, 0x7965de70UL, 0x607eef31UL, 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL, 0x9a9107bbUL,
0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL, 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL, 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL,
0x71418a1aUL, 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL, 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL,
0xa0833153UL, 0x8bae6290UL, 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL, 0xae07bce9UL, 0xb71c8da8UL,
0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL, 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL, 0x54e85463UL,
0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL, 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL, 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL,
0x516bd0f5UL, 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL, 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL,
0x9da070c8UL, 0x84bb4189UL, 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL, 0x7e54a903UL, 0x5579fac0UL,
0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL, 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL, 0xce7953d8UL,
0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL, 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL, 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL,
0xa4911b66UL, 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL, 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL,
0x3f91b27eUL, 0x70d024b9UL, 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL, 0xee530937UL, 0xf7483876UL,
0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL, 0x9324fd72UL},
{0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL, 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL,
0x0fd13b8fUL, 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL, 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL,
0x1fa2771eUL, 0x1e601d29UL, 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL, 0x13f798ffUL, 0x11b126a6UL,
0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL, 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL, 0x3a0bf8b9UL,
0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL, 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL, 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL,
0x20e69922UL, 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL, 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL,
0x2f37a2adUL, 0x709a8dc0UL, 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL, 0x7417f172UL, 0x75d59b45UL,
0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL, 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL, 0x6cbc2eb0UL,
0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL, 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL, 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL,
0x4a917579UL, 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL, 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL,
0x41cd3244UL, 0x400f5873UL, 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL, 0x56b7d609UL, 0x53f8c08cUL,
0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL, 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL, 0x5c29fb03UL,
0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL, 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL, 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL,
0xfd13b8f0UL, 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL, 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL,
0xf2c2837fUL, 0xf0843d26UL, 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL, 0xd9785d60UL, 0xd8ba3757UL,
0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL, 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL, 0xd4efd8b6UL,
0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL, 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL, 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL,
0xcd866d43UL, 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL, 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL,
0x9522eaf2UL, 0x94e080c5UL, 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL, 0x99770513UL, 0x9b31bb4aUL,
0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL, 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL, 0x88c623b5UL,
0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL, 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL, 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL,
0xa4755576UL, 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL, 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL,
0xb782cd89UL, 0xb2cddb0cUL, 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL, 0xb853f606UL, 0xb9919c31UL,
0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL, 0xbe9834edUL},
{0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL, 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL,
0x7d084f8aUL, 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL, 0x58631056UL, 0x5019579fUL, 0xe8a530faUL,
0xfa109f14UL, 0x42acf871UL, 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL, 0x2d111815UL, 0x3fa4b7fbUL,
0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL, 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL, 0xb28700d0UL,
0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL, 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL, 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL,
0xd540a77dUL, 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL, 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL,
0xa848e8f7UL, 0x9b14583dUL, 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL, 0xbe7f07e1UL, 0x06c36084UL,
0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL, 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL, 0xcb0d0fa2UL,
0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL, 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL, 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL,
0x299358edUL, 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL, 0x462eb889UL, 0x549b1767UL, 0xec277002UL,
0x71f048bbUL, 0xc94c2fdeUL, 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL, 0x798a0f72UL, 0xe45d37cbUL,
0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL, 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL, 0x99557841UL,
0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL, 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL, 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL,
0xbd40e1a4UL, 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL, 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL,
0xc048ae2eUL, 0xd2fd01c0UL, 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL, 0x4d6b1905UL, 0xf5d77e60UL,
0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL, 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL, 0x22d6f961UL,
0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL, 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL, 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL,
0xef189647UL, 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL, 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL,
0x5326b1daUL, 0xeb9ad6bfUL, 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL, 0x842736dbUL, 0x96929935UL,
0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL, 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL, 0xbb838120UL,
0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL, 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL, 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL,
0xb9c2a15cUL, 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL, 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL,
0x94d3b949UL, 0x090481f0UL, 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL, 0xe9dbf6c3UL, 0x516791a6UL,
0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL, 0xde0506f1UL},
{0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL, 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL,
0xa4b8dc79UL, 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL, 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL,
0x4871b9f3UL, 0xde41be84UL, 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL, 0xc0a86b64UL, 0x7af962fdUL,
0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL, 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL, 0x727167a2UL,
0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL, 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL, 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL,
0x9995bacfUL, 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL, 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL,
0x3d2d66b6UL, 0x9041dc76UL, 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL, 0xa5e4bf9fUL, 0x33d4b8e8UL,
0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL, 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL, 0xf4516b6bUL,
0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL, 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL, 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL,
0xe230bbd4UL, 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL, 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL,
0x732d0444UL, 0xe51d0333UL, 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL, 0x86200cc9UL, 0x25b56857UL,
0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL, 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL, 0x810db42eUL,
0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL, 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL, 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL,
0x44930ff0UL, 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL, 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL,
0xe02bd389UL, 0x5a7ada10UL, 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL, 0xe8a3d6d6UL, 0x7e93d1a1UL,
0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL, 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL, 0xf64a0336UL,
0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL, 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL, 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL,
0x31cfd0b5UL, 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL, 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL,
0x85670772UL, 0x13570005UL, 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL, 0x0dbed5e5UL, 0xb7efdc7cUL,
0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL, 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL, 0x7747b718UL,
0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL, 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL, 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL,
0x660bdf40UL, 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL, 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL,
0xa6a3b424UL, 0x0536d0baUL, 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL, 0x021b685dUL, 0x942b6f2aUL,
0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL, 0x8def022dUL},
{0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL, 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL,
0x49bbc2d1UL, 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL, 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL,
0xd370f478UL, 0x9241ef61UL, 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL, 0x18a9009bUL, 0xdbfa2db0UL,
0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL, 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL, 0x6177a9beUL,
0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL, 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL, 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL,
0x75911c89UL, 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL, 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL,
0x3c2ade58UL, 0x054f79f0UL, 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL, 0x83e823a6UL, 0xc2d938bfUL,
0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL, 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL, 0x545dbbbaUL,
0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL, 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL, 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL,
0x6438d04eUL, 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL, 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL,
0xab2448c9UL, 0xea1553d0UL, 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL, 0x352a1204UL, 0xf2bc534bUL,
0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL, 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL, 0xbb07919aUL,
0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL, 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL, 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL,
0x1a8a4171UL, 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL, 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL,
0x533183a0UL, 0x9062ae8bUL, 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL, 0xe9bc07aeUL, 0xa88d1cb7UL,
0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL, 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL, 0x6354e854UL,
0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL, 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL, 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL,
0xf5d06b51UL, 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL, 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL,
0xc870a09dUL, 0x8941bb84UL, 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL, 0x03a9547eUL, 0xc0fa7955UL,
0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL, 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL, 0xd85379ceUL,
0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL, 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL, 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL,
0x661b91a4UL, 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL, 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL,
0x7eb2913fUL, 0xb924d070UL, 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL, 0x370953eeUL, 0x763848f7UL,
0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL, 0x72fd2493UL},
{0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL, 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL,
0x8f3bd10fUL, 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL, 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL,
0x1e77a21fUL, 0x291d601eUL, 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL, 0xff98f713UL, 0xa626b111UL,
0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL, 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL, 0xb9f80b3aUL,
0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL, 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL, 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL,
0x2299e620UL, 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL, 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL,
0xada2372fUL, 0xc08d9a70UL, 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL, 0x72f11774UL, 0x459bd575UL,
0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL, 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL, 0xb02ebc6cUL,
0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL, 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL, 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL,
0x7975914aUL, 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL, 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL,
0x4432cd41UL, 0x73580f40UL, 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL, 0x09d6b756UL, 0x8cc0f853UL,
0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL, 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL, 0x03fb295cUL,
0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL, 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL, 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL,
0xf0b813fdUL, 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL, 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL,
0x7f83c2f2UL, 0x263d84f0UL, 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL, 0x605d78d9UL, 0x5737bad8UL,
0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL, 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL, 0xb6d8efd4UL,
0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL, 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL, 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL,
0x436d86cdUL, 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL, 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL,
0xf2ea2295UL, 0xc580e094UL, 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL, 0x13057799UL, 0x4abb319bUL,
0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL, 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL, 0xb523c688UL,
0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL, 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL, 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL,
0x765575a4UL, 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL, 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL,
0x89cd82b7UL, 0x0cdbcdb2UL, 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL, 0x06f653b8UL, 0x319c91b9UL,
0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL, 0xed3498beUL},
{0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL, 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL,
0x8a4f087dUL, 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL, 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL,
0x149f10faUL, 0x71f8ac42UL, 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL, 0x1518112dUL, 0xfbb7a43fUL,
0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL, 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL, 0xd00087b2UL,
0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL, 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL, 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL,
0x7da740d5UL, 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL, 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL,
0xf7e848a8UL, 0x3d58149bUL, 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL, 0xe1077fbeUL, 0x8460c306UL,
0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL, 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL, 0xa20f0dcbUL,
0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL, 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL, 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL,
0xed589329UL, 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL, 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL,
0xbb48f071UL, 0xde2f4cc9UL, 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL, 0x720f8a79UL, 0xcb375de4UL,
0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL, 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL, 0x41785599UL,
0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL, 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL, 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL,
0xa4e140bdUL, 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL, 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL,
0x2eae48c0UL, 0xc001fdd2UL, 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL, 0x05196b4dUL, 0x607ed7f5UL,
0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL, 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL, 0x61f9d622UL,
0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL, 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL, 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL,
0x479618efUL, 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL, 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL,
0xdab12653UL, 0xbfd69aebUL, 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL, 0xdb362784UL, 0x35999296UL,
0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL, 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL, 0x208183bbUL,
0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL, 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL, 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL,
0x5ca1c2b9UL, 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL, 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL,
0x49b9d394UL, 0xf0810409UL, 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL, 0xc3f6dbe9UL, 0xa6916751UL,
0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL, 0xf10605deUL
#endif
}};

Some files were not shown because too many files have changed in this diff.