Merge branch 'master' into fix-pr-max-execution-timeout-leaf

commit 3eb010ec8a
Igor Nikonov, 2024-10-15 23:11:02 +02:00, committed by GitHub
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in the database)
3376 changed files with 86433 additions and 41450 deletions


@@ -16,3 +16,6 @@
 # Applied Black formatter for Python code
 e6f5a3f98b21ba99cf274a9833797889e020a2b3
+# Enabling clang-tidy readability-else-no-return rule
+67c1e89d90ef576e62f8b1c68269742a3c6f9b1e


@@ -15,7 +15,7 @@ assignees: ''
 **Use case**
-> A clear and concise description of what is the intended usage scenario is.
+> A clear and concise description of what the intended usage scenario is.
 **Describe the solution you'd like**


@@ -4,7 +4,6 @@ self-hosted-runner:
   - func-tester
   - func-tester-aarch64
   - fuzzer-unit-tester
-  - stress-tester
   - style-checker
   - style-checker-aarch64
   - release-maker


@@ -4,15 +4,31 @@ description: Prints workflow debug info
 runs:
   using: "composite"
   steps:
-    - name: Print envs
+    - name: Envs, event.json and contexts
       shell: bash
       run: |
-        echo "::group::Envs"
-        env
-        echo "::endgroup::"
-    - name: Print Event.json
-      shell: bash
-      run: |
-        echo "::group::Event.json"
+        echo '::group::Environment variables'
+        env | sort
+        echo '::endgroup::'
+
+        echo '::group::event.json'
         python3 -m json.tool "$GITHUB_EVENT_PATH"
-        echo "::endgroup::"
+        echo '::endgroup::'
+
+        cat << 'EOF'
+        ::group::github context
+        ${{ toJSON(github) }}
+        ::endgroup::
+        ::group::env context
+        ${{ toJSON(env) }}
+        ::endgroup::
+        ::group::runner context
+        ${{ toJSON(runner) }}
+        ::endgroup::
+        ::group::job context
+        ${{ toJSON(job) }}
+        ::endgroup::
+        EOF


@@ -27,6 +27,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
@@ -227,18 +229,26 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
 #############################################################################################
-  IntegrationTestsRelease:
-    needs: [RunConfig, BuilderDebRelease]
+  IntegrationTestsAsanOldAnalyzer:
+    needs: [RunConfig, BuilderDebAsan]
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Integration tests (release)
-      runner_type: stress-tester
+      test_name: Integration tests (asan, old analyzer)
+      runner_type: func-tester
+      data: ${{ needs.RunConfig.outputs.data }}
+  IntegrationTestsTsan:
+    needs: [RunConfig, BuilderDebTsan]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Integration tests (tsan)
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
     if: ${{ !cancelled() }}
@@ -248,7 +258,8 @@ jobs:
       - FunctionalStatelessTestAsan
       - FunctionalStatefulTestDebug
       - StressTestTsan
-      - IntegrationTestsRelease
+      - IntegrationTestsTsan
+      - IntegrationTestsAsanOldAnalyzer
       - CompatibilityCheckX86
       - CompatibilityCheckAarch64
     runs-on: [self-hosted, style-checker]


@@ -33,6 +33,8 @@ jobs:
           clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cherry pick
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"


@@ -3,6 +3,9 @@ name: CreateRelease
 concurrency:
   group: release
+env:
+  PYTHONUNBUFFERED: 1
+
 'on':
   workflow_dispatch:
     inputs:
@@ -56,13 +59,13 @@
       GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
     runs-on: [self-hosted, release-maker]
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Prepare Release Info
         shell: bash
         run: |


@@ -1,4 +1,5 @@
 name: Build docker images
+
 'on':
   workflow_call:
     inputs:
@@ -11,6 +12,10 @@ name: Build docker images
       required: false
       type: boolean
       default: false
+
+env:
+  PYTHONUNBUFFERED: 1
+
 jobs:
   DockerBuildAarch64:
     runs-on: [self-hosted, style-checker-aarch64]


@@ -8,27 +8,28 @@ on: # yamllint disable-line rule:truthy
   schedule:
     - cron: '0 */6 * * *'
   workflow_dispatch:
+
 jobs:
   RunConfig:
     runs-on: [self-hosted, style-checker-aarch64]
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |
           echo "::group::configure CI run"
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
           echo "::endgroup::"
           echo "::group::CI run configure results"
           python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
           echo "::endgroup::"


@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Merge sync PR
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"


@@ -14,14 +14,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cancel PR workflow
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run


@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |


@@ -25,17 +25,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
-      - name: Cancel previous Sync PR workflow
-        run: |
-          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Set pending Sync status
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status


@@ -24,6 +24,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
@@ -372,7 +374,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (asan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestTsan:
     needs: [RunConfig, BuilderDebTsan]
@@ -380,7 +382,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestMsan:
     needs: [RunConfig, BuilderDebMsan]
@@ -388,7 +390,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (msan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestUBsan:
     needs: [RunConfig, BuilderDebUBsan]
@@ -396,7 +398,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (ubsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestDebug:
     needs: [RunConfig, BuilderDebDebug]
@@ -404,7 +406,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (debug)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
@@ -415,7 +417,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (asan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsAnalyzerAsan:
     needs: [RunConfig, BuilderDebAsan]
@@ -423,7 +425,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (asan, old analyzer)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsTsan:
     needs: [RunConfig, BuilderDebTsan]
@@ -431,7 +433,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsRelease:
     needs: [RunConfig, BuilderDebRelease]
@@ -439,7 +441,7 @@
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (release)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
     if: ${{ !cancelled() }}


@@ -1,7 +1,11 @@
 ### FIXME: merge reusable_test.yml and reusable_build.yml as they are almost identical
 #   and then merge reusable_build_stage.yml and reusable_test_stage.yml
+
+env:
+  PYTHONUNBUFFERED: 1
+
 name: BuildStageWF
 'on':
   workflow_call:
     inputs:


@@ -62,8 +62,6 @@ jobs:
     env:
       GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
@@ -72,6 +70,8 @@ jobs:
           submodules: ${{inputs.submodules}}
           fetch-depth: ${{inputs.checkout_depth}}
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Set build envs
         run: |
           cat >> "$GITHUB_ENV" << 'EOF'


@@ -1,4 +1,8 @@
+env:
+  PYTHONUNBUFFERED: 1
+
 name: StageWF
 'on':
   workflow_call:
     inputs:

.gitmodules

@@ -170,9 +170,6 @@
 [submodule "contrib/fast_float"]
     path = contrib/fast_float
     url = https://github.com/fastfloat/fast_float
-[submodule "contrib/libpq"]
-    path = contrib/libpq
-    url = https://github.com/ClickHouse/libpq
 [submodule "contrib/NuRaft"]
     path = contrib/NuRaft
     url = https://github.com/ClickHouse/NuRaft
@@ -366,6 +363,15 @@
 [submodule "contrib/double-conversion"]
     path = contrib/double-conversion
     url = https://github.com/ClickHouse/double-conversion.git
+[submodule "contrib/mongo-cxx-driver"]
+    path = contrib/mongo-cxx-driver
+    url = https://github.com/ClickHouse/mongo-cxx-driver.git
+[submodule "contrib/mongo-c-driver"]
+    path = contrib/mongo-c-driver
+    url = https://github.com/ClickHouse/mongo-c-driver.git
 [submodule "contrib/numactl"]
     path = contrib/numactl
     url = https://github.com/ClickHouse/numactl.git
+[submodule "contrib/postgres"]
+    path = contrib/postgres
+    url = https://github.com/ClickHouse/postgres.git


@@ -1,5 +1,6 @@
 ### Table of Contents
-**[ClickHouse release v24.8 LTS, 2024-08-20](#243)**<br/>
+**[ClickHouse release v24.9, 2024-09-26](#249)**<br/>
+**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**<br/>
 **[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
 **[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
 **[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
@@ -11,6 +12,168 @@
# 2024 Changelog
### <a id="249"></a> ClickHouse release 24.9, 2024-09-26
#### Backward Incompatible Change
* Expressions like `a[b].c` are supported for named tuples, as well as named subscripts from arbitrary expressions, e.g., `expr().name`. This is useful for processing JSON. This closes [#54965](https://github.com/ClickHouse/ClickHouse/issues/54965). In previous versions, an expression of form `expr().name` was parsed as `tupleElement(expr(), name)`, and the query analyzer was searching for a column `name` rather than for the corresponding tuple element; while in the new version, it is changed to `tupleElement(expr(), 'name')`. In most cases, the previous version was not working, but it is possible to imagine a very unusual scenario when this change could lead to incompatibility: if you stored names of tuple elements in a column or an alias, that was named differently than the tuple element's name: `SELECT 'b' AS a, CAST([tuple(123)] AS 'Array(Tuple(b UInt8))') AS t, t[1].a`. It is very unlikely that you used such queries, but we still have to mark this change as potentially backward incompatible. [#68435](https://github.com/ClickHouse/ClickHouse/pull/68435) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When the setting `print_pretty_type_names` is enabled, it will print `Tuple` data type in a pretty form in `SHOW CREATE TABLE` statements, `formatQuery` function, and in the interactive mode in `clickhouse-client` and `clickhouse-local`. In previous versions, this setting was only applied to `DESCRIBE` queries and `toTypeName`. This closes [#65753](https://github.com/ClickHouse/ClickHouse/issues/65753). [#68492](https://github.com/ClickHouse/ClickHouse/pull/68492) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not allow explicitly specifying UUID when creating a table in `Replicated` databases. Also, do not allow explicitly specifying Keeper path and replica name for *MergeTree tables in Replicated databases. It introduces a new setting `database_replicated_allow_explicit_uuid` and changes the type of `database_replicated_allow_replicated_engine_arguments` from Bool to UInt64 [#66104](https://github.com/ClickHouse/ClickHouse/pull/66104) ([Alexander Tokmakov](https://github.com/tavplubix)).
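An editorial illustration (not part of the changelog) of the named-tuple element access from the first entry above; the values and aliases are hypothetical:

```sql
-- Named subscripts on arbitrary expressions, per #68435.
SELECT CAST(tuple(123) AS Tuple(b UInt8)) AS t, t.b;              -- plain named access
SELECT CAST([tuple(123)] AS Array(Tuple(b UInt8))) AS a, a[1].b;  -- the a[b].c form
```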
#### New Feature
* Allow a user to have multiple authentication methods instead of only one. Allow authentication methods to be reset to the most recently added method. If you want to run some instances on 24.8 and others on 24.9 for some time, it's better to set `max_authentication_methods_per_user` = 1 for that period to avoid potential incompatibilities. [#65277](https://github.com/ClickHouse/ClickHouse/pull/65277) ([Arthur Passos](https://github.com/arthurpassos)).
* Add support for `ATTACH PARTITION ALL FROM`. [#61987](https://github.com/ClickHouse/ClickHouse/pull/61987) ([Kirill Nikiforov](https://github.com/allmazz)).
* Add the `input_format_json_empty_as_default` setting which, when enabled, treats empty fields in JSON inputs as default values. Closes [#59339](https://github.com/ClickHouse/ClickHouse/issues/59339). [#66782](https://github.com/ClickHouse/ClickHouse/pull/66782) ([Alexis Arnaud](https://github.com/a-a-f)).
* Added functions `overlay` and `overlayUTF8` which replace parts of a string by another string. Example: `SELECT overlay('Hello New York', 'Jersey', 11)` returns `Hello New Jersey`. [#66933](https://github.com/ClickHouse/ClickHouse/pull/66933) ([李扬](https://github.com/taiyang-li)).
* Add support for lightweight deletes scoped to a partition: `DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr` (see the example after this list). [#67805](https://github.com/ClickHouse/ClickHouse/pull/67805) ([sunny](https://github.com/sunny19930321)).
* Implemented comparison for `Interval` data type values of different domains (such as seconds and minutes), so they are now converted to the least supertype. [#68057](https://github.com/ClickHouse/ClickHouse/pull/68057) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add `create_if_not_exists` setting to default to `IF NOT EXISTS` behavior during CREATE statements. [#68164](https://github.com/ClickHouse/ClickHouse/pull/68164) ([Peter Nguyen](https://github.com/petern48)).
* Makes it possible to read `Iceberg` tables in Azure and locally. [#68210](https://github.com/ClickHouse/ClickHouse/pull/68210) ([Daniil Ivanik](https://github.com/divanik)).
* Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'`. [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)).
* Add storage encryption for named collections. [#68615](https://github.com/ClickHouse/ClickHouse/pull/68615) ([Pablo Marcos](https://github.com/pamarcos)).
* Add virtual column `_headers` for the `URL` table engine. Closes [#65026](https://github.com/ClickHouse/ClickHouse/issues/65026). [#68867](https://github.com/ClickHouse/ClickHouse/pull/68867) ([flynn](https://github.com/ucasfl)).
* Add `system.projections` table to track available projections. [#68901](https://github.com/ClickHouse/ClickHouse/pull/68901) ([Jordi Villar](https://github.com/jrdi)).
* Add a new function `arrayZipUnaligned` for Spark compatibility (it is named `arrays_zip` in Spark), which allows unaligned arrays, based on the original `arrayZip`. [#69030](https://github.com/ClickHouse/ClickHouse/pull/69030) ([李扬](https://github.com/taiyang-li)).
* Added `cp`/`mv` commands for the keeper client command line application, which atomically copy/move nodes. [#69034](https://github.com/ClickHouse/ClickHouse/pull/69034) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Adds an argument `scale` (default: `true`) to the function `arrayAUC`, which allows skipping the normalization step (issue [#69609](https://github.com/ClickHouse/ClickHouse/issues/69609)). [#69717](https://github.com/ClickHouse/ClickHouse/pull/69717) ([gabrielmcg44](https://github.com/gabrielmcg44)).
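An editorial sketch (not part of the changelog) of two features above: the partition-scoped lightweight delete and the `overlay` function. The table `events`, its partition value, and the column `user_id` are hypothetical:

```sql
-- Lightweight delete limited to one partition, per #67805.
DELETE FROM events IN PARTITION '202409' WHERE user_id = 42;

-- Replace part of a string starting at position 11, per #66933.
SELECT overlay('Hello New York', 'Jersey', 11);  -- 'Hello New Jersey'
```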
#### Experimental feature
* Adds a setting `input_format_try_infer_variants` which allows `Variant` type to be inferred during schema inference for text formats when there is more than one possible type for column/array elements. [#63798](https://github.com/ClickHouse/ClickHouse/pull/63798) ([Shaun Struwig](https://github.com/Blargian)).
* Add aggregate functions `distinctDynamicTypes`/`distinctJSONPaths`/`distinctJSONPathsAndTypes` for better introspection of JSON column type content. [#68463](https://github.com/ClickHouse/ClickHouse/pull/68463) ([Kruglov Pavel](https://github.com/Avogar)).
* New algorithm to determine the unit of marks distribution between parallel replicas by a consistent hash. Different numbers of marks chosen for different read patterns to improve performance. [#68424](https://github.com/ClickHouse/ClickHouse/pull/68424) ([Nikita Taranov](https://github.com/nickitat)).
* Previously, the algorithmic complexity of the part deduplication logic in parallel replica announcement handling was O(n^2), which could take noticeable time for tables with many parts (or partitions). This change makes the complexity O(n*log(n)). [#69596](https://github.com/ClickHouse/ClickHouse/pull/69596) ([Alexander Gololobov](https://github.com/davenger)).
* Refreshable materialized view improvements: append mode (`... REFRESH EVERY 1 MINUTE APPEND ...`) to add rows to an existing table instead of overwriting the whole table, retries (disabled by default, configured in the SETTINGS section of the query), a `SYSTEM WAIT VIEW <name>` query that waits for the currently running refresh, and some fixes (see the sketch after this list). [#58934](https://github.com/ClickHouse/ClickHouse/pull/58934) ([Michael Kolupaev](https://github.com/al13n321)).
* Added `min_max` as a new type of (experimental) statistics. It supports estimating range predicates over numeric columns, e.g. `x < 100`. [#67013](https://github.com/ClickHouse/ClickHouse/pull/67013) ([JackyWoo](https://github.com/JackyWoo)).
* Improve `castOrDefault` from Variant/Dynamic columns so it works when inner types are not convertible at all. [#67150](https://github.com/ClickHouse/ClickHouse/pull/67150) ([Kruglov Pavel](https://github.com/Avogar)).
* Replication of a subset of columns is now available through MaterializedPostgreSQL. Closes [#33748](https://github.com/ClickHouse/ClickHouse/issues/33748). [#69092](https://github.com/ClickHouse/ClickHouse/pull/69092) ([Kruglov Kirill](https://github.com/1on)).
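A hedged sketch (editorial, not part of the changelog) of the append-mode refreshable materialized view and `SYSTEM WAIT VIEW` mentioned above; the view, target table, and source table names are hypothetical, and the feature may require enabling the experimental refreshable-materialized-view setting in this release:

```sql
-- Append new rows every minute instead of overwriting the whole table, per #58934.
CREATE MATERIALIZED VIEW snapshots
REFRESH EVERY 1 MINUTE APPEND TO snapshot_log
AS SELECT now() AS ts, count() AS c FROM events;

SYSTEM WAIT VIEW snapshots;  -- block until the currently running refresh finishes
```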
#### Performance Improvement
* Implemented reading of required files only for Hive partitioning. [#68963](https://github.com/ClickHouse/ClickHouse/pull/68963) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Improve the JOIN performance by rearranging the right table by keys while the table keys are dense in the LEFT or INNER hash joins. [#60341](https://github.com/ClickHouse/ClickHouse/pull/60341) ([kevinyhzou](https://github.com/KevinyhZou)).
* Improve ALL JOIN performance by appending the list of rows lazily. [#63677](https://github.com/ClickHouse/ClickHouse/pull/63677) ([kevinyhzou](https://github.com/KevinyhZou)).
* Load filesystem cache metadata asynchronously during the boot process, in order to make restarts faster (controlled by setting `load_metadata_asynchronously`). [#65736](https://github.com/ClickHouse/ClickHouse/pull/65736) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Functions `array` and `map` were optimized to process certain common cases much faster. [#67707](https://github.com/ClickHouse/ClickHouse/pull/67707) ([李扬](https://github.com/taiyang-li)).
* Trivial optimize on ORC strings reading especially when a column contains no NULLs. [#67794](https://github.com/ClickHouse/ClickHouse/pull/67794) ([李扬](https://github.com/taiyang-li)).
* Improved overall performance of merges by reducing the overhead of scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)).
* Speed up requests to S3 when a profile is not set, credentials are not set, and IMDS is not available (for example, when you are querying a public bucket on a machine outside of a cloud). This closes [#52771](https://github.com/ClickHouse/ClickHouse/issues/52771). [#68082](https://github.com/ClickHouse/ClickHouse/pull/68082) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Devirtualize format reader in `RowInputFormatWithNamesAndTypes` for some performance improvement. [#68437](https://github.com/ClickHouse/ClickHouse/pull/68437) ([李扬](https://github.com/taiyang-li)).
* Add the parallel merge for `uniq` aggregate function when aggregating with a group by key to maximize the CPU utilization. [#68441](https://github.com/ClickHouse/ClickHouse/pull/68441) ([Jiebin Sun](https://github.com/jiebinn)).
* Add a setting `output_format_orc_dictionary_key_size_threshold` to allow users to enable dictionary encoding for String columns in the `ORC` output format. It helps to reduce the output `ORC` file size and improve reading performance significantly (see the example after this list). [#68591](https://github.com/ClickHouse/ClickHouse/pull/68591) ([李扬](https://github.com/taiyang-li)).
* Introduce a new Keeper request `RemoveRecursive`, which removes a node together with its entire subtree. [#69332](https://github.com/ClickHouse/ClickHouse/pull/69332) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Speed up insertion into a table with a vector similarity index by adding data to the vector index in parallel. [#69493](https://github.com/ClickHouse/ClickHouse/pull/69493) ([flynn](https://github.com/ucasfl)).
* Reduce memory usage of inserts to JSON by using an adaptive write buffer size. Many files created by the JSON column in a wide part contain a small amount of data, and it doesn't make sense to allocate a 1MB buffer for them. [#69272](https://github.com/ClickHouse/ClickHouse/pull/69272) ([Kruglov Pavel](https://github.com/Avogar)).
* Avoid returning a thread to the concurrent hash join thread pool, to keep queries from spawning excessive threads. [#69406](https://github.com/ClickHouse/ClickHouse/pull/69406) ([Duc Canh Le](https://github.com/canhld94)).
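An editorial example of the new ORC dictionary-encoding setting from the entry above; the file name, column structure, and threshold value are assumptions:

```sql
-- Enable dictionary encoding for String columns in ORC output, per #68591.
INSERT INTO FUNCTION file('out.orc', 'ORC', 'x String')
SELECT toString(number % 10) FROM numbers(1000000)
SETTINGS output_format_orc_dictionary_key_size_threshold = 0.1;
```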
#### Improvement
* `CREATE TABLE AS` now copies `PRIMARY KEY`, `ORDER BY`, and similar clauses. Currently, this is supported only for the MergeTree family of table engines. [#69076](https://github.com/ClickHouse/ClickHouse/pull/69076) ([sakulali](https://github.com/sakulali)).
* Hardened parts of the codebase related to parsing of small entities. The following (minor) bugs were found and fixed: - if a `DeltaLake` table is partitioned by Bool, the partition value is always interpreted as false; - `ExternalDistributed` table was using only a single shard in the provided addresses; the value of `max_threads` setting and similar were printed as `'auto(N)'` instead of `auto(N)`. [#52503](https://github.com/ClickHouse/ClickHouse/pull/52503) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Use cgroup-specific metrics for CPU usage accounting instead of system-wide metrics. [#62003](https://github.com/ClickHouse/ClickHouse/pull/62003) ([Nikita Taranov](https://github.com/nickitat)).
* IO scheduling for remote S3 disks is now done on the level of HTTP socket streams (instead of the whole S3 requests) to resolve `bandwidth_limit` throttling issues. [#65182](https://github.com/ClickHouse/ClickHouse/pull/65182) ([Sergei Trifonov](https://github.com/serxa)).
* Functions `upperUTF8` and `lowerUTF8` were previously only able to uppercase/lowercase Cyrillic characters. This limitation is now removed, and characters in arbitrary languages are uppercased/lowercased. Example: `SELECT upperUTF8('Süden')` now returns `SÜDEN` (see the example after this list). [#65761](https://github.com/ClickHouse/ClickHouse/pull/65761) ([李扬](https://github.com/taiyang-li)).
* When a lightweight delete happens on a table with projection(s), users previously had two choices: throw an exception (the default) or drop the projection. Now there is a third option: keep the lightweight delete and then rebuild the projection(s). [#66169](https://github.com/ClickHouse/ClickHouse/pull/66169) ([jsc0218](https://github.com/jsc0218)).
* Two options (`dns_allow_resolve_names_to_ipv4` and `dns_allow_resolve_names_to_ipv6`) have been added to allow blocking connections by IP family. [#66895](https://github.com/ClickHouse/ClickHouse/pull/66895) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Make ignoring Ctrl-Z configurable (`ignore_shell_suspend`) in clickhouse-client. [#67134](https://github.com/ClickHouse/ClickHouse/pull/67134) ([Azat Khuzhin](https://github.com/azat)).
* Improve UTF-8 validation in JSON output formats. Ensures that valid JSON is generated in the case of certain byte sequences in the result data. [#67938](https://github.com/ClickHouse/ClickHouse/pull/67938) ([mwoenker](https://github.com/mwoenker)).
* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
* ODBC: get http_max_tries from the server configuration. [#68128](https://github.com/ClickHouse/ClickHouse/pull/68128) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)).
* Add wildcard support for user identification in X.509 SubjectAltName extension. [#68236](https://github.com/ClickHouse/ClickHouse/pull/68236) ([Marco Vilas Boas](https://github.com/marco-vb)).
* Improve schema inference of date-times. Now `DateTime64` is used only when the date-time has a fractional part; otherwise, regular `DateTime` is used. Inference of Date/DateTime is stricter now, especially when `date_time_input_format='best_effort'`, to avoid inferring date-times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)).
* Delete the old code of named collections from dictionaries and replace it with the new one, which allows using DDL-created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). [#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use HTTP/1.1 instead of HTTP/1.0 (set by default) for external HTTP authenticators. [#68456](https://github.com/ClickHouse/ClickHouse/pull/68456) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Added a new set of metrics for thread pool introspection, providing deeper insights into thread pool performance and behavior. [#68674](https://github.com/ClickHouse/ClickHouse/pull/68674) ([filimonov](https://github.com/filimonov)).
* Support query parameters in async inserts with format `Values`. [#68741](https://github.com/ClickHouse/ClickHouse/pull/68741) ([Anton Popov](https://github.com/CurtizJ)).
* Support `Date32` on `dateTrunc` and `toStartOfInterval`. [#68874](https://github.com/ClickHouse/ClickHouse/pull/68874) ([LiuNeng](https://github.com/liuneng1994)).
* Add `plan_step_name` and `plan_step_description` columns to `system.processors_profile_log`. [#68954](https://github.com/ClickHouse/ClickHouse/pull/68954) ([Alexander Gololobov](https://github.com/davenger)).
* Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
* Add CPU arch to the short fault information message. [#69037](https://github.com/ClickHouse/ClickHouse/pull/69037) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Queries will fail faster if a new Keeper connection cannot be established during retries. [#69148](https://github.com/ClickHouse/ClickHouse/pull/69148) ([Raúl Marín](https://github.com/Algunenano)).
* Update `DatabaseFactory` so that user-defined database engines can have arguments, settings, and table overrides (similar to `StorageFactory`). [#69201](https://github.com/ClickHouse/ClickHouse/pull/69201) ([NikBarykin](https://github.com/NikBarykin)).
* The restore mode that replaces all external table engines and functions with the `Null` engine (the `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` settings) was failing if the table had SETTINGS. Now it removes settings from the table definition in this case and allows restoring such tables. [#69253](https://github.com/ClickHouse/ClickHouse/pull/69253) ([Ilya Yatsishin](https://github.com/qoega)).
* `CLICKHOUSE_PASSWORD` is correctly escaped for XML in the clickhouse image's entrypoint. [#69301](https://github.com/ClickHouse/ClickHouse/pull/69301) ([aohoyd](https://github.com/aohoyd)).
* Allow empty arguments for `arrayZip`/`arrayZipUnaligned`, as `concat` did in https://github.com/ClickHouse/ClickHouse/pull/65887. It is for Spark compatibility in the Gluten CH backend. [#69576](https://github.com/ClickHouse/ClickHouse/pull/69576) ([李扬](https://github.com/taiyang-li)).
* Support more advanced SSL options for Keeper's internal communication (e.g. private keys with passphrase). [#69582](https://github.com/ClickHouse/ClickHouse/pull/69582) ([Antonio Andelic](https://github.com/antonio2368)).
* Index analysis can take noticeable time for big tables with many parts or partitions. This change should enable killing a heavy query at that stage. [#69606](https://github.com/ClickHouse/ClickHouse/pull/69606) ([Alexander Gololobov](https://github.com/davenger)).
* Masking sensitive info in `gcs` table function. [#69611](https://github.com/ClickHouse/ClickHouse/pull/69611) ([Vitaly Baranov](https://github.com/vitlibar)).
* Rebuild projection for merges that reduce number of rows. [#62364](https://github.com/ClickHouse/ClickHouse/pull/62364) ([cangyin](https://github.com/cangyin)).
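An editorial illustration of the `upperUTF8`/`lowerUTF8` improvement mentioned above:

```sql
SELECT upperUTF8('Süden');   -- now returns 'SÜDEN'; previously only Cyrillic was case-mapped
SELECT lowerUTF8('ΛΌΓΟΣ');   -- arbitrary non-Latin scripts are handled as well
```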
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix attaching a table when the pg dbname contains "-" in the experimental and unsupported MaterializedPostgreSQL engine. [#62730](https://github.com/ClickHouse/ClickHouse/pull/62730) ([takakawa](https://github.com/takakawa)).
* Fixed an error on generated columns in the experimental and totally unsupported MaterializedPostgreSQL engine when `adnum` ordering is broken [#63161](https://github.com/ClickHouse/ClickHouse/issues/63161). Fixed an error on an id column with a `nextval` expression as default in the experimental and totally unsupported MaterializedPostgreSQL engine when there are generated columns in the table. Fixed an error on dropping a publication with symbols other than \[a-z1-9-\]. [#67664](https://github.com/ClickHouse/ClickHouse/pull/67664) ([Kruglov Kirill](https://github.com/1on)).
* Storage Join now supports Nullable columns in the left table. Closes [#61247](https://github.com/ClickHouse/ClickHouse/issues/61247). [#66926](https://github.com/ClickHouse/ClickHouse/pull/66926) ([vdimir](https://github.com/vdimir)).
* Fix an incorrect query result with parallel replicas (distributed queries as well) when the `IN` operator contains a conversion to Decimal(). The bug was introduced with the new analyzer. [#67234](https://github.com/ClickHouse/ClickHouse/pull/67234) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix the problem that `ALTER TABLE ... MODIFY ORDER BY` causes inconsistent metadata. [#67436](https://github.com/ClickHouse/ClickHouse/pull/67436) ([iceFireser](https://github.com/iceFireser)).
* Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Fix the case when the index is not at the beginning of the tuple during an `IN` query. [#67626](https://github.com/ClickHouse/ClickHouse/pull/67626) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix expiration in `RoleCache`. [#67748](https://github.com/ClickHouse/ClickHouse/pull/67748) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix window view missing blocks due to slow flush to view. [#67983](https://github.com/ClickHouse/ClickHouse/pull/67983) ([Raúl Marín](https://github.com/Algunenano)).
* Fix MSan issue caused by incorrect date format. [#68105](https://github.com/ClickHouse/ClickHouse/pull/68105) ([JackyWoo](https://github.com/JackyWoo)).
* Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix crash in `lag`/`lead` which is introduced in [#67091](https://github.com/ClickHouse/ClickHouse/issues/67091). [#68262](https://github.com/ClickHouse/ClickHouse/pull/68262) ([lgbo](https://github.com/lgbo-ustc)).
* Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Fix bug in key condition. [#68354](https://github.com/ClickHouse/ClickHouse/pull/68354) ([Han Fei](https://github.com/hanfei1991)).
* Fix crash on drop or rename a role that is used in LDAP external user directory. [#68355](https://github.com/ClickHouse/ClickHouse/pull/68355) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix the `Progress` column value of `system.view_refreshes` being greater than 1 [#68377](https://github.com/ClickHouse/ClickHouse/issues/68377). [#68378](https://github.com/ClickHouse/ClickHouse/pull/68378) ([megao](https://github.com/jetgm)).
* Process regexp flags correctly. [#68389](https://github.com/ClickHouse/ClickHouse/pull/68389) ([Han Fei](https://github.com/hanfei1991)).
* PostgreSQL-style cast operator (`::`) works correctly even for SQL-style hex and binary string literals (e.g., `SELECT x'414243'::String`; see the example after this list). This closes [#68324](https://github.com/ClickHouse/ClickHouse/issues/68324). [#68482](https://github.com/ClickHouse/ClickHouse/pull/68482) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Minor patch for https://github.com/ClickHouse/ClickHouse/pull/68131. [#68494](https://github.com/ClickHouse/ClickHouse/pull/68494) ([Chang chen](https://github.com/baibaichen)).
* Fix [#68239](https://github.com/ClickHouse/ClickHouse/issues/68239) SAMPLE n where n is an integer. [#68499](https://github.com/ClickHouse/ClickHouse/pull/68499) ([Denis Hananein](https://github.com/denis-hananein)).
* Fix a bug in `mannWhitneyUTest` when the sizes of the two distributions are not equal. [#68556](https://github.com/ClickHouse/ClickHouse/pull/68556) ([Han Fei](https://github.com/hanfei1991)).
* Fixed a failure to start replication of ReplicatedMergeTree after an unexpected restart, due to abnormal handling of a covered-by-broken part. [#68584](https://github.com/ClickHouse/ClickHouse/pull/68584) ([baolin](https://github.com/baolinhuang)).
* Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
* The full-text index could filter out wrong rows when indexing multiple columns, because it didn't reset `row_id` between different columns; the reproduction procedure is in tests/queries/0_stateless/03228_full_text_with_multi_col.sql. [#68644](https://github.com/ClickHouse/ClickHouse/pull/68644) ([siyuan](https://github.com/linkwk7)).
* Fix invalid characters '\t' and '\n' in `replica_name` when creating a Replicated table, which caused incorrect parsing of 'source replica' in LogEntry. Mentioned in issue [#68640](https://github.com/ClickHouse/ClickHouse/issues/68640). [#68645](https://github.com/ClickHouse/ClickHouse/pull/68645) ([Zhigao Hong](https://github.com/zghong)).
* Added back the virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible error `DB::Exception: Block structure mismatch in joined block stream: different columns:` with new JSON column. [#68686](https://github.com/ClickHouse/ClickHouse/pull/68686) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Make `ColumnsDescription::toString` format each column using the same `IAST::FormatState` object. This results in uniform column metadata being written to disk and ZooKeeper. [#68733](https://github.com/ClickHouse/ClickHouse/pull/68733) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Fix merging of aggregated data for grouping sets. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a logical error when we create a replicated merge tree, alter a column, and then execute MODIFY STATISTICS. [#68820](https://github.com/ClickHouse/ClickHouse/pull/68820) ([Han Fei](https://github.com/hanfei1991)).
* Fix resolving dynamic subcolumns from subqueries in analyzer. [#68824](https://github.com/ClickHouse/ClickHouse/pull/68824) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix complex types metadata parsing in DeltaLake. Closes [#68739](https://github.com/ClickHouse/ClickHouse/issues/68739). [#68836](https://github.com/ClickHouse/ClickHouse/pull/68836) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed asynchronous inserts in case when metadata of table is changed (by `ALTER ADD/MODIFY COLUMN` queries) after insert but before flush to the table. [#68837](https://github.com/ClickHouse/ClickHouse/pull/68837) ([Anton Popov](https://github.com/CurtizJ)).
* Fix unexpected exception when passing empty tuple in array. This fixes [#68618](https://github.com/ClickHouse/ClickHouse/issues/68618). [#68848](https://github.com/ClickHouse/ClickHouse/pull/68848) ([Amos Bird](https://github.com/amosbird)).
* Fix parsing pure metadata mutations commands. [#68935](https://github.com/ClickHouse/ClickHouse/pull/68935) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Fixed writing to Materialized Views with enabled setting `optimize_functions_to_subcolumns`. [#68951](https://github.com/ClickHouse/ClickHouse/pull/68951) ([Anton Popov](https://github.com/CurtizJ)).
* Don't use the serializations cache in const Dynamic column methods. It could lead to a use-of-uninitialized-value error or even a race condition during aggregations. [#68953](https://github.com/ClickHouse/ClickHouse/pull/68953) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a parsing error when null should be inserted as the default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* There were cases when a path was concatenated incorrectly and contained the `//` part in it; this is solved using path normalization. [#69066](https://github.com/ClickHouse/ClickHouse/pull/69066) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix a logical error when we have an empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
* Fixed a data race of progress indication in clickhouse-client during query canceling. [#69081](https://github.com/ClickHouse/ClickHouse/pull/69081) ([Sergei Trifonov](https://github.com/serxa)).
* Fix a bug where the vector similarity index (currently experimental) was not utilized when used with cosine distance as the distance function. [#69090](https://github.com/ClickHouse/ClickHouse/pull/69090) ([flynn](https://github.com/ucasfl)).
* This change addresses an issue where attempting to create a Replicated database again after a server failure during the initial creation process could result in an error. [#69102](https://github.com/ClickHouse/ClickHouse/pull/69102) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Don't infer Bool type from String in CSV when `input_format_csv_try_infer_numbers_from_strings = 1` because we don't allow reading bool values from strings. [#69109](https://github.com/ClickHouse/ClickHouse/pull/69109) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix explain ast insert queries parsing errors on client when `--multiquery` is enabled. [#69123](https://github.com/ClickHouse/ClickHouse/pull/69123) ([wxybear](https://github.com/wxybear)).
* The `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix propagating the structure argument in s3Cluster. Previously, the `DEFAULT` expression of the column could be lost when sending the query to the replicas in s3Cluster. [#69147](https://github.com/ClickHouse/ClickHouse/pull/69147) ([Kruglov Pavel](https://github.com/Avogar)).
* Respect format settings in Values format during conversion from expression to the destination type. [#69149](https://github.com/ClickHouse/ClickHouse/pull/69149) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `clickhouse-client --queries-file` for readonly users (previously fails with `Cannot modify 'log_comment' setting in readonly mode`). [#69175](https://github.com/ClickHouse/ClickHouse/pull/69175) ([Azat Khuzhin](https://github.com/azat)).
* Fix data race in clickhouse-client when it's piped to a process that terminated early. [#69186](https://github.com/ClickHouse/ClickHouse/pull/69186) ([vdimir](https://github.com/vdimir)).
* Fix incorrect results of `uniq` and GROUP BY for JSON/Dynamic types. [#69203](https://github.com/ClickHouse/ClickHouse/pull/69203) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix the INFILE format detection for asynchronous inserts. If the format is not explicitly defined in the FORMAT clause, it can be detected from the INFILE file extension. [#69237](https://github.com/ClickHouse/ClickHouse/pull/69237) ([Julia Kartseva](https://github.com/jkartseva)).
* After [this issue](https://github.com/ClickHouse/ClickHouse/pull/59946#issuecomment-1943653197) there are quite a few table replicas in production such that their `metadata_version` node value is both equal to `0` and is different from the respective table's `metadata` node version. This leads to `alter` queries failing on such replicas. [#69274](https://github.com/ClickHouse/ClickHouse/pull/69274) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Mark Dynamic type as not safe primary key type to avoid issues with Fields. [#69311](https://github.com/ClickHouse/ClickHouse/pull/69311) ([Kruglov Pavel](https://github.com/Avogar)).
* Improve restoring of access entities' dependencies. [#69346](https://github.com/ClickHouse/ClickHouse/pull/69346) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix undefined behavior when all connection attempts fail getting a connection for insertions. [#69390](https://github.com/ClickHouse/ClickHouse/pull/69390) ([Pablo Marcos](https://github.com/pamarcos)).
* Close [#69135](https://github.com/ClickHouse/ClickHouse/issues/69135). We tried to reuse joined data for a `cross` join, but this could not happen in ClickHouse at present. It's better to keep `have_compressed` in `reuseJoinedData`. [#69404](https://github.com/ClickHouse/ClickHouse/pull/69404) ([lgbo](https://github.com/lgbo-ustc)).
* Make `materialize()` function return full column when parameter is a sparse column. [#69429](https://github.com/ClickHouse/ClickHouse/pull/69429) ([Alexander Gololobov](https://github.com/davenger)).
* Fixed a `LOGICAL_ERROR` with function `sqidDecode` ([#69450](https://github.com/ClickHouse/ClickHouse/issues/69450)). [#69451](https://github.com/ClickHouse/ClickHouse/pull/69451) ([Robert Schulze](https://github.com/rschu1ze)).
* Quick fix for the s3queue problem on 24.6, or for a create query with a Replicated database. [#69454](https://github.com/ClickHouse/ClickHouse/pull/69454) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed case when memory consumption was too high because of the squashing in `INSERT INTO ... SELECT` or `CREATE TABLE AS SELECT` queries. [#69469](https://github.com/ClickHouse/ClickHouse/pull/69469) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Statements `SHOW COLUMNS` and `SHOW INDEX` now work properly if the table has dots in its name. [#69514](https://github.com/ClickHouse/ClickHouse/pull/69514) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Usage of the query cache for queries with an overflow mode != 'throw' is now disallowed. This prevents situations where potentially truncated and incorrect query results could be stored in the query cache. (issue [#67476](https://github.com/ClickHouse/ClickHouse/issues/67476)). [#69549](https://github.com/ClickHouse/ClickHouse/pull/69549) ([Robert Schulze](https://github.com/rschu1ze)).
* Keep original order of conditions during move to prewhere. Previously the order could change and it could lead to failing queries when the order is important. [#69560](https://github.com/ClickHouse/ClickHouse/pull/69560) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix Keeper multi-request preprocessing after ZNOAUTH error. [#69627](https://github.com/ClickHouse/ClickHouse/pull/69627) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix METADATA_MISMATCH that might have happened due to TTL with a WHERE clause in DatabaseReplicated when creating a new replica. [#69736](https://github.com/ClickHouse/ClickHouse/pull/69736) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix `StorageS3(Azure)Queue` settings `tracked_file_ttl_sec`. We wrote it to keeper with key `tracked_file_ttl_sec`, but read as `tracked_files_ttl_sec`, which was a typo. [#69742](https://github.com/ClickHouse/ClickHouse/pull/69742) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use tryconvertfieldtotype in gethyperrectangleforrowgroup. [#69745](https://github.com/ClickHouse/ClickHouse/pull/69745) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Revert "Fix prewhere without columns and without adaptive index granularity (almost w/o anything)"'. Due to the reverted changes some errors might happen when reading data parts produced by old CH releases (presumably 2021 or older). [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
### <a id="248"></a> ClickHouse release 24.8 LTS, 2024-08-20 ### <a id="248"></a> ClickHouse release 24.8 LTS, 2024-08-20
#### Backward Incompatible Change #### Backward Incompatible Change
@ -28,7 +191,7 @@
* Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)). * Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)).
* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)). * Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)).
* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both left and right table. e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)). * Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both left and right table. e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)).
* Intrpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). * Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)). * Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)).
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table_engines to `Null` engine that can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)). * Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table_engines to `Null` engine that can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
* Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)). * Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
@ -80,7 +243,6 @@
* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)). * Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `-no-pie` to Aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)). * Add `-no-pie` to Aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)).
* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)). * Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
* Fix settings and `current_database` in `system.processes` for async BACKUP/RESTORE. [#68163](https://github.com/ClickHouse/ClickHouse/pull/68163) ([Azat Khuzhin](https://github.com/azat)).
* Remove unnecessary logs for non-replicated `MergeTree`. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)). * Remove unnecessary logs for non-replicated `MergeTree`. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)).
#### Build/Testing/Packaging Improvement #### Build/Testing/Packaging Improvement

View File

@ -339,7 +339,6 @@ set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (OS_DARWIN) if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif() endif()

View File

@ -34,41 +34,39 @@ curl https://clickhouse.com/ | sh
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know. Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
* [v24.9 Community Call](https://clickhouse.com/company/events/v24-9-community-release-call) - September 26 * [v24.10 Community Call](https://clickhouse.com/company/events/v24-10-community-release-call) - October 31
## Upcoming Events ## Upcoming Events
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc. Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov: Upcoming meetups
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
Other upcoming meetups
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1 * [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3 * [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22 * [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31 * [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19 * [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21 * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
Recently completed events Recently completed meetups
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25 * [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27 * [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27 * [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5 * [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5 * [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5 * [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
## Recent Recordings ## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments" * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"

View File

@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with securit
| Version | Supported | | Version | Supported |
|:-|:-| |:-|:-|
| 24.9 | ✔️ |
| 24.8 | ✔️ | | 24.8 | ✔️ |
| 24.7 | ✔️ | | 24.7 | ✔️ |
| 24.6 | ✔️ | | 24.6 | ❌ |
| 24.5 | ❌ | | 24.5 | ❌ |
| 24.4 | ❌ | | 24.4 | ❌ |
| 24.3 | ✔️ | | 24.3 | ✔️ |

View File

@ -110,8 +110,7 @@ struct DecomposedFloat
{ {
if (!isNegative()) if (!isNegative())
return rhs > 0 ? -1 : 1; return rhs > 0 ? -1 : 1;
else return rhs >= 0 ? -1 : 1;
return rhs >= 0 ? -1 : 1;
} }
/// The case of the most negative integer /// The case of the most negative integer
@ -128,8 +127,7 @@ struct DecomposedFloat
if (mantissa() == 0) if (mantissa() == 0)
return 0; return 0;
else return -1;
return -1;
} }
} }
@ -169,9 +167,8 @@ struct DecomposedFloat
/// Float has no fractional part means that the numbers are equal. /// Float has no fractional part means that the numbers are equal.
if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0) if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
return 0; return 0;
else /// Float has fractional part means its abs value is larger.
/// Float has fractional part means its abs value is larger. return isNegative() ? -1 : 1;
return isNegative() ? -1 : 1;
} }
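Each hunk above removes a redundant `else` after a `return`, so the remaining branch becomes the function's tail (the readability-else-no-return cleanup). A minimal sketch of the pattern, shown in Python with a hypothetical function:

def compare_before(rhs):
    if rhs > 0:
        return -1
    else:  # redundant: the `if` branch has already returned
        return 1

def compare_after(rhs):
    if rhs > 0:
        return -1
    return 1  # same behavior, one nesting level less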

View File

@ -205,8 +205,7 @@ JSON::ElementType JSON::getType() const
Pos after_string = skipString(); Pos after_string = skipString();
if (after_string < ptr_end && *after_string == ':') if (after_string < ptr_end && *after_string == ':')
return TYPE_NAME_VALUE_PAIR; return TYPE_NAME_VALUE_PAIR;
else return TYPE_STRING;
return TYPE_STRING;
} }
default: default:
throw JSONException(std::string("JSON: unexpected char ") + *ptr_begin + ", expected one of '{[tfn-0123456789\"'"); throw JSONException(std::string("JSON: unexpected char ") + *ptr_begin + ", expected one of '{[tfn-0123456789\"'");
@ -474,8 +473,7 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
if (it == end()) if (it == end())
return nullptr; return nullptr;
else return it->data();
return it->data();
} }
@ -487,7 +485,7 @@ bool JSON::hasEscapes() const
if (*pos == '"') if (*pos == '"')
return false; return false;
else if (*pos == '\\') if (*pos == '\\')
return true; return true;
throw JSONException("JSON: unexpected end of data."); throw JSONException("JSON: unexpected end of data.");
} }
@ -503,7 +501,7 @@ bool JSON::hasSpecialChars() const
if (*pos == '"') if (*pos == '"')
return false; return false;
else if (pos < ptr_end) if (pos < ptr_end)
return true; return true;
throw JSONException("JSON: unexpected end of data."); throw JSONException("JSON: unexpected end of data.");
} }
@ -682,10 +680,9 @@ double JSON::toDouble() const
if (type == TYPE_NUMBER) if (type == TYPE_NUMBER)
return getDouble(); return getDouble();
else if (type == TYPE_STRING) if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getDouble(); return JSON(ptr_begin + 1, ptr_end, level + 1).getDouble();
else throw JSONException("JSON: cannot convert value to double.");
throw JSONException("JSON: cannot convert value to double.");
} }
Int64 JSON::toInt() const Int64 JSON::toInt() const
@ -694,10 +691,9 @@ Int64 JSON::toInt() const
if (type == TYPE_NUMBER) if (type == TYPE_NUMBER)
return getInt(); return getInt();
else if (type == TYPE_STRING) if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getInt(); return JSON(ptr_begin + 1, ptr_end, level + 1).getInt();
else throw JSONException("JSON: cannot convert value to signed integer.");
throw JSONException("JSON: cannot convert value to signed integer.");
} }
UInt64 JSON::toUInt() const UInt64 JSON::toUInt() const
@ -706,10 +702,9 @@ UInt64 JSON::toUInt() const
if (type == TYPE_NUMBER) if (type == TYPE_NUMBER)
return getUInt(); return getUInt();
else if (type == TYPE_STRING) if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getUInt(); return JSON(ptr_begin + 1, ptr_end, level + 1).getUInt();
else throw JSONException("JSON: cannot convert value to unsigned integer.");
throw JSONException("JSON: cannot convert value to unsigned integer.");
} }
std::string JSON::toString() const std::string JSON::toString() const
@ -718,11 +713,9 @@ std::string JSON::toString() const
if (type == TYPE_STRING) if (type == TYPE_STRING)
return getString(); return getString();
else
{ Pos pos = skipElement();
Pos pos = skipElement(); return std::string(ptr_begin, pos - ptr_begin);
return std::string(ptr_begin, pos - ptr_begin);
}
} }

View File

@ -203,9 +203,7 @@ T JSON::getWithDefault(const std::string & key, const T & default_) const
if (key_json.isType<T>()) if (key_json.isType<T>())
return key_json.get<T>(); return key_json.get<T>();
else
return default_;
}
else
return default_; return default_;
}
return default_;
} }

View File

@ -151,19 +151,19 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
return unalignedLoad<uint64_t>(p1) == unalignedLoad<uint64_t>(p2) return unalignedLoad<uint64_t>(p1) == unalignedLoad<uint64_t>(p2)
&& unalignedLoad<uint64_t>(p1 + size - 8) == unalignedLoad<uint64_t>(p2 + size - 8); && unalignedLoad<uint64_t>(p1 + size - 8) == unalignedLoad<uint64_t>(p2 + size - 8);
} }
else if (size >= 4) if (size >= 4)
{ {
/// Chunks of 4..7 bytes. /// Chunks of 4..7 bytes.
return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2) return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2)
&& unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4); && unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4);
} }
else if (size >= 2) if (size >= 2)
{ {
/// Chunks of 2..3 bytes. /// Chunks of 2..3 bytes.
return unalignedLoad<uint16_t>(p1) == unalignedLoad<uint16_t>(p2) return unalignedLoad<uint16_t>(p1) == unalignedLoad<uint16_t>(p2)
&& unalignedLoad<uint16_t>(p1 + size - 2) == unalignedLoad<uint16_t>(p2 + size - 2); && unalignedLoad<uint16_t>(p1 + size - 2) == unalignedLoad<uint16_t>(p2 + size - 2);
} }
else if (size >= 1) if (size >= 1)
{ {
/// A single byte. /// A single byte.
return *p1 == *p2; return *p1 == *p2;
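The 4..7-byte branch above works because two overlapping 4-byte loads cover every byte of the range; the 2..3- and 8..16-byte branches apply the same trick with 2- and 8-byte loads. A small Python model of the overlap idea (an illustrative sketch, not part of the patch):

def memequal_4_to_7(p1: bytes, p2: bytes) -> bool:
    """Model of the 4..7-byte branch: two overlapping 4-byte windows."""
    size = len(p1)
    assert len(p2) == size and 4 <= size <= 7
    # Windows [0, 4) and [size - 4, size) together cover [0, size).
    return p1[:4] == p2[:4] and p1[size - 4:] == p2[size - 4:]

assert memequal_4_to_7(b"abcdefg", b"abcdefg")
assert not memequal_4_to_7(b"abcdefg", b"abcdefx")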

View File

@ -53,10 +53,9 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv,
key = arg.substr(key_start); key = arg.substr(key_start);
continue; continue;
} }
else
{ key = "";
key = "";
}
if (key_start == std::string::npos) if (key_start == std::string::npos)
continue; continue;

View File

@ -330,9 +330,8 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
#if defined(__SSE4_2__) #if defined(__SSE4_2__)
if (sizeof...(symbols) >= 5) if (sizeof...(symbols) >= 5)
return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end); return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
else
#endif #endif
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end); return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
} }
template <bool positive, ReturnMode return_mode> template <bool positive, ReturnMode return_mode>
@ -341,9 +340,8 @@ inline const char * find_first_symbols_dispatch(const std::string_view haystack,
#if defined(__SSE4_2__) #if defined(__SSE4_2__)
if (symbols.str.size() >= 5) if (symbols.str.size() >= 5)
return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols); return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
else
#endif #endif
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size()); return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
} }
} }

View File

@ -33,8 +33,7 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
uint64_t value; uint64_t value;
if (setting_file >> value) if (setting_file >> value)
return {value}; return {value};
else return {}; /// e.g. the cgroups default "max"
return {}; /// e.g. the cgroups default "max"
} }
current_cgroup = current_cgroup.parent_path(); current_cgroup = current_cgroup.parent_path();
} }
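The function walks up the cgroup v2 hierarchy and treats a non-numeric `memory.max` value (the default `max`) as "no limit". A rough Python equivalent, where the helper name and traversal details are assumptions for illustration:

from pathlib import Path
from typing import Optional

def cgroups_v2_memory_limit(start: Path) -> Optional[int]:
    """Return the first numeric memory.max found while walking towards the root."""
    current = start
    while current != current.parent:  # stop at the filesystem root
        setting = current / "memory.max"
        if setting.is_file():
            text = setting.read_text().strip()
            # e.g. the cgroups default "max" is not numeric -> no limit
            return int(text) if text.isdigit() else None
        current = current.parent
    return None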

View File

@ -3,7 +3,11 @@ add_subdirectory (Data)
add_subdirectory (Data/ODBC) add_subdirectory (Data/ODBC)
add_subdirectory (Foundation) add_subdirectory (Foundation)
add_subdirectory (JSON) add_subdirectory (JSON)
add_subdirectory (MongoDB)
if (USE_MONGODB)
add_subdirectory(MongoDB)
endif()
add_subdirectory (Net) add_subdirectory (Net)
add_subdirectory (NetSSL_OpenSSL) add_subdirectory (NetSSL_OpenSSL)
add_subdirectory (Redis) add_subdirectory (Redis)

View File

@ -188,8 +188,9 @@ namespace Crypto
pFile = fopen(keyFile.c_str(), "r"); pFile = fopen(keyFile.c_str(), "r");
if (pFile) if (pFile)
{ {
pem_password_cb * pCB = pass.empty() ? (pem_password_cb *)0 : &passCB; pem_password_cb * pCB = &passCB;
void * pPassword = pass.empty() ? (void *)0 : (void *)pass.c_str(); static constexpr char * no_password = "";
void * pPassword = pass.empty() ? (void *)no_password : (void *)pass.c_str();
if (readFunc(pFile, &pKey, pCB, pPassword)) if (readFunc(pFile, &pKey, pCB, pPassword))
{ {
fclose(pFile); fclose(pFile);
@ -225,6 +226,13 @@ namespace Crypto
error: error:
if (pFile) if (pFile)
fclose(pFile); fclose(pFile);
if (*ppKey)
{
if constexpr (std::is_same_v<K, EVP_PKEY>)
EVP_PKEY_free(*ppKey);
else
EC_KEY_free(*ppKey);
}
throw OpenSSLException("EVPKey::loadKey(string)"); throw OpenSSLException("EVPKey::loadKey(string)");
} }
@ -286,6 +294,13 @@ namespace Crypto
error: error:
if (pBIO) if (pBIO)
BIO_free(pBIO); BIO_free(pBIO);
if (*ppKey)
{
if constexpr (std::is_same_v<K, EVP_PKEY>)
EVP_PKEY_free(*ppKey);
else
EC_KEY_free(*ppKey);
}
throw OpenSSLException("EVPKey::loadKey(stream)"); throw OpenSSLException("EVPKey::loadKey(stream)");
} }

View File

@ -248,6 +248,9 @@ namespace Net
SSL_CTX * sslContext() const; SSL_CTX * sslContext() const;
/// Returns the underlying OpenSSL SSL Context object. /// Returns the underlying OpenSSL SSL Context object.
SSL_CTX * takeSslContext();
/// Takes ownership of the underlying OpenSSL SSL Context object.
Usage usage() const; Usage usage() const;
/// Returns whether the context is for use by a client or by a server /// Returns whether the context is for use by a client or by a server
/// and whether TLSv1 is required. /// and whether TLSv1 is required.
@ -401,6 +404,13 @@ namespace Net
return _pSSLContext; return _pSSLContext;
} }
inline SSL_CTX * Context::takeSslContext()
{
auto * result = _pSSLContext;
_pSSLContext = nullptr;
return result;
}
inline bool Context::extendedCertificateVerificationEnabled() const inline bool Context::extendedCertificateVerificationEnabled() const
{ {

View File

@ -106,6 +106,11 @@ Context::Context(
Context::~Context() Context::~Context()
{ {
if (_pSSLContext == nullptr)
{
return;
}
try try
{ {
SSL_CTX_free(_pSSLContext); SSL_CTX_free(_pSSLContext);
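Together with `takeSslContext()` above, this guard completes a detach pattern: once ownership of the `SSL_CTX` has been taken, `_pSSLContext` is null and the destructor must not free it. The same shape in Python (a hypothetical sketch, analogous to `socket.detach()`):

class ContextHandle:
    def __init__(self, ctx, free):
        self._ctx = ctx
        self._free = free  # stand-in for SSL_CTX_free

    def detach(self):
        """Hand the raw context to the caller, who now owns it and must free it."""
        ctx, self._ctx = self._ctx, None
        return ctx

    def close(self):
        if self._ctx is None:  # already detached: nothing to free
            return
        self._free(self._ctx)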

View File

@ -0,0 +1,17 @@
# docker build -t clickhouse/style-test .
FROM ubuntu:22.04
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
aspell \
libxml2-utils \
python3-pip \
locales \
git \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
ENV LC_ALL=en_US.UTF-8
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r requirements.txt

View File

@ -0,0 +1,4 @@
requests==2.32.3
yamllint==1.26.3
codespell==2.2.1
https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl

ci_v2/jobs/check_style.py
View File

@ -0,0 +1,410 @@
import math
import multiprocessing
import os
import re
import sys
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
from praktika.result import Result
from praktika.utils import Shell, Utils
NPROC = multiprocessing.cpu_count()
def chunk_list(data, n):
"""Split the data list into n nearly equal-sized chunks."""
chunk_size = math.ceil(len(data) / n)
for i in range(0, len(data), chunk_size):
yield data[i : i + chunk_size]
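# Example: chunk_list([1, 2, 3, 4, 5], 2) uses chunk_size = ceil(5/2) = 3
# and yields [1, 2, 3] and then [4, 5].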
def run_check_concurrent(check_name, check_function, files, nproc=NPROC):
stop_watch = Utils.Stopwatch()
if not files:
print(f"File list is empty [{files}]")
raise ValueError(f"File list is empty [{files}]")
file_chunks = list(chunk_list(files, nproc))
results = []
# Run check_function concurrently on each chunk
with ProcessPoolExecutor(max_workers=NPROC) as executor:
futures = [executor.submit(check_function, chunk) for chunk in file_chunks]
# Wait for results and process them (optional)
for future in futures:
try:
res = future.result()
if res and res not in results:
results.append(res)
except Exception as e:
results.append(f"Exception in {check_name}: {e}")
result = Result(
name=check_name,
status=Result.Status.SUCCESS if not results else Result.Status.FAILED,
start_time=stop_watch.start_time,
duration=stop_watch.duration,
info=f"errors: {results}" if results else "",
)
return result
def run_simple_check(check_name, check_function, **kwargs):
stop_watch = Utils.Stopwatch()
error = check_function(**kwargs)
result = Result(
name=check_name,
status=Result.Status.SUCCESS if not error else Result.Status.FAILED,
start_time=stop_watch.start_time,
duration=stop_watch.duration,
info=error,
)
return result
def run_check(check_name, check_function, files):
return run_check_concurrent(check_name, check_function, files, nproc=1)
def check_duplicate_includes(file_path):
includes = []
with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
for line in f:
if re.match(r"^#include ", line):
includes.append(line.strip())
include_counts = {line: includes.count(line) for line in includes}
duplicates = {line: count for line, count in include_counts.items() if count > 1}
if duplicates:
return f"{file_path}: {duplicates}"
return ""
def check_whitespaces(file_paths):
for file in file_paths:
exit_code, out, err = Shell.get_res_stdout_stderr(
f'./ci_v2/jobs/scripts/check_style/double_whitespaces.pl "{file}"',
verbose=False,
)
if out or err:
return out + " err: " + err
return ""
def check_yamllint(file_paths):
file_paths = " ".join([f"'{file}'" for file in file_paths])
exit_code, out, err = Shell.get_res_stdout_stderr(
f"yamllint --config-file=./.yamllint {file_paths}", verbose=False
)
return out or err
def check_xmllint(file_paths):
if not isinstance(file_paths, list):
file_paths = [file_paths]
file_paths = " ".join([f"'{file}'" for file in file_paths])
exit_code, out, err = Shell.get_res_stdout_stderr(
f"xmllint --noout --nonet {file_paths}", verbose=False
)
return out or err
def check_functional_test_cases(files):
"""
Queries with event_date should have yesterday() not today()
NOTE: it is not that accurate, but at least something.
"""
patterns = [
re.compile(
r"(?i)where.*?\bevent_date\s*(=|>=)\s*today\(\)(?!\s*-\s*1)",
re.IGNORECASE | re.DOTALL,
)
]
errors = []
for test_case in files:
try:
with open(test_case, "r", encoding="utf-8", errors="replace") as f:
file_content = " ".join(
f.read().splitlines()
) # Combine lines into a single string
# Check if any pattern matches in the concatenated string
if any(pattern.search(file_content) for pattern in patterns):
errors.append(
f"event_date should be filtered using >=yesterday() in {test_case} (to avoid flakiness)"
)
except Exception as e:
errors.append(f"Error checking {test_case}: {e}")
for test_case in files:
if "fail" in test_case:
errors.append(f"test case {test_case} includes 'fail' in its name")
return " ".join(errors)
def check_gaps_in_tests_numbers(file_paths, gap_threshold=100):
test_numbers = set()
pattern = re.compile(r"(\d+)")
for file in file_paths:
file_name = os.path.basename(file)
match = pattern.search(file_name)
if match:
test_numbers.add(int(match.group(1)))
sorted_numbers = sorted(test_numbers)
large_gaps = []
for i in range(1, len(sorted_numbers)):
prev_num = sorted_numbers[i - 1]
next_num = sorted_numbers[i]
diff = next_num - prev_num
if diff >= gap_threshold:
large_gaps.append(f"Gap ({prev_num}, {next_num}) > {gap_threshold}")
return large_gaps
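# Example: with test numbers {1, 2, 150} and the default gap_threshold=100
# this returns ["Gap (2, 150) > 100"].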
def check_broken_links(path, exclude_paths):
broken_symlinks = []
for path in Path(path).rglob("*"):
if any(exclude_path in str(path) for exclude_path in exclude_paths):
continue
if path.is_symlink():
if not path.exists():
broken_symlinks.append(str(path))
if broken_symlinks:
for symlink in broken_symlinks:
print(symlink)
return f"Broken symlinks found: {broken_symlinks}"
else:
return ""
def check_cpp_code():
res, out, err = Shell.get_res_stdout_stderr(
"./ci_v2/jobs/scripts/check_style/check_cpp.sh"
)
if err:
out += err
return out
def check_repo_submodules():
res, out, err = Shell.get_res_stdout_stderr(
"./ci_v2/jobs/scripts/check_style/check_submodules.sh"
)
if err:
out += err
return out
def check_other():
res, out, err = Shell.get_res_stdout_stderr(
"./ci_v2/jobs/scripts/check_style/checks_to_refactor.sh"
)
if err:
out += err
return out
def check_codespell():
res, out, err = Shell.get_res_stdout_stderr(
"./ci_v2/jobs/scripts/check_style/check_typos.sh"
)
if err:
out += err
return out
def check_aspell():
res, out, err = Shell.get_res_stdout_stderr(
"./ci_v2/jobs/scripts/check_style/check_aspell.sh"
)
if err:
out += err
return out
def check_mypy():
res, out, err = Shell.get_res_stdout_stderr(
"./ci_v2/jobs/scripts/check_style/check-mypy"
)
if err:
out += err
return out
def check_pylint():
res, out, err = Shell.get_res_stdout_stderr(
"./ci_v2/jobs/scripts/check_style/check-pylint"
)
if err:
out += err
return out
def check_file_names(files):
files_set = set()
for file in files:
file_ = file.lower()
if file_ in files_set:
return f"Non-uniq file name in lower case: {file}"
files_set.add(file_)
return ""
if __name__ == "__main__":
results = []
stop_watch = Utils.Stopwatch()
all_files = Utils.traverse_paths(
include_paths=["."],
exclude_paths=[
"./.git",
"./contrib",
"./build",
],
not_exists_ok=True,  # ./build may exist when running locally
)
cpp_files = Utils.traverse_paths(
include_paths=["./src", "./base", "./programs", "./utils"],
exclude_paths=[
"./base/glibc-compatibility",
"./contrib/consistent-hashing",
"./base/widechar_width",
],
file_suffixes=[".h", ".cpp"],
)
yaml_workflow_files = Utils.traverse_paths(
include_paths=["./.github"],
exclude_paths=[],
file_suffixes=[".yaml", ".yml"],
)
xml_files = Utils.traverse_paths(
include_paths=["."],
exclude_paths=["./.git", "./contrib/"],
file_suffixes=[".xml"],
)
functional_test_files = Utils.traverse_paths(
include_paths=["./tests/queries"],
exclude_paths=[],
file_suffixes=[".sql", ".sh", ".py", ".j2"],
)
results.append(
Result(
name="Read Files",
status=Result.Status.SUCCESS,
start_time=stop_watch.start_time,
duration=stop_watch.duration,
)
)
results.append(
run_check_concurrent(
check_name="Whitespace Check",
check_function=check_whitespaces,
files=cpp_files,
)
)
results.append(
run_check_concurrent(
check_name="YamlLint Check",
check_function=check_yamllint,
files=yaml_workflow_files,
)
)
results.append(
run_check_concurrent(
check_name="XmlLint Check",
check_function=check_xmllint,
files=xml_files,
)
)
results.append(
run_check_concurrent(
check_name="Functional Tests scripts smoke check",
check_function=check_functional_test_cases,
files=functional_test_files,
)
)
results.append(
run_check(
check_name="Check Tests Numbers",
check_function=check_gaps_in_tests_numbers,
files=functional_test_files,
)
)
results.append(
run_simple_check(
check_name="Check Broken Symlinks",
check_function=check_broken_links,
path="./",
exclude_paths=["contrib/", "metadata/", "programs/server/data"],
)
)
results.append(
run_simple_check(
check_name="Check CPP code",
check_function=check_cpp_code,
)
)
results.append(
run_simple_check(
check_name="Check Submodules",
check_function=check_repo_submodules,
)
)
results.append(
run_check(
check_name="Check File Names",
check_function=check_file_names,
files=all_files,
)
)
results.append(
run_simple_check(
check_name="Check Many Different Things",
check_function=check_other,
)
)
results.append(
run_simple_check(
check_name="Check Codespell",
check_function=check_codespell,
)
)
results.append(
run_simple_check(
check_name="Check Aspell",
check_function=check_aspell,
)
)
res = Result.create_from(results=results, stopwatch=stop_watch).dump()
if not res.is_ok():
print("Style check: failed")
for result in results:
if not result.is_ok():
print("Failed check:")
print(" | ", result)
sys.exit(1)
else:
print("Style check: ok")

File diff suppressed because it is too large

View File

@ -0,0 +1,59 @@
#!/usr/bin/env bash
# force-enable double star globbing
shopt -s globstar
# Perform spell checking on the docs
if [[ ${1:-} == "--help" ]] || [[ ${1:-} == "-h" ]]; then
echo "Usage $0 [--help|-h] [-i [filename]]"
echo " --help|-h: print this help"
echo " -i: interactive mode. If filename is specified, check only this file, otherwise check all files"
exit 0
fi
ROOT_PATH="."
CHECK_LANG=en
ASPELL_IGNORE_PATH="${ROOT_PATH}/utils/check-style/aspell-ignore/${CHECK_LANG}"
if [[ ${1:-} == "-i" ]]; then
if [[ ! -z ${2:-} ]]; then
FILES_TO_CHECK=${ROOT_PATH}/docs/${CHECK_LANG}/${2}
else
FILES_TO_CHECK=${ROOT_PATH}/docs/${CHECK_LANG}/**/*.md
fi
for fname in ${FILES_TO_CHECK}; do
echo "Checking $fname"
aspell --personal=aspell-dict.txt --add-sgml-skip=code --encoding=utf-8 --mode=markdown -W 3 --lang=${CHECK_LANG} --home-dir=${ASPELL_IGNORE_PATH} -c "$fname"
done
exit
fi
STATUS=0
for fname in ${ROOT_PATH}/docs/${CHECK_LANG}/**/*.md; do
errors=$(cat "$fname" \
| aspell list \
-W 3 \
--personal=aspell-dict.txt \
--add-sgml-skip=code \
--encoding=utf-8 \
--mode=markdown \
--lang=${CHECK_LANG} \
--home-dir=${ASPELL_IGNORE_PATH} \
| sort | uniq)
if [ ! -z "$errors" ]; then
STATUS=1
echo "====== $fname ======"
echo "$errors"
fi
done
if (( STATUS != 0 )); then
echo "====== Errors found ======"
echo "To exclude some words add them to the dictionary file \"${ASPELL_IGNORE_PATH}/aspell-dict.txt\""
echo "You can also run ${0} -i to see the errors interactively and fix them or add to the dictionary file"
fi
exit ${STATUS}

View File

@ -0,0 +1,339 @@
#!/usr/bin/env bash
# For code formatting we have clang-format.
#
# But it's not sane to apply clang-format for whole code base,
# because it sometimes makes worse for properly formatted files.
#
# It's only reasonable to blindly apply clang-format only in cases
# when the code is likely to be out of style.
#
# For this purpose we have a script that will use very primitive heuristics
# (simple regexps) to check if the code is likely to have basic style violations.
# and then to run formatter only for the specified files.
LC_ALL="en_US.UTF-8"
ROOT_PATH="."
EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/.*.cpp|utils/keeper-bench/example.yaml'
# From [1]:
# But since array_to_string_internal() in array.c still loops over array
# elements and concatenates them into a string, it's probably not more
# efficient than the looping solutions proposed, but it's more readable.
#
# [1]: https://stackoverflow.com/a/15394738/328260
function in_array()
{
local IFS="|"
local value=$1 && shift
[[ "${IFS}${*}${IFS}" =~ "${IFS}${value}${IFS}" ]]
}
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
grep -vP $EXCLUDE_DIRS |
xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' |
# a curly brace not in a new line, but not for the case of C++11 init or agg. initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | missing whitespace after for/if/while... before opening brace | whitespaces inside braces
grep -v -P '(//|:\s+\*|\$\(\()| \)"'
# single-line comment | continuation of a multiline comment | a typical piece of embedded shell code | something like ending of raw string literal
# Tabs
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
grep -vP $EXCLUDE_DIRS |
xargs grep $@ -F $'\t'
# // namespace comments are unneeded
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
grep -vP $EXCLUDE_DIRS |
xargs grep $@ -P '}\s*//+\s*namespace\s*'
# Broken symlinks
find -L $ROOT_PATH -type l 2>/dev/null | grep -v contrib && echo "^ Broken symlinks found"
# Duplicated or incorrect setting declarations
SETTINGS_FILE=$(mktemp)
cat $ROOT_PATH/src/Core/Settings.cpp $ROOT_PATH/src/Core/FormatFactorySettingsDeclaration.h | grep "M(" | awk '{print substr($2, 0, length($2) - 1) " " substr($1, 3, length($1) - 3) " SettingsDeclaration" }' > ${SETTINGS_FILE}
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep "extern const Settings" -T | awk '{print substr($5, 0, length($5) -1) " " substr($4, 9) " " substr($1, 0, length($1) - 1)}' >> ${SETTINGS_FILE}
# Duplicate extern declarations for settings
awk '{if (seen[$0]++) print $3 " -> " $1 ;}' ${SETTINGS_FILE} | while read line;
do
echo "Found duplicated setting declaration in: $line"
done
# Incorrect declarations for settings
for setting in $(awk '{print $1 " " $2}' ${SETTINGS_FILE} | sort | uniq | awk '{ print $1 }' | sort | uniq -d);
do
expected=$(grep "^$setting " ${SETTINGS_FILE} | grep SettingsDeclaration | awk '{ print $2 }')
grep "^$setting " ${SETTINGS_FILE} | grep -v " $expected" | awk '{ print $3 " found setting " $1 " with type " $2 }' | while read line;
do
echo "In $line but it should be $expected"
done
done
rm ${SETTINGS_FILE}
# Unused/Undefined/Duplicates ErrorCodes/ProfileEvents/CurrentMetrics
declare -A EXTERN_TYPES
EXTERN_TYPES[ErrorCodes]=int
EXTERN_TYPES[ProfileEvents]=Event
EXTERN_TYPES[CurrentMetrics]=Metric
EXTERN_TYPES_EXCLUDES=(
ProfileEvents::global_counters
ProfileEvents::Event
ProfileEvents::Count
ProfileEvents::Counters
ProfileEvents::end
ProfileEvents::increment
ProfileEvents::incrementForLogMessage
ProfileEvents::getName
ProfileEvents::Timer
ProfileEvents::Type
ProfileEvents::TypeEnum
ProfileEvents::dumpToMapColumn
ProfileEvents::getProfileEvents
ProfileEvents::ThreadIdToCountersSnapshot
ProfileEvents::LOCAL_NAME
ProfileEvents::keeper_profile_events
ProfileEvents::CountersIncrement
CurrentMetrics::add
CurrentMetrics::sub
CurrentMetrics::get
CurrentMetrics::set
CurrentMetrics::end
CurrentMetrics::Increment
CurrentMetrics::Metric
CurrentMetrics::values
CurrentMetrics::Value
CurrentMetrics::keeper_metrics
ErrorCodes::ErrorCode
ErrorCodes::getName
ErrorCodes::increment
ErrorCodes::end
ErrorCodes::values
ErrorCodes::values[i]
ErrorCodes::getErrorCodeByName
ErrorCodes::Value
)
for extern_type in ${!EXTERN_TYPES[@]}; do
type_of_extern=${EXTERN_TYPES[$extern_type]}
allowed_chars='[_A-Za-z]+'
# Unused
# NOTE: to fix automatically, replace echo with:
# sed -i "/extern const $type_of_extern $val/d" $file
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
# NOTE: the check is pretty dumb and distinguish only by the type_of_extern,
# and this matches with zkutil::CreateMode
grep -v -e 'src/Common/ZooKeeper/Types.h' -e 'src/Coordination/KeeperConstants.cpp'
} | {
grep -vP $EXCLUDE_DIRS | xargs grep -l -P "extern const $type_of_extern $allowed_chars"
} | while read file; do
grep -P "extern const $type_of_extern $allowed_chars;" $file | sed -r -e "s/^.*?extern const $type_of_extern ($allowed_chars);.*?$/\1/" | while read val; do
if ! grep -q "$extern_type::$val" $file; then
# Excludes for SOFTWARE_EVENT/HARDWARE_EVENT/CACHE_EVENT in ThreadProfileEvents.cpp
if [[ ! $extern_type::$val =~ ProfileEvents::Perf.* ]]; then
echo "$extern_type::$val is defined but not used in file $file"
fi
fi
done
done
# Undefined
# NOTE: to fix automatically, replace echo with:
# ( grep -q -F 'namespace $extern_type' $file && \
# sed -i -r "0,/(\s*)extern const $type_of_extern [$allowed_chars]+/s//\1extern const $type_of_extern $val;\n&/" $file || \
# awk '{ print; if (ns == 1) { ns = 2 }; if (ns == 2) { ns = 0; print "namespace $extern_type\n{\n extern const $type_of_extern '$val';\n}" } }; /namespace DB/ { ns = 1; };' < $file > ${file}.tmp && mv ${file}.tmp $file )
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
grep -vP $EXCLUDE_DIRS | xargs grep -l -P "$extern_type::$allowed_chars"
} | while read file; do
grep -P "$extern_type::$allowed_chars" $file | grep -P -v '^\s*//' | sed -r -e "s/^.*?$extern_type::($allowed_chars).*?$/\1/" | while read val; do
if ! grep -q "extern const $type_of_extern $val" $file; then
if ! in_array "$extern_type::$val" "${EXTERN_TYPES_EXCLUDES[@]}"; then
echo "$extern_type::$val is used in file $file but not defined"
fi
fi
done
done
# Duplicates
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
grep -vP $EXCLUDE_DIRS | xargs grep -l -P "$extern_type::$allowed_chars"
} | while read file; do
grep -P "extern const $type_of_extern $allowed_chars;" $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate $extern_type in file $file"
done
done
# Three or more consecutive empty lines
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
while read file; do awk '/^$/ { ++i; if (i > 2) { print "More than two consecutive empty lines in file '$file'" } } /./ { i = 0 }' $file; done
# Check that every header file has #pragma once in first line
find $ROOT_PATH/{src,programs,utils} -name '*.h' |
grep -vP $EXCLUDE_DIRS |
while read file; do [[ $(head -n1 $file) != '#pragma once' ]] && echo "File $file must have '#pragma once' in first line"; done
# Too many exclamation marks
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -F '!!!' | grep -P '.' && echo "Too many exclamation marks (looks dirty, unconfident)."
# Exclamation mark in a message
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -F '!",' | grep -P '.' && echo "No need for an exclamation mark (looks dirty, unconfident)."
# Trailing whitespaces
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -n -P ' $' | grep -n -P '.' && echo "^ Trailing whitespaces."
# Forbid stringstream because it's easy to use them incorrectly and hard to debug possible issues
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -P 'std::[io]?stringstream' | grep -v "STYLE_CHECK_ALLOW_STD_STRING_STREAM" && echo "Use WriteBufferFromOwnString or ReadBufferFromString instead of std::stringstream"
# Forbid std::cerr/std::cout in src (fine in programs/utils)
std_cerr_cout_excludes=(
/examples/
/tests/
_fuzzer
# OK
src/Common/ProgressIndication.cpp
# only under #ifdef DBMS_HASH_MAP_DEBUG_RESIZES, that is used only in tests
src/Common/HashTable/HashTable.h
# SensitiveDataMasker::printStats()
src/Common/SensitiveDataMasker.cpp
# StreamStatistics::print()
src/Compression/LZ4_decompress_faster.cpp
# ContextSharedPart with subsequent std::terminate()
src/Interpreters/Context.cpp
# IProcessor::dump()
src/Processors/IProcessor.cpp
src/Client/ClientApplicationBase.cpp
src/Client/ClientBase.cpp
src/Client/LineReader.cpp
src/Client/QueryFuzzer.cpp
src/Client/Suggest.cpp
src/Client/ClientBase.h
src/Client/LineReader.h
src/Client/ReplxxLineReader.h
src/Bridge/IBridge.cpp
src/Daemon/BaseDaemon.cpp
src/Loggers/Loggers.cpp
src/Common/GWPAsan.cpp
src/Common/ProgressIndication.h
)
sources_with_std_cerr_cout=( $(
find $ROOT_PATH/{src,base} -name '*.h' -or -name '*.cpp' | \
grep -vP $EXCLUDE_DIRS | \
grep -F -v $(printf -- "-e %s " "${std_cerr_cout_excludes[@]}") | \
xargs grep -F --with-filename -e std::cerr -e std::cout | cut -d: -f1 | sort -u
) )
# Exclude comments
for src in "${sources_with_std_cerr_cout[@]}"; do
# suppress stderr, since it may contain a warning for #pragma once in headers
if gcc -fpreprocessed -dD -E "$src" 2>/dev/null | grep -F -q -e std::cerr -e std::cout; then
echo "$src: uses std::cerr/std::cout"
fi
done
expect_tests=( $(find $ROOT_PATH/tests/queries -name '*.expect') )
for test_case in "${expect_tests[@]}"; do
pattern="^exp_internal -f \$CLICKHOUSE_TMP/\$basename.debuglog 0$"
grep -q "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
if grep -q "^spawn.*CLICKHOUSE_CLIENT_BINARY$" "$test_case"; then
pattern="^spawn.*CLICKHOUSE_CLIENT_BINARY.*--history_file$"
grep -q "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
fi
# Otherwise expect_after/expect_before will not bail without stdin attached
# (and actually this is a hack anyway, correct way is to use $any_spawn_id)
pattern="-i \$any_spawn_id timeout"
grep -q -- "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
pattern="-i \$any_spawn_id eof"
grep -q -- "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
done
# Forbid non-unique error codes
if [[ "$(grep -Po "M\([0-9]*," $ROOT_PATH/src/Common/ErrorCodes.cpp | wc -l)" != "$(grep -Po "M\([0-9]*," $ROOT_PATH/src/Common/ErrorCodes.cpp | sort | uniq | wc -l)" ]]
then
echo "ErrorCodes.cpp contains non-unique error codes"
fi
# Check that there is no system-wide libraries/headers in use.
#
# NOTE: it is better to override find_path/find_library in cmake, but right now
# it is not possible, see [1] for the reference.
#
# [1]: git grep --recurse-submodules -e find_library -e find_path contrib
if git grep -e find_path -e find_library -- :**CMakeLists.txt; then
echo "There is find_path/find_library usage. ClickHouse should use everything bundled. Consider adding one more contrib module."
fi
# Forbid std::filesystem::is_symlink and std::filesystem::read_symlink, because it's easy to use them incorrectly
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -P '::(is|read)_symlink' | grep -v "STYLE_CHECK_ALLOW_STD_FS_SYMLINK" && echo "Use DB::FS::isSymlink and DB::FS::readSymlink instead"
# Forbid __builtin_unreachable(), because it's hard to debug when it becomes reachable
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -P '__builtin_unreachable' && echo "Use UNREACHABLE() from defines.h instead"
# Forbid mt19937() and random_device() which are outdated and slow
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -P '(std::mt19937|std::mersenne_twister_engine|std::random_device)' && echo "Use pcg64_fast (from pcg_random.h) and randomSeed (from Common/randomSeed.h) instead"
# Require checking return value of close(),
# since it can hide fd misuse and break other places.
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -e ' close(.*fd' -e ' ::close(' | grep -v = && echo "Return value of close() should be checked"
# A small typo can lead to debug code in release builds, see https://github.com/ClickHouse/ClickHouse/pull/47647
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -F '#ifdef NDEBUG' | xargs -I@FILE awk '/#ifdef NDEBUG/ { inside = 1; dirty = 1 } /#endif/ { if (inside && dirty) { print "File @FILE has suspicious #ifdef NDEBUG, possibly confused with #ifndef NDEBUG" }; inside = 0 } /#else/ { dirty = 0 }' @FILE
# If a user is doing dynamic or typeid cast with a pointer, and immediately dereferencing it, it is unsafe.
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep --line-number -P '(dynamic|typeid)_cast<[^>]+\*>\([^\(\)]+\)->' | grep -P '.' && echo "It's suspicious when you are doing a dynamic_cast or typeid_cast with a pointer and immediately dereferencing it. Use references instead of pointers or check a pointer to nullptr."
# Check for bad punctuation: whitespace before comma.
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P --line-number '\w ,' | grep -v 'bad punctuation is ok here' && echo "^ There is bad punctuation: whitespace before comma. You should write it like this: 'Hello, world!'"
# Check usage of std::regex which is too bloated and slow.
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P --line-number 'std::regex' | grep -P '.' && echo "^ Please use re2 instead of std::regex"
# Cyrillic characters hiding inside Latin.
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | grep -v StorageSystemContributors.generated.cpp | xargs grep -P --line-number '[a-zA-Z][а-яА-ЯёЁ]|[а-яА-ЯёЁ][a-zA-Z]' && echo "^ Cyrillic characters found in unexpected place."
# Orphaned header files.
join -v1 <(find $ROOT_PATH/{src,programs,utils} -name '*.h' -printf '%f\n' | sort | uniq) <(find $ROOT_PATH/{src,programs,utils} -name '*.cpp' -or -name '*.c' -or -name '*.h' -or -name '*.S' | xargs grep --no-filename -o -P '[\w-]+\.h' | sort | uniq) |
grep . && echo '^ Found orphan header files.'
# Don't allow dynamic compiler check with CMake, because we are using hermetic, reproducible, cross-compiled, static (TLDR, good) builds.
ls -1d $ROOT_PATH/contrib/*-cmake | xargs -I@ find @ -name 'CMakeLists.txt' -or -name '*.cmake' | xargs grep --with-filename -i -P 'check_c_compiler_flag|check_cxx_compiler_flag|check_c_source_compiles|check_cxx_source_compiles|check_include_file|check_symbol_exists|cmake_push_check_state|cmake_pop_check_state|find_package|CMAKE_REQUIRED_FLAGS|CheckIncludeFile|CheckCCompilerFlag|CheckCXXCompilerFlag|CheckCSourceCompiles|CheckCXXSourceCompiles|CheckCSymbolExists|CheckCXXSymbolExists' | grep -v Rust && echo "^ It's not allowed to have dynamic compiler checks with CMake."
# Wrong spelling of abbreviations, e.g. SQL is right, Sql is wrong. XMLHttpRequest is very wrong.
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -P 'Sql|Html|Xml|Cpu|Tcp|Udp|Http|Db|Json|Yaml' | grep -v -P 'RabbitMQ|Azure|Aws|aws|Avro|IO/S3' &&
echo "Abbreviations such as SQL, XML, HTTP, should be in all caps. For example, SQL is right, Sql is wrong. XMLHttpRequest is very wrong."
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
grep -vP $EXCLUDE_DIRS |
xargs grep -F -i 'ErrorCodes::LOGICAL_ERROR, "Logical error:' &&
echo "If an exception has LOGICAL_ERROR code, there is no need to include the text 'Logical error' in the exception message, because then the phrase 'Logical error' will be printed twice."
# There shouldn't be any code snippets under GPL or LGPL
find $ROOT_PATH/{src,base,programs} -name '*.h' -or -name '*.cpp' 2>/dev/null | xargs grep -i -F 'General Public License' && echo "There shouldn't be any code snippets under GPL or LGPL"
PATTERN="allow_";
DIFF=$(comm -3 <(grep -o "\b$PATTERN\w*\b" $ROOT_PATH/src/Core/Settings.cpp | sort -u) <(grep -o -h "\b$PATTERN\w*\b" $ROOT_PATH/src/Databases/enableAllExperimentalSettings.cpp $ROOT_PATH/utils/check-style/experimental_settings_ignore.txt | sort -u));
[ -n "$DIFF" ] && echo "$DIFF" && echo "^^ Detected 'allow_*' settings that might need to be included in src/Databases/enableAllExperimentalSettings.cpp" && echo "Alternatively, consider adding an exception to utils/check-style/experimental_settings_ignore.txt"

View File

@ -0,0 +1,37 @@
#!/usr/bin/env bash
# The script checks if all submodules defined in $GIT_ROOT/.gitmodules exist in $GIT_ROOT/contrib
set -e
GIT_ROOT="."
cd "$GIT_ROOT"
# Remove keys for submodule.*.path parameters, the values are separated by \0
# and check if the directory exists
git config --file .gitmodules --null --get-regexp path | sed -z 's|.*\n||' | \
xargs -P100 -0 --no-run-if-empty -I{} bash -c 'if ! test -d '"'{}'"'; then echo Directory for submodule {} is not found; exit 1; fi' 2>&1
# And check that the submodule is fine
git config --file .gitmodules --null --get-regexp path | sed -z 's|.*\n||' | \
xargs -P100 -0 --no-run-if-empty -I{} git submodule status -q '{}' 2>&1
# All submodules should be from https://github.com/
git config --file "$ROOT_PATH/.gitmodules" --get-regexp 'submodule\..+\.url' | \
while read -r line; do
name=${line#submodule.}; name=${name%.url*}
url=${line#* }
[[ "$url" != 'https://github.com/'* ]] && echo "All submodules should be from https://github.com/, submodule '$name' has '$url'"
done
# All submodules should be of this form: [submodule "contrib/libxyz"] (for consistency; the exact submodule name does not matter too much)
# - restrict the check to top-level .gitmodules file
git config --file "$ROOT_PATH/.gitmodules" --get-regexp 'submodule\..+\.path' | \
while read -r line; do
name=${line#submodule.}; name=${name%.path*}
path=${line#* }
[ "$name" != "$path" ] && echo "Submodule name '$name' is not equal to it's path '$path'"
done

View File

@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Check for typos in code.
ROOT_PATH="."
#FIXME: check all (or almost all) repo
codespell \
--skip "*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp,*obfuscateQueries.cpp,d3-*.js,*.min.js,*.sum,${ROOT_PATH}/utils/check-style/aspell-ignore" \
--ignore-words "${ROOT_PATH}/utils/check-style/codespell-ignore-words.list" \
--exclude-file "${ROOT_PATH}/utils/check-style/codespell-ignore-lines.list" \
--quiet-level 2 \
"$ROOT_PATH"/{src,base,programs,utils} \
$@ | grep -P '.' \
&& echo -e "\nFound some typos in code.\nSee the files utils/check-style/codespell* if you want to add an exception."

View File

@ -0,0 +1,98 @@
#!/bin/bash
ROOT_PATH="."
# Queries to system.query_log/system.query_thread_log should have current_database = currentDatabase() condition
# NOTE: it is not that accurate, but at least something.
tests_with_query_log=( $(
find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
xargs grep --with-filename -e system.query_log -e system.query_thread_log | cut -d: -f1 | sort -u
) )
for test_case in "${tests_with_query_log[@]}"; do
grep -qE current_database.*currentDatabase "$test_case" || {
grep -qE 'current_database.*\$CLICKHOUSE_DATABASE' "$test_case"
} || echo "Queries to system.query_log/system.query_thread_log does not have current_database = currentDatabase() condition in $test_case"
done
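# Illustration (hypothetical query): `SELECT ... FROM system.query_log WHERE current_database = currentDatabase()`
# satisfies the grep above; the same query without that filter would be reported.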
grep -iE 'SYSTEM STOP MERGES;?$' -R $ROOT_PATH/tests/queries && echo "Merges cannot be disabled globally in fast/stateful/stateless tests, because it will break concurrently running queries"
# Queries to:
tables_with_database_column=(
system.tables
system.parts
system.detached_parts
system.parts_columns
system.columns
system.projection_parts
system.mutations
)
# should have database = currentDatabase() condition
#
# NOTE: it is not that accurate, but at least something.
tests_with_database_column=( $(
find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
xargs grep --with-filename $(printf -- "-e %s " "${tables_with_database_column[@]}") |
grep -v -e ':--' -e ':#' |
cut -d: -f1 | sort -u
) )
for test_case in "${tests_with_database_column[@]}"; do
grep -qE database.*currentDatabase "$test_case" || {
grep -qE 'database.*\$CLICKHOUSE_DATABASE' "$test_case"
} || {
# explicit database
grep -qE "database[ ]*=[ ]*'" "$test_case"
} || {
echo "Queries to ${tables_with_database_column[*]} does not have database = currentDatabase()/\$CLICKHOUSE_DATABASE condition in $test_case"
}
done
# Queries with ReplicatedMergeTree
# NOTE: it is not that accurate, but at least something.
tests_with_replicated_merge_tree=( $(
find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
xargs grep --with-filename -e "Replicated.*MergeTree[ ]*(.*" | cut -d: -f1 | sort -u
) )
for test_case in "${tests_with_replicated_merge_tree[@]}"; do
case "$test_case" in
*.gen.*)
;;
*.sh)
test_case_zk_prefix="\(\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX\|{database}\)"
grep -q -e "Replicated.*MergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "Replicated.*MergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)"
;;
*.sql|*.sql.j2)
test_case_zk_prefix="\({database}\|currentDatabase()\|{uuid}\|{default_path_test}\)"
grep -q -e "Replicated.*MergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "Replicated.*MergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)"
;;
*.py)
# Right now there are no such tests anyway
echo "No ReplicatedMergeTree style check for *.py ($test_case)"
;;
esac
done
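# Illustration (hypothetical DDL): ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t', 'r1')
# contains one of the required prefixes, while a hard-coded path such as '/clickhouse/tables/t' is flagged.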
# The stateful directory should only contain the tests that depend on the test dataset (hits or visits).
find $ROOT_PATH/tests/queries/1_stateful -name '*.sql' -or -name '*.sh' | grep -v '00076_system_columns_bytes' | xargs -I{} bash -c 'grep -q -P "hits|visits" "{}" || echo "The test {} does not depend on the test dataset (hits or visits table) and should be located in the 0_stateless directory. You can also add an exception to the check-style script."'
# Check for existence of __init__.py files
for i in "${ROOT_PATH}"/tests/integration/test_*; do FILE="${i}/__init__.py"; [ ! -f "${FILE}" ] && echo "${FILE} should exist for every integration test"; done
# Check for executable bit on non-executable files
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} '(' -name '*.cpp' -or -name '*.h' -or -name '*.sql' -or -name '*.j2' -or -name '*.xml' -or -name '*.reference' -or -name '*.txt' -or -name '*.md' ')' -and -executable | grep -P '.' && echo "These files should not be executable."
# Check for BOM
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xEF\xBB\xBF' | grep -P '.' && echo "Files should not have UTF-8 BOM"
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFF\xFE' | grep -P '.' && echo "Files should not have UTF-16LE BOM"
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFE\xFF' | grep -P '.' && echo "Files should not have UTF-16BE BOM"
# Conflict markers
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' |
xargs grep -P '^(<<<<<<<|=======|>>>>>>>)$' | grep -P '.' && echo "Conflict markers are found in files"
# DOS/Windows newlines
find $ROOT_PATH/{base,src,programs,utils,docs} -name '*.md' -or -name '*.h' -or -name '*.cpp' -or -name '*.js' -or -name '*.py' -or -name '*.html' | xargs grep -l -P '\r$' && echo "^ Files contain DOS/Windows newlines (\r\n instead of \n)."
# # workflows check
# act --list --directory="$ROOT_PATH" 1>/dev/null 2>&1 || act --list --directory="$ROOT_PATH" 2>&1
# actionlint -ignore 'reusable workflow call.+' || :

View File

@ -0,0 +1,37 @@
#!/usr/bin/perl
use strict;
# Find double whitespace such as "a,  b,  c" that looks very ugly and annoying.
# But skip double whitespaces if they are used as an alignment - by comparing to surrounding lines.
my $ret = 0;
foreach my $file (@ARGV)
{
my @array;
open (FH,'<',$file);
while (<FH>)
{
push @array, $_;
}
for (my $i = 1; $i < $#array; ++$i)
{
if ($array[$i] =~ ',( {2,3})[^ /]')
{
# https://stackoverflow.com/questions/87380/how-can-i-find-the-location-of-a-regex-match-in-perl
if ((substr($array[$i - 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/) # whitespaces are not part of alignment
&& (substr($array[$i + 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/)
&& $array[$i] !~ /(-?\d+\w*,\s+){3,}/) # this is not a number table like { 10, -1, 2 }
{
print($file . ":" . ($i + 1) . $array[$i]);
$ret = 1;
}
}
}
}
exit $ret;
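# Illustration (hypothetical line): `f(a,  b);` is reported unless the extra spaces line up with the
# neighbouring lines (i.e. they are alignment) or the line looks like a number table such as { 10, -1, 2 }.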

View File

@ -0,0 +1,251 @@
from praktika import Docker, Secret
S3_BUCKET_NAME = "clickhouse-builds"
S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com"
class RunnerLabels:
CI_SERVICES = "ci_services"
CI_SERVICES_EBS = "ci_services_ebs"
BASE_BRANCH = "master"
SECRETS = [
Secret.Config(
name="dockerhub_robot_password",
type=Secret.Type.AWS_SSM_VAR,
),
Secret.Config(
name="woolenwolf_gh_app.clickhouse-app-id",
type=Secret.Type.AWS_SSM_SECRET,
),
Secret.Config(
name="woolenwolf_gh_app.clickhouse-app-key",
type=Secret.Type.AWS_SSM_SECRET,
),
]
DOCKERS = [
# Docker.Config(
# name="clickhouse/binary-builder",
# path="./docker/packager/binary-builder",
# arm64=True,
# amd64=True,
# depends_on=[],
# ),
# Docker.Config(
# name="clickhouse/cctools",
# path="./docker/packager/cctools",
# arm64=True,
# amd64=True,
# depends_on=[],
# ),
# Docker.Config(
# name="clickhouse/test-old-centos",
# path="./docker/test/compatibility/centos",
# arm64=True,
# amd64=True,
# depends_on=[],
# ),
# Docker.Config(
# name="clickhouse/test-old-ubuntu",
# path="./docker/test/compatibility/ubuntu",
# arm64=True,
# amd64=True,
# depends_on=[],
# ),
# Docker.Config(
# name="clickhouse/test-util",
# path="./docker/test/util",
# arm64=True,
# amd64=True,
# depends_on=[],
# ),
# Docker.Config(
# name="clickhouse/integration-test",
# path="./docker/test/integration/base",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/fuzzer",
# path="./docker/test/fuzzer",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/performance-comparison",
# path="./docker/test/performance-comparison",
# arm64=True,
# amd64=True,
# depends_on=[],
# ),
# Docker.Config(
# name="clickhouse/fasttest",
# path="./docker/test/fasttest",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-util"],
# ),
# Docker.Config(
# name="clickhouse/test-base",
# path="./docker/test/base",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-util"],
# ),
# Docker.Config(
# name="clickhouse/clickbench",
# path="./docker/test/clickbench",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/keeper-jepsen-test",
# path="./docker/test/keeper-jepsen",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/server-jepsen-test",
# path="./docker/test/server-jepsen",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/sqllogic-test",
# path="./docker/test/sqllogic",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/sqltest",
# path="./docker/test/sqltest",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/stateless-test",
# path="./docker/test/stateless",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/stateful-test",
# path="./docker/test/stateful",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/stateless-test"],
# ),
# Docker.Config(
# name="clickhouse/stress-test",
# path="./docker/test/stress",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/stateful-test"],
# ),
# Docker.Config(
# name="clickhouse/unit-test",
# path="./docker/test/unit",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/integration-tests-runner",
# path="./docker/test/integration/runner",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
Docker.Config(
name="clickhouse/style-test",
path="./ci_v2/docker/style-test",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
# Docker.Config(
# name="clickhouse/docs-builder",
# path="./docker/docs/builder",
# arm64=True,
# amd64=True,
# depends_on=["clickhouse/test-base"],
# ),
]
# TODO:
# "docker/test/integration/s3_proxy": {
# "name": "clickhouse/s3-proxy",
# "dependent": []
# },
# "docker/test/integration/resolver": {
# "name": "clickhouse/python-bottle",
# "dependent": []
# },
# "docker/test/integration/helper_container": {
# "name": "clickhouse/integration-helper",
# "dependent": []
# },
# "docker/test/integration/mysql_golang_client": {
# "name": "clickhouse/mysql-golang-client",
# "dependent": []
# },
# "docker/test/integration/dotnet_client": {
# "name": "clickhouse/dotnet-client",
# "dependent": []
# },
# "docker/test/integration/mysql_java_client": {
# "name": "clickhouse/mysql-java-client",
# "dependent": []
# },
# "docker/test/integration/mysql_js_client": {
# "name": "clickhouse/mysql-js-client",
# "dependent": []
# },
# "docker/test/integration/mysql_php_client": {
# "name": "clickhouse/mysql-php-client",
# "dependent": []
# },
# "docker/test/integration/postgresql_java_client": {
# "name": "clickhouse/postgresql-java-client",
# "dependent": []
# },
# "docker/test/integration/kerberos_kdc": {
# "only_amd64": true,
# "name": "clickhouse/kerberos-kdc",
# "dependent": []
# },
# "docker/test/integration/kerberized_hadoop": {
# "only_amd64": true,
# "name": "clickhouse/kerberized-hadoop",
# "dependent": []
# },
# "docker/test/sqlancer": {
# "name": "clickhouse/sqlancer-test",
# "dependent": []
# },
# "docker/test/install/deb": {
# "name": "clickhouse/install-deb-test",
# "dependent": []
# },
# "docker/test/install/rpm": {
# "name": "clickhouse/install-rpm-test",
# "dependent": []
# },
# "docker/test/integration/nginx_dav": {
# "name": "clickhouse/nginx-dav",
# "dependent": []
# }
class JobNames:
STYLE_CHECK = "Style Check"

View File

@ -0,0 +1,20 @@
from ci_v2.settings.definitions import (
S3_BUCKET_HTTP_ENDPOINT,
S3_BUCKET_NAME,
RunnerLabels,
)
S3_ARTIFACT_PATH = f"{S3_BUCKET_NAME}/artifacts"
CI_CONFIG_RUNS_ON = [RunnerLabels.CI_SERVICES]
DOCKER_BUILD_RUNS_ON = [RunnerLabels.CI_SERVICES_EBS]
CACHE_S3_PATH = f"{S3_BUCKET_NAME}/ci_ch_cache"
HTML_S3_PATH = f"{S3_BUCKET_NAME}/reports"
S3_BUCKET_TO_HTTP_ENDPOINT = {S3_BUCKET_NAME: S3_BUCKET_HTTP_ENDPOINT}
DOCKERHUB_USERNAME = "robotclickhouse"
DOCKERHUB_SECRET = "dockerhub_robot_password"
CI_DB_DB_NAME = "default"
CI_DB_TABLE_NAME = "checks"
INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS = ""

View File

@ -0,0 +1,44 @@
from typing import List
from ci_v2.settings.definitions import (
BASE_BRANCH,
DOCKERS,
SECRETS,
JobNames,
RunnerLabels,
)
from praktika import Job, Workflow
style_check_job = Job.Config(
name=JobNames.STYLE_CHECK,
runs_on=[RunnerLabels.CI_SERVICES],
command="python3 ./ci_v2/jobs/check_style.py",
run_in_docker="clickhouse/style-test",
)
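# Presumably run_in_docker must name a Docker.Config from DOCKERS in ci_v2/settings/definitions.py;
# "clickhouse/style-test" is the only image enabled there at the moment.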
workflow = Workflow.Config(
name="PR",
event=Workflow.Event.PULL_REQUEST,
base_branches=[BASE_BRANCH],
jobs=[
style_check_job,
],
dockers=DOCKERS,
secrets=SECRETS,
enable_cache=True,
enable_report=True,
enable_merge_ready_status=True,
)
WORKFLOWS = [
workflow,
] # type: List[Workflow.Config]
if __name__ == "__main__":
# example: local job test inside praktika environment
from praktika.runner import Runner
Runner.generate_dummy_environment(workflow, style_check_job)
Runner().run(workflow, style_check_job)
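# Hypothetical local invocation (module path assumed): `python3 ci_v2/workflows/pull_request.py`
# builds the dummy environment and runs the style check job as configured above.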

View File

@ -2,11 +2,11 @@
# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54490) SET(VERSION_REVISION 54491)
SET(VERSION_MAJOR 24) SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 9) SET(VERSION_MINOR 10)
SET(VERSION_PATCH 1) SET(VERSION_PATCH 1)
SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608) SET(VERSION_GITHASH b12a367741812f9e5fe754d19ebae600e2a2614c)
SET(VERSION_DESCRIBE v24.9.1.1-testing) SET(VERSION_DESCRIBE v24.10.1.1-testing)
SET(VERSION_STRING 24.9.1.1) SET(VERSION_STRING 24.10.1.1)
# end of autochange # end of autochange

View File

@ -11,6 +11,38 @@ option (ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries n
if (ARCH_NATIVE) if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native") set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
# Populate the ENABLE_ option flags. This is required for the build of some third-party dependencies, specifically snappy, which
# (somewhat weirdly) expects the relative SNAPPY_HAVE_ preprocessor variables to be populated, in addition to the microarchitecture
# feature flags being enabled in the compiler. This fixes the ARCH_NATIVE flag by automatically populating the ENABLE_ option flags
# according to the current CPU's capabilities, detected using clang.
if (ARCH_AMD64)
execute_process(
COMMAND sh -c "clang -E - -march=native -###"
INPUT_FILE /dev/null
OUTPUT_QUIET
ERROR_VARIABLE TEST_FEATURE_RESULT)
macro(TEST_AMD64_FEATURE TEST_FEATURE_RESULT feat flag)
if (${TEST_FEATURE_RESULT} MATCHES "\"\\+${feat}\"")
set(${flag} ON)
else ()
set(${flag} OFF)
endif ()
endmacro()
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} ssse3 ENABLE_SSSE3)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} sse4.1 ENABLE_SSE41)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} sse4.2 ENABLE_SSE42)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} vpclmulqdq ENABLE_PCLMULQDQ)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} popcnt ENABLE_POPCNT)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx ENABLE_AVX)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx2 ENABLE_AVX2)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx512f ENABLE_AVX512)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx512vbmi ENABLE_AVX512_VBMI)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} bmi ENABLE_BMI)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} bmi2 ENABLE_BMI2)
endif ()
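# For illustration (output is toolchain-dependent): `clang -E - -march=native -###` prints the driver
# command line with quoted feature pairs such as "-target-feature" "+avx2", which is what the
# MATCHES test in TEST_AMD64_FEATURE looks for.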
elseif (ARCH_AARCH64) elseif (ARCH_AARCH64)
# ARM publishes almost every year a new revision of its ISA [1]. Each version comes with new mandatory and optional features from # ARM publishes almost every year a new revision of its ISA [1]. Each version comes with new mandatory and optional features from
# which CPU vendors can pick and choose. This creates a lot of variability ... We provide two build "profiles", one for maximum # which CPU vendors can pick and choose. This creates a lot of variability ... We provide two build "profiles", one for maximum

View File

@ -12,11 +12,13 @@ macro(add_headers_only prefix common_path)
add_glob(${prefix}_headers ${common_path}/*.h) add_glob(${prefix}_headers ${common_path}/*.h)
endmacro() endmacro()
# Assumes the path is under src and that src/ needs to be removed (valid for any subdirectory under src/)
macro(extract_into_parent_list src_list dest_list) macro(extract_into_parent_list src_list dest_list)
list(REMOVE_ITEM ${src_list} ${ARGN}) list(REMOVE_ITEM ${src_list} ${ARGN})
get_filename_component(__dir_name ${CMAKE_CURRENT_SOURCE_DIR} NAME) file(RELATIVE_PATH relative ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
string(REPLACE "src/" "" relative ${relative})
foreach(file IN ITEMS ${ARGN}) foreach(file IN ITEMS ${ARGN})
list(APPEND ${dest_list} ${__dir_name}/${file}) list(APPEND ${dest_list} ${relative}/${file})
endforeach() endforeach()
set(${dest_list} "${${dest_list}}" PARENT_SCOPE) set(${dest_list} "${${dest_list}}" PARENT_SCOPE)
endmacro() endmacro()

View File

@ -18,4 +18,4 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (USE_MUSL 1) set (USE_MUSL 1)
add_definitions(-DUSE_MUSL=1) add_definitions(-DUSE_MUSL=1 -D__MUSL__=1)

View File

@ -48,6 +48,8 @@ if (NOT LINKER_NAME)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld") find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
elseif (OS_DARWIN) elseif (OS_DARWIN)
find_program (LLD_PATH NAMES "ld") find_program (LLD_PATH NAMES "ld")
# Duplicate libraries passed to the linker is not a problem.
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no_warn_duplicate_libraries")
endif () endif ()
if (LLD_PATH) if (LLD_PATH)
if (OS_LINUX OR OS_DARWIN) if (OS_LINUX OR OS_DARWIN)

View File

@ -145,8 +145,13 @@ add_contrib (isa-l-cmake isa-l)
add_contrib (libhdfs3-cmake libhdfs3) # requires: google-protobuf, krb5, isa-l add_contrib (libhdfs3-cmake libhdfs3) # requires: google-protobuf, krb5, isa-l
add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arrow, libhdfs3 add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arrow, libhdfs3
add_contrib (cppkafka-cmake cppkafka) add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq) option(ENABLE_LIBPQXX "Enable PostgreSQL" ${ENABLE_LIBRARIES})
if (ENABLE_LIBPQXX)
add_contrib (postgres-cmake postgres)
add_contrib (libpqxx-cmake libpqxx)
endif()
add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
add_contrib (nuraft-cmake NuRaft) add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float) add_contrib (fast_float-cmake fast_float)
@ -155,6 +160,12 @@ add_contrib (datasketches-cpp-cmake datasketches-cpp)
add_contrib (incbin-cmake incbin) add_contrib (incbin-cmake incbin)
add_contrib (sqids-cpp-cmake sqids-cpp) add_contrib (sqids-cpp-cmake sqids-cpp)
option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES})
if (USE_MONGODB)
add_contrib (mongo-c-driver-cmake mongo-c-driver) # requires: zlib
add_contrib (mongo-cxx-driver-cmake mongo-cxx-driver) # requires: libmongoc, libbson
endif()
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES}) option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
if (ENABLE_NLP) if (ENABLE_NLP)
add_contrib (libstemmer-c-cmake libstemmer_c) add_contrib (libstemmer-c-cmake libstemmer_c)

View File

@ -1 +1,4 @@
# See contrib/usearch-cmake/CMakeLists.txt set (FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16/")
add_library(_fp16 INTERFACE)
target_include_directories(_fp16 SYSTEM INTERFACE ${FP16_PROJECT_DIR}/include)

2
contrib/SimSIMD vendored

@ -1 +1 @@
Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26 Subproject commit ff51434d90c66f916e94ff05b24530b127aa4cff

View File

@ -1 +1,4 @@
# See contrib/usearch-cmake/CMakeLists.txt set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
add_library(_simsimd INTERFACE)
target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include")

View File

@ -1,6 +1,9 @@
set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp") set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}") set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}")
# To avoid errors "'X' does not refer to a value" while using `offsetof` function.
set(CMAKE_CXX_STANDARD 17)
# This is a minimized version of the function definition in CMake/AbseilHelpers.cmake # This is a minimized version of the function definition in CMake/AbseilHelpers.cmake
# #

View File

@ -69,6 +69,11 @@ set (SRCS_PROGRAM_OPTIONS
"${LIBRARY_DIR}/libs/program_options/src/winmain.cpp" "${LIBRARY_DIR}/libs/program_options/src/winmain.cpp"
) )
# Always compile this file with the highest possible level of optimizations, even in Debug builds.
# Otherwise the client takes too long to start, and SQL stateless tests (which pass many options to parse) become too slow
# https://github.com/ClickHouse/ClickHouse/issues/65745
set_source_files_properties(${SRCS_PROGRAM_OPTIONS} PROPERTIES COMPILE_FLAGS "-O3")
add_library (_boost_program_options ${SRCS_PROGRAM_OPTIONS}) add_library (_boost_program_options ${SRCS_PROGRAM_OPTIONS})
add_library (boost::program_options ALIAS _boost_program_options) add_library (boost::program_options ALIAS _boost_program_options)
target_include_directories (_boost_program_options SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}) target_include_directories (_boost_program_options SYSTEM BEFORE PUBLIC ${LIBRARY_DIR})

View File

@ -5,6 +5,9 @@ if(NOT ENABLE_PROTOBUF)
return() return()
endif() endif()
# To avoid errors "'X' does not refer to a value" while using `offsetof` function.
set(CMAKE_CXX_STANDARD 17)
set(Protobuf_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf/src") set(Protobuf_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf/src")
if(OS_FREEBSD AND SANITIZE STREQUAL "address") if(OS_FREEBSD AND SANITIZE STREQUAL "address")
# ../contrib/protobuf/src/google/protobuf/arena_impl.h:45:10: fatal error: 'sanitizer/asan_interface.h' file not found # ../contrib/protobuf/src/google/protobuf/arena_impl.h:45:10: fatal error: 'sanitizer/asan_interface.h' file not found

2
contrib/grpc vendored

@ -1 +1 @@
Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e Subproject commit 62e871c36fa93c0af939bd31762845265214fe3d

View File

@ -6,6 +6,8 @@ if(NOT ENABLE_GRPC)
return() return()
endif() endif()
set(CMAKE_CXX_STANDARD 17)
set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc") set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc")
set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc") set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc")

View File

@ -22,7 +22,7 @@
# limitations under the License. # limitations under the License.
# We want to use C++23, but GRPC is not ready # We want to use C++23, but GRPC is not ready
set (CMAKE_CXX_STANDARD 20) set (CMAKE_CXX_STANDARD 17)
set(_gRPC_ZLIB_INCLUDE_DIR "") set(_gRPC_ZLIB_INCLUDE_DIR "")
set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib) set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib)

View File

@ -164,15 +164,6 @@ target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE)
# Because our coverage callbacks call malloc, and recursive call of malloc could not work. # Because our coverage callbacks call malloc, and recursive call of malloc could not work.
target_compile_options(_jemalloc PRIVATE ${WITHOUT_COVERAGE_FLAGS_LIST}) target_compile_options(_jemalloc PRIVATE ${WITHOUT_COVERAGE_FLAGS_LIST})
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
target_compile_definitions(_jemalloc PRIVATE
-DJEMALLOC_DEBUG=1
# Usage examples:
# - MALLOC_CONF=log:.
# - MALLOC_CONF='log:core.malloc.exit|core.sallocx.entry|core.sdallocx.entry'
-DJEMALLOC_LOG=1)
endif ()
target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1) target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
# jemalloc provides support two unwind flavors: # jemalloc provides support two unwind flavors:

2
contrib/libdivide vendored

@ -1 +1 @@
Subproject commit 3bd34388573681ce563348cdf04fe15d24770d04 Subproject commit 01526031eb79375dc85e0212c966d2c514a01234

1
contrib/libpq vendored

@ -1 +0,0 @@
Subproject commit 2446f2c85650b56df9d4ebc4c2ea7f4b01beee57

View File

@ -1,78 +0,0 @@
if (NOT ENABLE_LIBPQXX)
return()
endif()
set(LIBPQ_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpq")
set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-auth.c"
"${LIBPQ_SOURCE_DIR}/fe-auth-scram.c"
"${LIBPQ_SOURCE_DIR}/fe-connect.c"
"${LIBPQ_SOURCE_DIR}/fe-exec.c"
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c"
"${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c"
"${LIBPQ_SOURCE_DIR}/libpq-events.c"
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"
"${LIBPQ_SOURCE_DIR}/common/scram-common.c"
"${LIBPQ_SOURCE_DIR}/common/sha2.c"
"${LIBPQ_SOURCE_DIR}/common/sha1.c"
"${LIBPQ_SOURCE_DIR}/common/md5.c"
"${LIBPQ_SOURCE_DIR}/common/md5_common.c"
"${LIBPQ_SOURCE_DIR}/common/hmac_openssl.c"
"${LIBPQ_SOURCE_DIR}/common/cryptohash.c"
"${LIBPQ_SOURCE_DIR}/common/saslprep.c"
"${LIBPQ_SOURCE_DIR}/common/unicode_norm.c"
"${LIBPQ_SOURCE_DIR}/common/ip.c"
"${LIBPQ_SOURCE_DIR}/common/jsonapi.c"
"${LIBPQ_SOURCE_DIR}/common/wchar.c"
"${LIBPQ_SOURCE_DIR}/common/base64.c"
"${LIBPQ_SOURCE_DIR}/common/link-canary.c"
"${LIBPQ_SOURCE_DIR}/common/fe_memutils.c"
"${LIBPQ_SOURCE_DIR}/common/string.c"
"${LIBPQ_SOURCE_DIR}/common/pg_get_line.c"
"${LIBPQ_SOURCE_DIR}/common/stringinfo.c"
"${LIBPQ_SOURCE_DIR}/common/psprintf.c"
"${LIBPQ_SOURCE_DIR}/common/encnames.c"
"${LIBPQ_SOURCE_DIR}/common/logging.c"
"${LIBPQ_SOURCE_DIR}/port/snprintf.c"
"${LIBPQ_SOURCE_DIR}/port/strlcpy.c"
"${LIBPQ_SOURCE_DIR}/port/strerror.c"
"${LIBPQ_SOURCE_DIR}/port/inet_net_ntop.c"
"${LIBPQ_SOURCE_DIR}/port/getpeereid.c"
"${LIBPQ_SOURCE_DIR}/port/chklocale.c"
"${LIBPQ_SOURCE_DIR}/port/noblock.c"
"${LIBPQ_SOURCE_DIR}/port/pg_strong_random.c"
"${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
"${LIBPQ_SOURCE_DIR}/port/thread.c"
"${LIBPQ_SOURCE_DIR}/port/path.c"
)
add_library(_libpq ${SRCS})
add_definitions(-DHAVE_BIO_METH_NEW)
add_definitions(-DHAVE_HMAC_CTX_NEW)
add_definitions(-DHAVE_HMAC_CTX_FREE)
target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_SOURCE_DIR}/include")
target_include_directories (_libpq SYSTEM PRIVATE "${LIBPQ_SOURCE_DIR}/configs")
# NOTE: this is a dirty hack to avoid and instead pg_config.h should be shipped
# for different OS'es like for jemalloc, not one generic for all OS'es like
# now.
if (OS_DARWIN OR OS_FREEBSD OR USE_MUSL)
target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1)
endif()
target_link_libraries (_libpq PRIVATE OpenSSL::SSL)
add_library(ch_contrib::libpq ALIAS _libpq)

2
contrib/libpqxx vendored

@ -1 +1 @@
Subproject commit c995193a3a14d71f4711f1f421f65a1a1db64640 Subproject commit 41e4c331564167cca97ad6eccbd5b8879c2ca044

View File

@ -1,16 +1,9 @@
option(ENABLE_LIBPQXX "Enalbe libpqxx" ${ENABLE_LIBRARIES})
if (NOT ENABLE_LIBPQXX)
message(STATUS "Not using libpqxx")
return()
endif()
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpqxx") set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpqxx")
set (SRCS set (SRCS
"${LIBRARY_DIR}/src/strconv.cxx"
"${LIBRARY_DIR}/src/array.cxx" "${LIBRARY_DIR}/src/array.cxx"
"${LIBRARY_DIR}/src/binarystring.cxx" "${LIBRARY_DIR}/src/binarystring.cxx"
"${LIBRARY_DIR}/src/blob.cxx"
"${LIBRARY_DIR}/src/connection.cxx" "${LIBRARY_DIR}/src/connection.cxx"
"${LIBRARY_DIR}/src/cursor.cxx" "${LIBRARY_DIR}/src/cursor.cxx"
"${LIBRARY_DIR}/src/encodings.cxx" "${LIBRARY_DIR}/src/encodings.cxx"
@ -19,59 +12,25 @@ set (SRCS
"${LIBRARY_DIR}/src/field.cxx" "${LIBRARY_DIR}/src/field.cxx"
"${LIBRARY_DIR}/src/largeobject.cxx" "${LIBRARY_DIR}/src/largeobject.cxx"
"${LIBRARY_DIR}/src/notification.cxx" "${LIBRARY_DIR}/src/notification.cxx"
"${LIBRARY_DIR}/src/params.cxx"
"${LIBRARY_DIR}/src/pipeline.cxx" "${LIBRARY_DIR}/src/pipeline.cxx"
"${LIBRARY_DIR}/src/result.cxx" "${LIBRARY_DIR}/src/result.cxx"
"${LIBRARY_DIR}/src/robusttransaction.cxx" "${LIBRARY_DIR}/src/robusttransaction.cxx"
"${LIBRARY_DIR}/src/row.cxx"
"${LIBRARY_DIR}/src/sql_cursor.cxx" "${LIBRARY_DIR}/src/sql_cursor.cxx"
"${LIBRARY_DIR}/src/strconv.cxx"
"${LIBRARY_DIR}/src/stream_from.cxx" "${LIBRARY_DIR}/src/stream_from.cxx"
"${LIBRARY_DIR}/src/stream_to.cxx" "${LIBRARY_DIR}/src/stream_to.cxx"
"${LIBRARY_DIR}/src/subtransaction.cxx" "${LIBRARY_DIR}/src/subtransaction.cxx"
"${LIBRARY_DIR}/src/time.cxx"
"${LIBRARY_DIR}/src/transaction.cxx" "${LIBRARY_DIR}/src/transaction.cxx"
"${LIBRARY_DIR}/src/transaction_base.cxx" "${LIBRARY_DIR}/src/transaction_base.cxx"
"${LIBRARY_DIR}/src/row.cxx"
"${LIBRARY_DIR}/src/params.cxx"
"${LIBRARY_DIR}/src/util.cxx" "${LIBRARY_DIR}/src/util.cxx"
"${LIBRARY_DIR}/src/version.cxx" "${LIBRARY_DIR}/src/version.cxx"
"${LIBRARY_DIR}/src/wait.cxx"
) )
# Need to explicitly include each header file, because in the directory include/pqxx there are also files add_library(_libpqxx ${SRCS})
# like just 'array'. So if including the whole directory with `target_include_directories`, it will make
# conflicts with all includes of <array>.
set (HDRS
"${LIBRARY_DIR}/include/pqxx/array.hxx"
"${LIBRARY_DIR}/include/pqxx/params.hxx"
"${LIBRARY_DIR}/include/pqxx/binarystring.hxx"
"${LIBRARY_DIR}/include/pqxx/composite.hxx"
"${LIBRARY_DIR}/include/pqxx/connection.hxx"
"${LIBRARY_DIR}/include/pqxx/cursor.hxx"
"${LIBRARY_DIR}/include/pqxx/dbtransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/errorhandler.hxx"
"${LIBRARY_DIR}/include/pqxx/except.hxx"
"${LIBRARY_DIR}/include/pqxx/field.hxx"
"${LIBRARY_DIR}/include/pqxx/isolation.hxx"
"${LIBRARY_DIR}/include/pqxx/largeobject.hxx"
"${LIBRARY_DIR}/include/pqxx/nontransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/notification.hxx"
"${LIBRARY_DIR}/include/pqxx/pipeline.hxx"
"${LIBRARY_DIR}/include/pqxx/prepared_statement.hxx"
"${LIBRARY_DIR}/include/pqxx/result.hxx"
"${LIBRARY_DIR}/include/pqxx/robusttransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/row.hxx"
"${LIBRARY_DIR}/include/pqxx/separated_list.hxx"
"${LIBRARY_DIR}/include/pqxx/strconv.hxx"
"${LIBRARY_DIR}/include/pqxx/stream_from.hxx"
"${LIBRARY_DIR}/include/pqxx/stream_to.hxx"
"${LIBRARY_DIR}/include/pqxx/subtransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/transaction.hxx"
"${LIBRARY_DIR}/include/pqxx/transaction_base.hxx"
"${LIBRARY_DIR}/include/pqxx/types.hxx"
"${LIBRARY_DIR}/include/pqxx/util.hxx"
"${LIBRARY_DIR}/include/pqxx/version.hxx"
"${LIBRARY_DIR}/include/pqxx/zview.hxx"
)
add_library(_libpqxx ${SRCS} ${HDRS})
target_link_libraries(_libpqxx PUBLIC ch_contrib::libpq) target_link_libraries(_libpqxx PUBLIC ch_contrib::libpq)
target_include_directories (_libpqxx SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include") target_include_directories (_libpqxx SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include")

View File

@ -50,7 +50,11 @@ if(NOT MARIADB_UNIX_ADDR)
set(MARIADB_UNIX_ADDR "/tmp/mysql.sock") set(MARIADB_UNIX_ADDR "/tmp/mysql.sock")
endif() endif()
set(HAVE_ALLOCA_H 1) if (OS_FREEBSD)
set(HAVE_ALLOCA_H 0)
else()
set(HAVE_ALLOCA_H 1)
endif()
set(HAVE_ARPA_INET_H 1) set(HAVE_ARPA_INET_H 1)
set(HAVE_DLFCN_H 1) set(HAVE_DLFCN_H 1)
set(HAVE_FCNTL_H 1) set(HAVE_FCNTL_H 1)

1
contrib/mongo-c-driver vendored Submodule

@ -0,0 +1 @@
Subproject commit d55410c69183c90d18fd3b3f1d9db3d224fc8d52

View File

@ -0,0 +1,150 @@
option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES})
if(NOT USE_MONGODB)
message(STATUS "Not using libmongoc and libbson")
return()
endif()
set(libbson_VERSION_MAJOR 1)
set(libbson_VERSION_MINOR 27)
set(libbson_VERSION_PATCH 0)
set(libbson_VERSION 1.27.0)
set(libmongoc_VERSION_MAJOR 1)
set(libmongoc_VERSION_MINOR 27)
set(libmongoc_VERSION_PATCH 0)
set(libmongoc_VERSION 1.27.0)
set(LIBBSON_SOURCES_ROOT "${ClickHouse_SOURCE_DIR}/contrib/mongo-c-driver/src")
set(LIBBSON_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/libbson/src")
file(GLOB_RECURSE LIBBSON_SOURCES "${LIBBSON_SOURCE_DIR}/*.c")
set(LIBBSON_BINARY_ROOT "${ClickHouse_BINARY_DIR}/contrib/mongo-c-driver/src")
set(LIBBSON_BINARY_DIR "${LIBBSON_BINARY_ROOT}/libbson/src")
include(TestBigEndian)
test_big_endian(BSON_BIG_ENDIAN)
if(BSON_BIG_ENDIAN)
set(BSON_BYTE_ORDER 4321)
else()
set(BSON_BYTE_ORDER 1234)
endif()
set(BSON_OS 1)
set(BSON_EXTRA_ALIGN 1)
set(BSON_HAVE_SNPRINTF 1)
set(BSON_HAVE_TIMESPEC 1)
set(BSON_HAVE_GMTIME_R 1)
set(BSON_HAVE_RAND_R 1)
set(BSON_HAVE_STRINGS_H 1)
set(BSON_HAVE_STRLCPY 0)
set(BSON_HAVE_STRNLEN 1)
set(BSON_HAVE_STDBOOL_H 1)
set(BSON_HAVE_CLOCK_GETTIME 1)
# common settings
set(MONGOC_TRACE 0)
set(MONGOC_ENABLE_STATIC_BUILD 1)
set(MONGOC_ENABLE_DEBUG_ASSERTIONS 0)
set(MONGOC_ENABLE_MONGODB_AWS_AUTH 0)
set(MONGOC_ENABLE_SASL_CYRUS 0)
set(MONGOC_ENABLE_SASL 0)
set(MONGOC_ENABLE_SASL_SSPI 0)
set(MONGOC_HAVE_SASL_CLIENT_DONE 0)
set(MONGOC_ENABLE_SRV 0)
# DNS
set(MONGOC_HAVE_DNSAPI 0)
set(MONGOC_HAVE_RES_SEARCH 0)
set(MONGOC_HAVE_RES_NSEARCH 0)
set(MONGOC_HAVE_RES_NCLOSE 0)
set(MONGOC_HAVE_RES_NDESTROY 0)
set(MONGOC_ENABLE_COMPRESSION 1)
set(MONGOC_ENABLE_COMPRESSION_ZLIB 0)
set(MONGOC_ENABLE_COMPRESSION_SNAPPY 0)
set(MONGOC_ENABLE_COMPRESSION_ZSTD 1)
# SSL
set(MONGOC_ENABLE_CRYPTO 0)
set(MONGOC_ENABLE_CRYPTO_CNG 0)
set(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO 0)
set(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE 0)
set(MONGOC_ENABLE_SSL 0)
set(MONGOC_ENABLE_SSL_OPENSSL 0)
set(MONGOC_ENABLE_SSL_SECURE_CHANNEL 0)
set(MONGOC_ENABLE_SSL_SECURE_TRANSPORT 0)
set(MONGOC_ENABLE_SSL_LIBRESSL 0)
set(MONGOC_ENABLE_CRYPTO_LIBCRYPTO 0)
set(MONGOC_ENABLE_CLIENT_SIDE_ENCRYPTION 0)
set(MONGOC_HAVE_ASN1_STRING_GET0_DATA 0)
if(ENABLE_SSL)
set(MONGOC_ENABLE_SSL 1)
set(MONGOC_ENABLE_CRYPTO 1)
set(MONGOC_ENABLE_SSL_OPENSSL 1)
set(MONGOC_ENABLE_CRYPTO_LIBCRYPTO 1)
set(MONGOC_HAVE_ASN1_STRING_GET0_DATA 1)
else()
message(WARNING "Building mongoc without SSL")
endif()
set(CMAKE_EXTRA_INCLUDE_FILES "sys/socket.h")
set(MONGOC_SOCKET_ARG2 "struct sockaddr")
set(MONGOC_HAVE_SOCKLEN 1)
set(MONGOC_SOCKET_ARG3 "socklen_t")
set(MONGOC_ENABLE_RDTSCP 0)
set(MONGOC_NO_AUTOMATIC_GLOBALS 1)
set(MONGOC_ENABLE_STATIC_INSTALL 0)
set(MONGOC_ENABLE_SHM_COUNTERS 0)
set(MONGOC_HAVE_SCHED_GETCPU 0)
set(MONGOC_HAVE_SS_FAMILY 0)
configure_file(
${LIBBSON_SOURCE_DIR}/bson/bson-config.h.in
${LIBBSON_BINARY_DIR}/bson/bson-config.h
)
configure_file(
${LIBBSON_SOURCE_DIR}/bson/bson-version.h.in
${LIBBSON_BINARY_DIR}/bson/bson-version.h
)
set(COMMON_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/common")
file(GLOB_RECURSE COMMON_SOURCES "${COMMON_SOURCE_DIR}/*.c")
configure_file(
${COMMON_SOURCE_DIR}/common-config.h.in
${COMMON_SOURCE_DIR}/common-config.h
)
add_library(_libbson ${LIBBSON_SOURCES} ${COMMON_SOURCES})
add_library(ch_contrib::libbson ALIAS _libbson)
target_include_directories(_libbson SYSTEM PUBLIC ${LIBBSON_SOURCE_DIR} ${LIBBSON_BINARY_DIR} ${COMMON_SOURCE_DIR})
target_compile_definitions(_libbson PRIVATE BSON_COMPILATION)
if(OS_LINUX)
target_compile_definitions(_libbson PRIVATE -D_GNU_SOURCE -D_POSIX_C_SOURCE=199309L -D_XOPEN_SOURCE=600)
elseif(OS_DARWIN)
target_compile_definitions(_libbson PRIVATE -D_DARWIN_C_SOURCE)
endif()
set(LIBMONGOC_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/libmongoc/src")
file(GLOB_RECURSE LIBMONGOC_SOURCES "${LIBMONGOC_SOURCE_DIR}/*.c")
set(UTF8PROC_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/utf8proc-2.8.0")
set(UTF8PROC_SOURCES "${UTF8PROC_SOURCE_DIR}/utf8proc.c")
set(UTHASH_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/uthash")
configure_file(
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config.h.in
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config.h
)
configure_file(
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-version.h.in
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-version.h
)
add_library(_libmongoc ${LIBMONGOC_SOURCES} ${COMMON_SOURCES} ${UTF8PROC_SOURCES})
add_library(ch_contrib::libmongoc ALIAS _libmongoc)
target_include_directories(_libmongoc SYSTEM PUBLIC ${LIBMONGOC_SOURCE_DIR} ${COMMON_SOURCE_DIR} ${UTF8PROC_SOURCE_DIR} ${UTHASH_SOURCE_DIR})
target_include_directories(_libmongoc SYSTEM PRIVATE ${LIBMONGOC_SOURCE_DIR}/mongoc ${UTHASH_SOURCE_DIR})
target_compile_definitions(_libmongoc PRIVATE MONGOC_COMPILATION)
target_link_libraries(_libmongoc ch_contrib::libbson ch_contrib::c-ares ch_contrib::zstd)
if(ENABLE_SSL)
target_link_libraries(_libmongoc OpenSSL::SSL)
endif()

1
contrib/mongo-cxx-driver vendored Submodule

@ -0,0 +1 @@
Subproject commit 3166bdb49b717ce1bc30f46cc2b274ab1de7005b

View File

@ -0,0 +1,192 @@
option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES})
if(NOT USE_MONGODB)
message(STATUS "Not using mongocxx and bsoncxx")
return()
endif()
set(BSONCXX_SOURCES_DIR "${ClickHouse_SOURCE_DIR}/contrib/mongo-cxx-driver/src/bsoncxx")
set(BSONCXX_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/mongo-cxx-driver/src/bsoncxx")
set(BSONCXX_SOURCES
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/element.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/view.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/builder/core.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/decimal128.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/element.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/view.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/exception/error_code.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/json.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/oid.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/private/itoa.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/string/view_or_value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/view.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/validate.cpp
)
set(BSONCXX_POLY_USE_IMPLS ON)
configure_file(
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/config.hpp.in
${BSONCXX_BINARY_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/config.hpp
)
configure_file(
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/version.hpp.in
${BSONCXX_BINARY_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/version.hpp
)
configure_file(
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/private/config.hh.in
${BSONCXX_BINARY_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/private/config.hh
)
add_library(_bsoncxx ${BSONCXX_SOURCES})
add_library(ch_contrib::bsoncxx ALIAS _bsoncxx)
target_include_directories(_bsoncxx SYSTEM PUBLIC "${BSONCXX_SOURCES_DIR}/include/bsoncxx/v_noabi" "${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi" "${BSONCXX_BINARY_DIR}/lib/bsoncxx/v_noabi")
target_compile_definitions(_bsoncxx PUBLIC BSONCXX_STATIC)
target_link_libraries(_bsoncxx ch_contrib::libbson)
include(GenerateExportHeader)
generate_export_header(_bsoncxx
BASE_NAME BSONCXX
EXPORT_MACRO_NAME BSONCXX_API
NO_EXPORT_MACRO_NAME BSONCXX_PRIVATE
EXPORT_FILE_NAME ${BSONCXX_BINARY_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/export.hpp
STATIC_DEFINE BSONCXX_STATIC
)
set(MONGOCXX_SOURCES_DIR "${ClickHouse_SOURCE_DIR}/contrib/mongo-cxx-driver/src/mongocxx")
set(MONGOCXX_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/mongo-cxx-driver/src/mongocxx")
set(MONGOCXX_SOURCES
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/bulk_write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/change_stream.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_encryption.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_session.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/collection.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/cursor.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/database.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_failed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_started_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_succeeded_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_failed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_started_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_succeeded_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_changed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_closed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_description.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_opening_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_changed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_closed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_description.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_opening_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/error_code.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/operation_exception.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/server_error_code.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/bucket.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/downloader.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/uploader.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/hint.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/index_model.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/index_view.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/instance.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/logger.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/delete_many.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/delete_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/insert_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/replace_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/update_many.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/update_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/aggregate.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/apm.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/auto_encryption.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/bulk_write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/change_stream.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client_encryption.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client_session.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/count.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/create_collection.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/data_key.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/delete.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/distinct.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/encrypt.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/estimated_document_count.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_delete.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_replace.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_update.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/gridfs/bucket.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/gridfs/upload.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/index.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/index_view.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/insert.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/pool.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/range.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/replace.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/rewrap_many_datakey.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/server_api.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/tls.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/transaction.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/update.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/pipeline.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/pool.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/conversions.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/libbson.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/libmongoc.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/numeric_casting.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/read_concern.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/read_preference.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/bulk_write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/delete.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/gridfs/upload.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/insert_many.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/insert_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/replace_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/rewrap_many_datakey.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/update.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/search_index_model.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/search_index_view.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/uri.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/validation_criteria.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/write_concern.cpp
)
set(MONGOCXX_COMPILER_VERSION "${CMAKE_CXX_COMPILER_VERSION}")
set(MONGOCXX_COMPILER_ID "${CMAKE_CXX_COMPILER_ID}")
set(MONGOCXX_LINK_WITH_STATIC_MONGOC 1)
set(MONGOCXX_BUILD_STATIC 1)
if(ENABLE_SSL)
set(MONGOCXX_ENABLE_SSL 1)
endif()
configure_file(
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/config.hpp.in
${MONGOCXX_BINARY_DIR}/lib/mongocxx/v_noabi/mongocxx/config/config.hpp
)
configure_file(
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/version.hpp.in
${MONGOCXX_BINARY_DIR}/lib/mongocxx/v_noabi/mongocxx/config/version.hpp
)
configure_file(
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/private/config.hh.in
${MONGOCXX_BINARY_DIR}/lib/mongocxx/v_noabi/mongocxx/config/private/config.hh
)
add_library(_mongocxx ${MONGOCXX_SOURCES})
add_library(ch_contrib::mongocxx ALIAS _mongocxx)
target_include_directories(_mongocxx SYSTEM PUBLIC "${MONGOCXX_SOURCES_DIR}/include/mongocxx/v_noabi" "${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi" "${MONGOCXX_BINARY_DIR}/lib/mongocxx/v_noabi")
target_compile_definitions(_mongocxx PUBLIC MONGOCXX_STATIC)
target_link_libraries(_mongocxx ch_contrib::bsoncxx ch_contrib::libmongoc)
generate_export_header(_mongocxx
BASE_NAME MONGOCXX
EXPORT_MACRO_NAME MONGOCXX_API
NO_EXPORT_MACRO_NAME MONGOCXX_PRIVATE
EXPORT_FILE_NAME ${MONGOCXX_BINARY_DIR}/lib/mongocxx/v_noabi/mongocxx/config/export.hpp
STATIC_DEFINE MONGOCXX_STATIC
)

1
contrib/postgres vendored Submodule

@ -0,0 +1 @@
Subproject commit c041ed8cbda02eb881de8d7645ca96b6e4b2327d

View File

@ -0,0 +1,81 @@
# Build description for libpq which is part of the PostgreSQL sources
set(POSTGRES_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/postgres")
set(LIBPQ_SOURCE_DIR "${POSTGRES_SOURCE_DIR}/src/interfaces/libpq")
set(LIBPQ_CMAKE_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/postgres-cmake")
set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-auth.c"
"${LIBPQ_SOURCE_DIR}/fe-auth-scram.c"
"${LIBPQ_SOURCE_DIR}/fe-connect.c"
"${LIBPQ_SOURCE_DIR}/fe-exec.c"
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c"
"${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c"
"${LIBPQ_SOURCE_DIR}/libpq-events.c"
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"
"${POSTGRES_SOURCE_DIR}/src/common/scram-common.c"
"${POSTGRES_SOURCE_DIR}/src/common/sha2.c"
"${POSTGRES_SOURCE_DIR}/src/common/sha1.c"
"${POSTGRES_SOURCE_DIR}/src/common/md5.c"
"${POSTGRES_SOURCE_DIR}/src/common/md5_common.c"
"${POSTGRES_SOURCE_DIR}/src/common/hmac_openssl.c"
"${POSTGRES_SOURCE_DIR}/src/common/cryptohash.c"
"${POSTGRES_SOURCE_DIR}/src/common/saslprep.c"
"${POSTGRES_SOURCE_DIR}/src/common/unicode_norm.c"
"${POSTGRES_SOURCE_DIR}/src/common/ip.c"
"${POSTGRES_SOURCE_DIR}/src/common/jsonapi.c"
"${POSTGRES_SOURCE_DIR}/src/common/wchar.c"
"${POSTGRES_SOURCE_DIR}/src/common/base64.c"
"${POSTGRES_SOURCE_DIR}/src/common/link-canary.c"
"${POSTGRES_SOURCE_DIR}/src/common/fe_memutils.c"
"${POSTGRES_SOURCE_DIR}/src/common/string.c"
"${POSTGRES_SOURCE_DIR}/src/common/pg_get_line.c"
"${POSTGRES_SOURCE_DIR}/src/common/pg_prng.c"
"${POSTGRES_SOURCE_DIR}/src/common/stringinfo.c"
"${POSTGRES_SOURCE_DIR}/src/common/psprintf.c"
"${POSTGRES_SOURCE_DIR}/src/common/encnames.c"
"${POSTGRES_SOURCE_DIR}/src/common/logging.c"
"${POSTGRES_SOURCE_DIR}/src/port/snprintf.c"
"${POSTGRES_SOURCE_DIR}/src/port/strlcat.c"
"${POSTGRES_SOURCE_DIR}/src/port/strlcpy.c"
"${POSTGRES_SOURCE_DIR}/src/port/strerror.c"
"${POSTGRES_SOURCE_DIR}/src/port/inet_net_ntop.c"
"${POSTGRES_SOURCE_DIR}/src/port/getpeereid.c"
"${POSTGRES_SOURCE_DIR}/src/port/chklocale.c"
"${POSTGRES_SOURCE_DIR}/src/port/noblock.c"
"${POSTGRES_SOURCE_DIR}/src/port/pg_strong_random.c"
"${POSTGRES_SOURCE_DIR}/src/port/pgstrcasecmp.c"
"${POSTGRES_SOURCE_DIR}/src/port/pg_bitutils.c"
"${POSTGRES_SOURCE_DIR}/src/port/thread.c"
"${POSTGRES_SOURCE_DIR}/src/port/path.c"
)
add_library(_libpq ${SRCS})
add_definitions(-DHAVE_BIO_METH_NEW)
add_definitions(-DHAVE_HMAC_CTX_NEW)
add_definitions(-DHAVE_HMAC_CTX_FREE)
target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (_libpq SYSTEM PUBLIC "${POSTGRES_SOURCE_DIR}/src/include")
target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_CMAKE_SOURCE_DIR}") # pre-generated headers
# NOTE: this is a dirty hack; instead of one generic pg_config.h for all
# OS'es, per-OS headers should be shipped, like it is done for jemalloc.
if (OS_DARWIN OR OS_FREEBSD OR USE_MUSL)
target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1)
endif()
target_link_libraries (_libpq PRIVATE OpenSSL::SSL)
add_library(ch_contrib::libpq ALIAS _libpq)

View File

@ -0,0 +1,471 @@
/*-------------------------------------------------------------------------
*
* nodetags.h
* Generated node infrastructure code
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* NOTES
* ******************************
* *** DO NOT EDIT THIS FILE! ***
* ******************************
*
* It has been GENERATED by src/backend/nodes/gen_node_support.pl
*
*-------------------------------------------------------------------------
*/
T_List = 1,
T_Alias = 2,
T_RangeVar = 3,
T_TableFunc = 4,
T_IntoClause = 5,
T_Var = 6,
T_Const = 7,
T_Param = 8,
T_Aggref = 9,
T_GroupingFunc = 10,
T_WindowFunc = 11,
T_SubscriptingRef = 12,
T_FuncExpr = 13,
T_NamedArgExpr = 14,
T_OpExpr = 15,
T_DistinctExpr = 16,
T_NullIfExpr = 17,
T_ScalarArrayOpExpr = 18,
T_BoolExpr = 19,
T_SubLink = 20,
T_SubPlan = 21,
T_AlternativeSubPlan = 22,
T_FieldSelect = 23,
T_FieldStore = 24,
T_RelabelType = 25,
T_CoerceViaIO = 26,
T_ArrayCoerceExpr = 27,
T_ConvertRowtypeExpr = 28,
T_CollateExpr = 29,
T_CaseExpr = 30,
T_CaseWhen = 31,
T_CaseTestExpr = 32,
T_ArrayExpr = 33,
T_RowExpr = 34,
T_RowCompareExpr = 35,
T_CoalesceExpr = 36,
T_MinMaxExpr = 37,
T_SQLValueFunction = 38,
T_XmlExpr = 39,
T_JsonFormat = 40,
T_JsonReturning = 41,
T_JsonValueExpr = 42,
T_JsonConstructorExpr = 43,
T_JsonIsPredicate = 44,
T_NullTest = 45,
T_BooleanTest = 46,
T_CoerceToDomain = 47,
T_CoerceToDomainValue = 48,
T_SetToDefault = 49,
T_CurrentOfExpr = 50,
T_NextValueExpr = 51,
T_InferenceElem = 52,
T_TargetEntry = 53,
T_RangeTblRef = 54,
T_JoinExpr = 55,
T_FromExpr = 56,
T_OnConflictExpr = 57,
T_Query = 58,
T_TypeName = 59,
T_ColumnRef = 60,
T_ParamRef = 61,
T_A_Expr = 62,
T_A_Const = 63,
T_TypeCast = 64,
T_CollateClause = 65,
T_RoleSpec = 66,
T_FuncCall = 67,
T_A_Star = 68,
T_A_Indices = 69,
T_A_Indirection = 70,
T_A_ArrayExpr = 71,
T_ResTarget = 72,
T_MultiAssignRef = 73,
T_SortBy = 74,
T_WindowDef = 75,
T_RangeSubselect = 76,
T_RangeFunction = 77,
T_RangeTableFunc = 78,
T_RangeTableFuncCol = 79,
T_RangeTableSample = 80,
T_ColumnDef = 81,
T_TableLikeClause = 82,
T_IndexElem = 83,
T_DefElem = 84,
T_LockingClause = 85,
T_XmlSerialize = 86,
T_PartitionElem = 87,
T_PartitionSpec = 88,
T_PartitionBoundSpec = 89,
T_PartitionRangeDatum = 90,
T_PartitionCmd = 91,
T_RangeTblEntry = 92,
T_RTEPermissionInfo = 93,
T_RangeTblFunction = 94,
T_TableSampleClause = 95,
T_WithCheckOption = 96,
T_SortGroupClause = 97,
T_GroupingSet = 98,
T_WindowClause = 99,
T_RowMarkClause = 100,
T_WithClause = 101,
T_InferClause = 102,
T_OnConflictClause = 103,
T_CTESearchClause = 104,
T_CTECycleClause = 105,
T_CommonTableExpr = 106,
T_MergeWhenClause = 107,
T_MergeAction = 108,
T_TriggerTransition = 109,
T_JsonOutput = 110,
T_JsonKeyValue = 111,
T_JsonObjectConstructor = 112,
T_JsonArrayConstructor = 113,
T_JsonArrayQueryConstructor = 114,
T_JsonAggConstructor = 115,
T_JsonObjectAgg = 116,
T_JsonArrayAgg = 117,
T_RawStmt = 118,
T_InsertStmt = 119,
T_DeleteStmt = 120,
T_UpdateStmt = 121,
T_MergeStmt = 122,
T_SelectStmt = 123,
T_SetOperationStmt = 124,
T_ReturnStmt = 125,
T_PLAssignStmt = 126,
T_CreateSchemaStmt = 127,
T_AlterTableStmt = 128,
T_ReplicaIdentityStmt = 129,
T_AlterTableCmd = 130,
T_AlterCollationStmt = 131,
T_AlterDomainStmt = 132,
T_GrantStmt = 133,
T_ObjectWithArgs = 134,
T_AccessPriv = 135,
T_GrantRoleStmt = 136,
T_AlterDefaultPrivilegesStmt = 137,
T_CopyStmt = 138,
T_VariableSetStmt = 139,
T_VariableShowStmt = 140,
T_CreateStmt = 141,
T_Constraint = 142,
T_CreateTableSpaceStmt = 143,
T_DropTableSpaceStmt = 144,
T_AlterTableSpaceOptionsStmt = 145,
T_AlterTableMoveAllStmt = 146,
T_CreateExtensionStmt = 147,
T_AlterExtensionStmt = 148,
T_AlterExtensionContentsStmt = 149,
T_CreateFdwStmt = 150,
T_AlterFdwStmt = 151,
T_CreateForeignServerStmt = 152,
T_AlterForeignServerStmt = 153,
T_CreateForeignTableStmt = 154,
T_CreateUserMappingStmt = 155,
T_AlterUserMappingStmt = 156,
T_DropUserMappingStmt = 157,
T_ImportForeignSchemaStmt = 158,
T_CreatePolicyStmt = 159,
T_AlterPolicyStmt = 160,
T_CreateAmStmt = 161,
T_CreateTrigStmt = 162,
T_CreateEventTrigStmt = 163,
T_AlterEventTrigStmt = 164,
T_CreatePLangStmt = 165,
T_CreateRoleStmt = 166,
T_AlterRoleStmt = 167,
T_AlterRoleSetStmt = 168,
T_DropRoleStmt = 169,
T_CreateSeqStmt = 170,
T_AlterSeqStmt = 171,
T_DefineStmt = 172,
T_CreateDomainStmt = 173,
T_CreateOpClassStmt = 174,
T_CreateOpClassItem = 175,
T_CreateOpFamilyStmt = 176,
T_AlterOpFamilyStmt = 177,
T_DropStmt = 178,
T_TruncateStmt = 179,
T_CommentStmt = 180,
T_SecLabelStmt = 181,
T_DeclareCursorStmt = 182,
T_ClosePortalStmt = 183,
T_FetchStmt = 184,
T_IndexStmt = 185,
T_CreateStatsStmt = 186,
T_StatsElem = 187,
T_AlterStatsStmt = 188,
T_CreateFunctionStmt = 189,
T_FunctionParameter = 190,
T_AlterFunctionStmt = 191,
T_DoStmt = 192,
T_InlineCodeBlock = 193,
T_CallStmt = 194,
T_CallContext = 195,
T_RenameStmt = 196,
T_AlterObjectDependsStmt = 197,
T_AlterObjectSchemaStmt = 198,
T_AlterOwnerStmt = 199,
T_AlterOperatorStmt = 200,
T_AlterTypeStmt = 201,
T_RuleStmt = 202,
T_NotifyStmt = 203,
T_ListenStmt = 204,
T_UnlistenStmt = 205,
T_TransactionStmt = 206,
T_CompositeTypeStmt = 207,
T_CreateEnumStmt = 208,
T_CreateRangeStmt = 209,
T_AlterEnumStmt = 210,
T_ViewStmt = 211,
T_LoadStmt = 212,
T_CreatedbStmt = 213,
T_AlterDatabaseStmt = 214,
T_AlterDatabaseRefreshCollStmt = 215,
T_AlterDatabaseSetStmt = 216,
T_DropdbStmt = 217,
T_AlterSystemStmt = 218,
T_ClusterStmt = 219,
T_VacuumStmt = 220,
T_VacuumRelation = 221,
T_ExplainStmt = 222,
T_CreateTableAsStmt = 223,
T_RefreshMatViewStmt = 224,
T_CheckPointStmt = 225,
T_DiscardStmt = 226,
T_LockStmt = 227,
T_ConstraintsSetStmt = 228,
T_ReindexStmt = 229,
T_CreateConversionStmt = 230,
T_CreateCastStmt = 231,
T_CreateTransformStmt = 232,
T_PrepareStmt = 233,
T_ExecuteStmt = 234,
T_DeallocateStmt = 235,
T_DropOwnedStmt = 236,
T_ReassignOwnedStmt = 237,
T_AlterTSDictionaryStmt = 238,
T_AlterTSConfigurationStmt = 239,
T_PublicationTable = 240,
T_PublicationObjSpec = 241,
T_CreatePublicationStmt = 242,
T_AlterPublicationStmt = 243,
T_CreateSubscriptionStmt = 244,
T_AlterSubscriptionStmt = 245,
T_DropSubscriptionStmt = 246,
T_PlannerGlobal = 247,
T_PlannerInfo = 248,
T_RelOptInfo = 249,
T_IndexOptInfo = 250,
T_ForeignKeyOptInfo = 251,
T_StatisticExtInfo = 252,
T_JoinDomain = 253,
T_EquivalenceClass = 254,
T_EquivalenceMember = 255,
T_PathKey = 256,
T_PathTarget = 257,
T_ParamPathInfo = 258,
T_Path = 259,
T_IndexPath = 260,
T_IndexClause = 261,
T_BitmapHeapPath = 262,
T_BitmapAndPath = 263,
T_BitmapOrPath = 264,
T_TidPath = 265,
T_TidRangePath = 266,
T_SubqueryScanPath = 267,
T_ForeignPath = 268,
T_CustomPath = 269,
T_AppendPath = 270,
T_MergeAppendPath = 271,
T_GroupResultPath = 272,
T_MaterialPath = 273,
T_MemoizePath = 274,
T_UniquePath = 275,
T_GatherPath = 276,
T_GatherMergePath = 277,
T_NestPath = 278,
T_MergePath = 279,
T_HashPath = 280,
T_ProjectionPath = 281,
T_ProjectSetPath = 282,
T_SortPath = 283,
T_IncrementalSortPath = 284,
T_GroupPath = 285,
T_UpperUniquePath = 286,
T_AggPath = 287,
T_GroupingSetData = 288,
T_RollupData = 289,
T_GroupingSetsPath = 290,
T_MinMaxAggPath = 291,
T_WindowAggPath = 292,
T_SetOpPath = 293,
T_RecursiveUnionPath = 294,
T_LockRowsPath = 295,
T_ModifyTablePath = 296,
T_LimitPath = 297,
T_RestrictInfo = 298,
T_PlaceHolderVar = 299,
T_SpecialJoinInfo = 300,
T_OuterJoinClauseInfo = 301,
T_AppendRelInfo = 302,
T_RowIdentityVarInfo = 303,
T_PlaceHolderInfo = 304,
T_MinMaxAggInfo = 305,
T_PlannerParamItem = 306,
T_AggInfo = 307,
T_AggTransInfo = 308,
T_PlannedStmt = 309,
T_Result = 310,
T_ProjectSet = 311,
T_ModifyTable = 312,
T_Append = 313,
T_MergeAppend = 314,
T_RecursiveUnion = 315,
T_BitmapAnd = 316,
T_BitmapOr = 317,
T_SeqScan = 318,
T_SampleScan = 319,
T_IndexScan = 320,
T_IndexOnlyScan = 321,
T_BitmapIndexScan = 322,
T_BitmapHeapScan = 323,
T_TidScan = 324,
T_TidRangeScan = 325,
T_SubqueryScan = 326,
T_FunctionScan = 327,
T_ValuesScan = 328,
T_TableFuncScan = 329,
T_CteScan = 330,
T_NamedTuplestoreScan = 331,
T_WorkTableScan = 332,
T_ForeignScan = 333,
T_CustomScan = 334,
T_NestLoop = 335,
T_NestLoopParam = 336,
T_MergeJoin = 337,
T_HashJoin = 338,
T_Material = 339,
T_Memoize = 340,
T_Sort = 341,
T_IncrementalSort = 342,
T_Group = 343,
T_Agg = 344,
T_WindowAgg = 345,
T_Unique = 346,
T_Gather = 347,
T_GatherMerge = 348,
T_Hash = 349,
T_SetOp = 350,
T_LockRows = 351,
T_Limit = 352,
T_PlanRowMark = 353,
T_PartitionPruneInfo = 354,
T_PartitionedRelPruneInfo = 355,
T_PartitionPruneStepOp = 356,
T_PartitionPruneStepCombine = 357,
T_PlanInvalItem = 358,
T_ExprState = 359,
T_IndexInfo = 360,
T_ExprContext = 361,
T_ReturnSetInfo = 362,
T_ProjectionInfo = 363,
T_JunkFilter = 364,
T_OnConflictSetState = 365,
T_MergeActionState = 366,
T_ResultRelInfo = 367,
T_EState = 368,
T_WindowFuncExprState = 369,
T_SetExprState = 370,
T_SubPlanState = 371,
T_DomainConstraintState = 372,
T_ResultState = 373,
T_ProjectSetState = 374,
T_ModifyTableState = 375,
T_AppendState = 376,
T_MergeAppendState = 377,
T_RecursiveUnionState = 378,
T_BitmapAndState = 379,
T_BitmapOrState = 380,
T_ScanState = 381,
T_SeqScanState = 382,
T_SampleScanState = 383,
T_IndexScanState = 384,
T_IndexOnlyScanState = 385,
T_BitmapIndexScanState = 386,
T_BitmapHeapScanState = 387,
T_TidScanState = 388,
T_TidRangeScanState = 389,
T_SubqueryScanState = 390,
T_FunctionScanState = 391,
T_ValuesScanState = 392,
T_TableFuncScanState = 393,
T_CteScanState = 394,
T_NamedTuplestoreScanState = 395,
T_WorkTableScanState = 396,
T_ForeignScanState = 397,
T_CustomScanState = 398,
T_JoinState = 399,
T_NestLoopState = 400,
T_MergeJoinState = 401,
T_HashJoinState = 402,
T_MaterialState = 403,
T_MemoizeState = 404,
T_SortState = 405,
T_IncrementalSortState = 406,
T_GroupState = 407,
T_AggState = 408,
T_WindowAggState = 409,
T_UniqueState = 410,
T_GatherState = 411,
T_GatherMergeState = 412,
T_HashState = 413,
T_SetOpState = 414,
T_LockRowsState = 415,
T_LimitState = 416,
T_IndexAmRoutine = 417,
T_TableAmRoutine = 418,
T_TsmRoutine = 419,
T_EventTriggerData = 420,
T_TriggerData = 421,
T_TupleTableSlot = 422,
T_FdwRoutine = 423,
T_Bitmapset = 424,
T_ExtensibleNode = 425,
T_ErrorSaveContext = 426,
T_IdentifySystemCmd = 427,
T_BaseBackupCmd = 428,
T_CreateReplicationSlotCmd = 429,
T_DropReplicationSlotCmd = 430,
T_StartReplicationCmd = 431,
T_ReadReplicationSlotCmd = 432,
T_TimeLineHistoryCmd = 433,
T_SupportRequestSimplify = 434,
T_SupportRequestSelectivity = 435,
T_SupportRequestCost = 436,
T_SupportRequestRows = 437,
T_SupportRequestIndexCondition = 438,
T_SupportRequestWFuncMonotonic = 439,
T_SupportRequestOptimizeWindowClause = 440,
T_Integer = 441,
T_Float = 442,
T_Boolean = 443,
T_String = 444,
T_BitString = 445,
T_ForeignKeyCacheInfo = 446,
T_IntList = 447,
T_OidList = 448,
T_XidList = 449,
T_AllocSetContext = 450,
T_GenerationContext = 451,
T_SlabContext = 452,
T_TIDBitmap = 453,
T_WindowObjectData = 454,
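
The tags above are how PostgreSQL identifies node structs at run time: every parse/plan/executor node begins with a NodeTag, and code dispatches on it. A minimal sketch of that dispatch, assuming the Node, nodeTag() and IsA() definitions from src/include/nodes/nodes.h (quoted from memory, not from this commit; the tag value matches T_ResTarget = 72 above):

/* sketch.c -- illustrative only, not code from this diff */
#include <stdio.h>

typedef enum NodeTag { T_Invalid = 0, T_ResTarget = 72 } NodeTag;

typedef struct Node
{
    NodeTag type;               /* first field of every node struct */
} Node;

#define nodeTag(nodeptr)      (((const Node *) (nodeptr))->type)
#define IsA(nodeptr, _type_)  (nodeTag(nodeptr) == T_##_type_)

int main(void)
{
    Node n = { T_ResTarget };
    printf("IsA(ResTarget): %d\n", IsA(&n, ResTarget));  /* prints 1 */
    return 0;
}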

View File

@@ -0,0 +1,803 @@
/* src/include/pg_config.h. Generated from pg_config.h.in by configure. */
/* src/include/pg_config.h.in. Generated from configure.in by autoheader. */
/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */
/* The normal alignment of `double', in bytes. */
#define ALIGNOF_DOUBLE 4
/* The normal alignment of `int', in bytes. */
#define ALIGNOF_INT 4
/* The normal alignment of `long', in bytes. */
#define ALIGNOF_LONG 4
/* The normal alignment of `long long int', in bytes. */
#define ALIGNOF_LONG_LONG_INT 4
/* The normal alignment of `short', in bytes. */
#define ALIGNOF_SHORT 2
/* Size of a disk block --- this also limits the size of a tuple. You can set
it bigger if you need bigger tuples (although TOAST should reduce the need
to have large tuples, since fields can be spread across multiple tuples).
BLCKSZ must be a power of 2. The maximum possible value of BLCKSZ is
currently 2^15 (32768). This is determined by the 15-bit widths of the
lp_off and lp_len fields in ItemIdData (see include/storage/itemid.h).
Changing BLCKSZ requires an initdb. */
#define BLCKSZ 8192
/* Define to the default TCP port number on which the server listens and to
which clients will try to connect. This can be overridden at run-time, but
it's convenient if your clients have the right default compiled in.
(--with-pgport=PORTNUM) */
#define DEF_PGPORT 5432
/* Define to the default TCP port number as a string constant. */
#define DEF_PGPORT_STR "5432"
/* Define to the file name extension of dynamically-loadable modules. */
#define DLSUFFIX ".so"
/* Define to build with GSSAPI support. (--with-gssapi) */
//#define ENABLE_GSS 0
/* Define to 1 if you want National Language Support. (--enable-nls) */
/* #undef ENABLE_NLS */
/* Define to 1 to build client libraries as thread-safe code.
(--enable-thread-safety) */
#define ENABLE_THREAD_SAFETY 1
/* Define to nothing if C supports flexible array members, and to 1 if it does
not. That way, with a declaration like `struct s { int n; double
d[FLEXIBLE_ARRAY_MEMBER]; };', the struct hack can be used with pre-C99
compilers. When computing the size of such an object, don't use 'sizeof
(struct s)' as it overestimates the size. Use 'offsetof (struct s, d)'
instead. Don't use 'offsetof (struct s, d[0])', as this doesn't work with
MSVC and with C++ compilers. */
#define FLEXIBLE_ARRAY_MEMBER /**/
/* float4 values are passed by value if 'true', by reference if 'false' */
#define FLOAT4PASSBYVAL true
/* float8, int8, and related values are passed by value if 'true', by
reference if 'false' */
#define FLOAT8PASSBYVAL false
/* Define to 1 if you have the `append_history' function. */
/* #undef HAVE_APPEND_HISTORY */
/* Define to 1 if you want to use atomics if available. */
#define HAVE_ATOMICS 1
/* Define to 1 if you have the <atomic.h> header file. */
/* #undef HAVE_ATOMIC_H */
/* Define to 1 if you have the `cbrt' function. */
#define HAVE_CBRT 1
/* Define to 1 if you have the `class' function. */
/* #undef HAVE_CLASS */
/* Define to 1 if you have the <crtdefs.h> header file. */
/* #undef HAVE_CRTDEFS_H */
/* Define to 1 if you have the `crypt' function. */
#define HAVE_CRYPT 1
/* Define to 1 if you have the <crypt.h> header file. */
#define HAVE_CRYPT_H 1
/* Define to 1 if you have the declaration of `fdatasync', and to 0 if you
don't. */
#define HAVE_DECL_FDATASYNC 1
/* Define to 1 if you have the declaration of `F_FULLFSYNC', and to 0 if you
don't. */
#define HAVE_DECL_F_FULLFSYNC 0
/* Define to 1 if you have the declaration of `posix_fadvise', and to 0 if you
don't. */
#define HAVE_DECL_POSIX_FADVISE 1
/* Define to 1 if you have the declaration of `snprintf', and to 0 if you
don't. */
#define HAVE_DECL_SNPRINTF 1
/* Define to 1 if you have the declaration of `strlcat', and to 0 if you
don't. */
#if OS_DARWIN
#define HAVE_DECL_STRLCAT 1
#endif
/* Define to 1 if you have the declaration of `strlcpy', and to 0 if you
don't. */
#if OS_DARWIN
#define HAVE_DECL_STRLCPY 1
#endif
/* Define to 1 if you have the declaration of `sys_siglist', and to 0 if you
don't. */
#define HAVE_DECL_SYS_SIGLIST 1
/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you
don't. */
#define HAVE_DECL_VSNPRINTF 1
/* Define to 1 if you have the <dld.h> header file. */
/* #undef HAVE_DLD_H */
/* Define to 1 if you have the <editline/history.h> header file. */
/* #undef HAVE_EDITLINE_HISTORY_H */
/* Define to 1 if you have the <editline/readline.h> header file. */
#define HAVE_EDITLINE_READLINE_H 1
/* Define to 1 if you have the `fpclass' function. */
/* #undef HAVE_FPCLASS */
/* Define to 1 if you have the `fp_class' function. */
/* #undef HAVE_FP_CLASS */
/* Define to 1 if you have the `fp_class_d' function. */
/* #undef HAVE_FP_CLASS_D */
/* Define to 1 if you have the <fp_class.h> header file. */
/* #undef HAVE_FP_CLASS_H */
/* Define to 1 if fseeko (and presumably ftello) exists and is declared. */
#define HAVE_FSEEKO 1
/* Define to 1 if you have __atomic_compare_exchange_n(int *, int *, int). */
/* #undef HAVE_GCC__ATOMIC_INT32_CAS */
/* Define to 1 if you have __atomic_compare_exchange_n(int64 *, int *, int64).
*/
/* #undef HAVE_GCC__ATOMIC_INT64_CAS */
/* Define to 1 if you have __sync_lock_test_and_set(char *) and friends. */
#define HAVE_GCC__SYNC_CHAR_TAS 1
/* Define to 1 if you have __sync_compare_and_swap(int *, int, int). */
/* #undef HAVE_GCC__SYNC_INT32_CAS */
/* Define to 1 if you have __sync_lock_test_and_set(int *) and friends. */
#define HAVE_GCC__SYNC_INT32_TAS 1
/* Define to 1 if you have __sync_compare_and_swap(int64 *, int64, int64). */
/* #undef HAVE_GCC__SYNC_INT64_CAS */
/* Define to 1 if you have the `getifaddrs' function. */
#define HAVE_GETIFADDRS 1
/* Define to 1 if you have the `getopt' function. */
#define HAVE_GETOPT 1
/* Define to 1 if you have the <getopt.h> header file. */
#define HAVE_GETOPT_H 1
/* Define to 1 if you have the `getopt_long' function. */
#define HAVE_GETOPT_LONG 1
/* Define to 1 if you have the `getpeereid' function. */
/* #undef HAVE_GETPEEREID */
/* Define to 1 if you have the `getpeerucred' function. */
/* #undef HAVE_GETPEERUCRED */
/* Define to 1 if you have the <gssapi_ext.h> header file. */
/* #undef HAVE_GSSAPI_EXT_H */
/* Define to 1 if you have the <gssapi/gssapi_ext.h> header file. */
/* #undef HAVE_GSSAPI_GSSAPI_EXT_H */
/* Define to 1 if you have the <gssapi/gssapi.h> header file. */
//#define HAVE_GSSAPI_GSSAPI_H 0
/* Define to 1 if you have the <gssapi.h> header file. */
/* #undef HAVE_GSSAPI_H */
/* Define to 1 if you have the <history.h> header file. */
/* #undef HAVE_HISTORY_H */
/* Define to 1 if you have the `history_truncate_file' function. */
#define HAVE_HISTORY_TRUNCATE_FILE 1
/* Define to 1 if you have the <ieeefp.h> header file. */
/* #undef HAVE_IEEEFP_H */
/* Define to 1 if you have the <ifaddrs.h> header file. */
#define HAVE_IFADDRS_H 1
/* Define to 1 if you have the `inet_aton' function. */
#define HAVE_INET_ATON 1
/* Define to 1 if you have the `inet_pton' function. */
#define HAVE_INET_PTON 1
/* Define to 1 if the system has the type `int64'. */
/* #undef HAVE_INT64 */
/* Define to 1 if the system has the type `int8'. */
/* #undef HAVE_INT8 */
/* Define to 1 if the system has the type `intptr_t'. */
#define HAVE_INTPTR_T 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the global variable 'int opterr'. */
#define HAVE_INT_OPTERR 1
/* Define to 1 if you have the global variable 'int optreset'. */
/* #undef HAVE_INT_OPTRESET */
/* Define to 1 if you have the global variable 'int timezone'. */
#define HAVE_INT_TIMEZONE 1
/* Define to 1 if you have isinf(). */
#define HAVE_ISINF 1
/* Define to 1 if you have the <langinfo.h> header file. */
#define HAVE_LANGINFO_H 1
/* Define to 1 if you have the `crypto' library (-lcrypto). */
#define HAVE_LIBCRYPTO 1
/* Define to 1 if you have the `ldap' library (-lldap). */
//#define HAVE_LIBLDAP 0
/* Define to 1 if you have the `m' library (-lm). */
#define HAVE_LIBM 1
/* Define to 1 if you have the `pam' library (-lpam). */
#define HAVE_LIBPAM 1
/* Define if you have a function readline library */
#define HAVE_LIBREADLINE 1
/* Define to 1 if you have the `selinux' library (-lselinux). */
/* #undef HAVE_LIBSELINUX */
/* Define to 1 if you have the `ssl' library (-lssl). */
#define HAVE_LIBSSL 0
/* Define to 1 if you have the `wldap32' library (-lwldap32). */
/* #undef HAVE_LIBWLDAP32 */
/* Define to 1 if you have the `xml2' library (-lxml2). */
#define HAVE_LIBXML2 1
/* Define to 1 if you have the `xslt' library (-lxslt). */
#define HAVE_LIBXSLT 1
/* Define to 1 if you have the `z' library (-lz). */
#define HAVE_LIBZ 1
/* Define to 1 if you have the `zstd' library (-lzstd). */
/* #undef HAVE_LIBZSTD */
/* Define to 1 if constants of type 'long long int' should have the suffix LL.
*/
#define HAVE_LL_CONSTANTS 1
/* Define to 1 if the system has the type `locale_t'. */
#define HAVE_LOCALE_T 1
/* Define to 1 if `long int' works and is 64 bits. */
/* #undef HAVE_LONG_INT_64 */
/* Define to 1 if the system has the type `long long int'. */
#define HAVE_LONG_LONG_INT 1
/* Define to 1 if `long long int' works and is 64 bits. */
#define HAVE_LONG_LONG_INT_64 1
/* Define to 1 if you have the <mbarrier.h> header file. */
/* #undef HAVE_MBARRIER_H */
/* Define to 1 if you have the `mbstowcs_l' function. */
/* #undef HAVE_MBSTOWCS_L */
/* Define to 1 if you have the `memmove' function. */
#define HAVE_MEMMOVE 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the `mkdtemp' function. */
#define HAVE_MKDTEMP 1
/* Define to 1 if you have the <net/if.h> header file. */
#define HAVE_NET_IF_H 1
/* Define to 1 if you have the <ossp/uuid.h> header file. */
/* #undef HAVE_OSSP_UUID_H */
/* Define to 1 if you have the <pam/pam_appl.h> header file. */
/* #undef HAVE_PAM_PAM_APPL_H */
/* Define to 1 if you have the `posix_fadvise' function. */
#define HAVE_POSIX_FADVISE 1
/* Define to 1 if you have the declaration of `preadv', and to 0 if you don't. */
/* #undef HAVE_DECL_PREADV */
/* Define to 1 if you have the declaration of `pwritev', and to 0 if you don't. */
/* #define HAVE_DECL_PWRITEV */
/* Define to 1 if you have the `X509_get_signature_info' function. */
/* #undef HAVE_X509_GET_SIGNATURE_INFO */
/* Define to 1 if you have the POSIX signal interface. */
#define HAVE_POSIX_SIGNALS 1
/* Define to 1 if the assembler supports PPC's LWARX mutex hint bit. */
/* #undef HAVE_PPC_LWARX_MUTEX_HINT */
/* Define to 1 if you have the `pthread_is_threaded_np' function. */
/* #undef HAVE_PTHREAD_IS_THREADED_NP */
/* Define to 1 if you have the <pwd.h> header file. */
#define HAVE_PWD_H 1
/* Define to 1 if you have the <readline.h> header file. */
/* #undef HAVE_READLINE_H */
/* Define to 1 if you have the <readline/history.h> header file. */
#define HAVE_READLINE_HISTORY_H 1
/* Define to 1 if you have the <readline/readline.h> header file. */
/* #undef HAVE_READLINE_READLINE_H */
/* Define to 1 if you have the `rint' function. */
#define HAVE_RINT 1
/* Define to 1 if you have the `rl_completion_matches' function. */
#define HAVE_RL_COMPLETION_MATCHES 1
/* Define to 1 if you have the `rl_filename_completion_function' function. */
#define HAVE_RL_FILENAME_COMPLETION_FUNCTION 1
/* Define to 1 if you have the `rl_reset_screen_size' function. */
/* #undef HAVE_RL_RESET_SCREEN_SIZE */
/* Define to 1 if you have the `rl_variable_bind' function. */
#define HAVE_RL_VARIABLE_BIND 1
/* Define to 1 if you have the <security/pam_appl.h> header file. */
#define HAVE_SECURITY_PAM_APPL_H 1
/* Define to 1 if you have the `setproctitle' function. */
/* #undef HAVE_SETPROCTITLE */
/* Define to 1 if the system has the type `socklen_t'. */
#define HAVE_SOCKLEN_T 1
/* Define to 1 if you have the `sigprocmask' function. */
#define HAVE_SIGPROCMASK 1
/* Define to 1 if you have sigsetjmp(). */
#define HAVE_SIGSETJMP 1
/* Define to 1 if the system has the type `sig_atomic_t'. */
#define HAVE_SIG_ATOMIC_T 1
/* Define to 1 if you have the `snprintf' function. */
#define HAVE_SNPRINTF 1
/* Define to 1 if you have spinlocks. */
#define HAVE_SPINLOCKS 1
/* Define to 1 if you have the `SSL_CTX_set_cert_cb' function. */
#define HAVE_SSL_CTX_SET_CERT_CB 1
/* Define to 1 if you have the `SSL_CTX_set_num_tickets' function. */
/* #define HAVE_SSL_CTX_SET_NUM_TICKETS */
/* Define to 1 if you have the `SSL_get_current_compression' function. */
#define HAVE_SSL_GET_CURRENT_COMPRESSION 0
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
/* Define to 1 if you have the `strerror_r' function. */
#define HAVE_STRERROR_R 1
/* Define to 1 if you have the <strings.h> header file. */
//#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
/* #undef HAVE_STRLCAT */
/* Define to 1 if you have the `strlcpy' function. */
/* #undef HAVE_STRLCPY */
#if (!OS_DARWIN)
#define HAVE_STRCHRNUL 1
#endif
/* Define to 1 if the system has the type `struct option'. */
#define HAVE_STRUCT_OPTION 1
/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */
/* #undef HAVE_STRUCT_SOCKADDR_SA_LEN */
/* Define to 1 if `tm_zone' is a member of `struct tm'. */
#define HAVE_STRUCT_TM_TM_ZONE 1
/* Define to 1 if you have the `sync_file_range' function. */
/* #undef HAVE_SYNC_FILE_RANGE */
/* Define to 1 if you have the syslog interface. */
#define HAVE_SYSLOG 1
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1
/* Define to 1 if you have the <sys/personality.h> header file. */
/* #undef HAVE_SYS_PERSONALITY_H */
/* Define to 1 if you have the <sys/poll.h> header file. */
#define HAVE_SYS_POLL_H 1
/* Define to 1 if you have the <sys/signalfd.h> header file. */
/* #undef HAVE_SYS_SIGNALFD_H */
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/ucred.h> header file. */
#if (OS_DARWIN || OS_FREEBSD)
#define HAVE_SYS_UCRED_H 1
#endif
/* Define to 1 if you have the <sys/un.h> header file. */
#define _GNU_SOURCE 1 /* Needed for glibc struct ucred */
/* Define to 1 if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1
/* Define to 1 if your `struct tm' has `tm_zone'. Deprecated, use
`HAVE_STRUCT_TM_TM_ZONE' instead. */
#define HAVE_TM_ZONE 1
/* Define to 1 if you have the `towlower' function. */
#define HAVE_TOWLOWER 1
/* Define to 1 if you have the external array `tzname'. */
#define HAVE_TZNAME 1
/* Define to 1 if you have the <ucred.h> header file. */
/* #undef HAVE_UCRED_H */
/* Define to 1 if the system has the type `uint64'. */
/* #undef HAVE_UINT64 */
/* Define to 1 if the system has the type `uint8'. */
/* #undef HAVE_UINT8 */
/* Define to 1 if the system has the type `uintptr_t'. */
#define HAVE_UINTPTR_T 1
/* Define to 1 if the system has the type `union semun'. */
/* #undef HAVE_UNION_SEMUN */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have unix sockets. */
#define HAVE_UNIX_SOCKETS 1
/* Define to 1 if the system has the type `unsigned long long int'. */
#define HAVE_UNSIGNED_LONG_LONG_INT 1
/* Define to 1 if you have the `utime' function. */
#define HAVE_UTIME 1
/* Define to 1 if you have the `utimes' function. */
#define HAVE_UTIMES 1
/* Define to 1 if you have the <utime.h> header file. */
#define HAVE_UTIME_H 1
/* Define to 1 if you have BSD UUID support. */
/* #undef HAVE_UUID_BSD */
/* Define to 1 if you have E2FS UUID support. */
/* #undef HAVE_UUID_E2FS */
/* Define to 1 if you have the <uuid.h> header file. */
#define HAVE_UUID_H 1
/* Define to 1 if you have OSSP UUID support. */
#define HAVE_UUID_OSSP 1
/* Define to 1 if you have the <uuid/uuid.h> header file. */
/* #undef HAVE_UUID_UUID_H */
/* Define to 1 if your compiler knows the visibility("hidden") attribute. */
/* #undef HAVE_VISIBILITY_ATTRIBUTE */
/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1
/* Define to 1 if you have the <wchar.h> header file. */
#define HAVE_WCHAR_H 1
/* Define to 1 if you have the `wcstombs' function. */
#define HAVE_WCSTOMBS 1
/* Define to 1 if you have the `wcstombs_l' function. */
/* #undef HAVE_WCSTOMBS_L */
/* Define to 1 if your compiler understands __builtin_bswap32. */
/* #undef HAVE__BUILTIN_BSWAP32 */
/* Define to 1 if your compiler understands __builtin_constant_p. */
#define HAVE__BUILTIN_CONSTANT_P 1
/* Define to 1 if your compiler understands __builtin_frame_address. */
/* #undef HAVE__BUILTIN_FRAME_ADDRESS */
/* Define to 1 if your compiler understands __builtin_types_compatible_p. */
#define HAVE__BUILTIN_TYPES_COMPATIBLE_P 1
/* Define to 1 if your compiler understands __builtin_unreachable. */
/* #undef HAVE__BUILTIN_UNREACHABLE */
/* Define to 1 if you have __cpuid. */
/* #undef HAVE__CPUID */
/* Define to 1 if you have __get_cpuid. */
/* #undef HAVE__GET_CPUID */
/* Define to 1 if your compiler understands _Static_assert. */
/* #undef HAVE__STATIC_ASSERT */
/* Define to 1 if your compiler understands __VA_ARGS__ in macros. */
#define HAVE__VA_ARGS 1
/* Define to the appropriate snprintf length modifier for 64-bit ints. */
#define INT64_MODIFIER "ll"
/* Define to 1 if `locale_t' requires <xlocale.h>. */
/* #undef LOCALE_T_IN_XLOCALE */
/* Define as the maximum alignment requirement of any C data type. */
#define MAXIMUM_ALIGNOF 4
/* Define bytes to use libc memset(). */
#define MEMSET_LOOP_LIMIT 1024
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "pgsql-bugs@postgresql.org"
/* Define to the full name of this package. */
#define PACKAGE_NAME "PostgreSQL"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "PostgreSQL 9.5.4"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "postgresql"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "9.5.4"
/* Define to the name of a signed 128-bit integer type. */
/* #undef PG_INT128_TYPE */
/* Define to the name of a signed 64-bit integer type. */
#define PG_INT64_TYPE long long int
/* Define to the name of the default PostgreSQL service principal in Kerberos
(GSSAPI). (--with-krb-srvnam=NAME) */
#define PG_KRB_SRVNAM "postgres"
/* PostgreSQL major version as a string */
#define PG_MAJORVERSION "9.5"
/* Define to gnu_printf if compiler supports it, else printf. */
#define PG_PRINTF_ATTRIBUTE printf
/* Define to 1 if "static inline" works without unwanted warnings from
compilations where static inline functions are defined but not called. */
#define PG_USE_INLINE 1
/* PostgreSQL version as a string */
#define PG_VERSION "9.5.4"
/* PostgreSQL version as a number */
#define PG_VERSION_NUM 90504
/* A string containing the version number, platform, and C compiler */
#define PG_VERSION_STR "PostgreSQL 9.5.4 on i686-pc-linux-gnu, compiled by gcc (GCC) 4.1.2 20080704 (Red Hat 4.1.2-55), 32-bit"
/* Define to 1 to allow profiling output to be saved separately for each
process. */
/* #undef PROFILE_PID_DIR */
/* RELSEG_SIZE is the maximum number of blocks allowed in one disk file. Thus,
the maximum size of a single file is RELSEG_SIZE * BLCKSZ; relations bigger
than that are divided into multiple files. RELSEG_SIZE * BLCKSZ must be
less than your OS' limit on file size. This is often 2 GB or 4GB in a
32-bit operating system, unless you have large file support enabled. By
default, we make the limit 1 GB to avoid any possible integer-overflow
problems within the OS. A limit smaller than necessary only means we divide
a large relation into more chunks than necessary, so it seems best to err
in the direction of a small limit. A power-of-2 value is recommended to
save a few cycles in md.c, but is not absolutely required. Changing
RELSEG_SIZE requires an initdb. */
#define RELSEG_SIZE 131072
/* The size of `long', as computed by sizeof. */
#define SIZEOF_LONG 4
/* The size of `off_t', as computed by sizeof. */
#define SIZEOF_OFF_T 8
/* The size of `size_t', as computed by sizeof. */
#define SIZEOF_SIZE_T 4
/* The size of `void *', as computed by sizeof. */
#define SIZEOF_VOID_P 4
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if strerror_r() returns a int. */
/* #undef STRERROR_R_INT */
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
/* #undef TM_IN_SYS_TIME */
/* Define to 1 to build with assertion checks. (--enable-cassert) */
/* #undef USE_ASSERT_CHECKING */
/* Define to 1 to build with Bonjour support. (--with-bonjour) */
/* #undef USE_BONJOUR */
/* Define to 1 if you want float4 values to be passed by value.
(--enable-float4-byval) */
#define USE_FLOAT4_BYVAL 1
/* Define to 1 if you want float8, int8, etc values to be passed by value.
(--enable-float8-byval) */
/* #undef USE_FLOAT8_BYVAL */
/* Define to 1 if you want 64-bit integer timestamp and interval support.
(--enable-integer-datetimes) */
#define USE_INTEGER_DATETIMES 1
/* Define to 1 to build with LDAP support. (--with-ldap) */
//#define USE_LDAP 0
/* Define to 1 to build with XML support. (--with-libxml) */
#define USE_LIBXML 1
/* Define to 1 to use XSLT support when building contrib/xml2.
(--with-libxslt) */
#define USE_LIBXSLT 1
/* Define to select named POSIX semaphores. */
/* #undef USE_NAMED_POSIX_SEMAPHORES */
/* Define to build with OpenSSL support. (--with-openssl) */
#define USE_OPENSSL 0
#define USE_OPENSSL_RANDOM 0
#define FRONTEND 1
/* Define to 1 to build with PAM support. (--with-pam) */
#define USE_PAM 1
/* Use replacement snprintf() functions. */
/* #undef USE_REPL_SNPRINTF */
/* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */
#define USE_SLICING_BY_8_CRC32C 1
/* Define to 1 use Intel SSE 4.2 CRC instructions. */
/* #undef USE_SSE42_CRC32C */
/* Define to 1 to use Intel SSSE 4.2 CRC instructions with a runtime check. */
/* #undef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK */
/* Define to select SysV-style semaphores. */
#define USE_SYSV_SEMAPHORES 1
/* Define to select SysV-style shared memory. */
#define USE_SYSV_SHARED_MEMORY 1
/* Define to select unnamed POSIX semaphores. */
/* #undef USE_UNNAMED_POSIX_SEMAPHORES */
/* Define to select Win32-style semaphores. */
/* #undef USE_WIN32_SEMAPHORES */
/* Define to select Win32-style shared memory. */
/* #undef USE_WIN32_SHARED_MEMORY */
/* Define to 1 to build with ZSTD support. (--with-zstd) */
/* #undef USE_ZSTD */
/* Define to 1 if `wcstombs_l' requires <xlocale.h>. */
/* #undef WCSTOMBS_L_IN_XLOCALE */
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* # undef WORDS_BIGENDIAN */
# endif
#endif
/* Size of a WAL file block. This need have no particular relation to BLCKSZ.
XLOG_BLCKSZ must be a power of 2, and if your system supports O_DIRECT I/O,
XLOG_BLCKSZ must be a multiple of the alignment requirement for direct-I/O
buffers, else direct I/O may fail. Changing XLOG_BLCKSZ requires an initdb.
*/
#define XLOG_BLCKSZ 8192
/* XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2
and larger than XLOG_BLCKSZ (preferably, a great deal larger than
XLOG_BLCKSZ). Changing XLOG_SEG_SIZE requires an initdb. */
#define XLOG_SEG_SIZE (16 * 1024 * 1024)
/* Number of bits in a file offset, on hosts where this is settable. */
#define _FILE_OFFSET_BITS 64
/* Define to 1 to make fseeko visible on some hosts (e.g. glibc 2.2). */
/* #undef _LARGEFILE_SOURCE */
/* Define for large files, on AIX-style hosts. */
/* #undef _LARGE_FILES */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to the type of a signed integer type wide enough to hold a pointer,
if such a type exists, and if the system does not define it. */
/* #undef intptr_t */
/* Define to empty if the C compiler does not understand signed types. */
/* #undef signed */
/* Define to the type of an unsigned integer type wide enough to hold a
pointer, if such a type exists, and if the system does not define it. */
/* #undef uintptr_t */
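
These HAVE_* macros exist so portable code can compile a fallback only where the platform lacks a function. A minimal sketch of that pattern for HAVE_STRLCPY, which this header leaves undefined on Linux (the fallback body is illustrative, modeled on the BSD strlcpy semantics, not the actual replacement shipped with PostgreSQL):

/* sketch: compiled only when the platform has no strlcpy() */
#include <stddef.h>
#include <string.h>

#ifndef HAVE_STRLCPY
static size_t
strlcpy(char *dst, const char *src, size_t siz)
{
    size_t len = strlen(src);

    if (siz != 0)
    {
        size_t n = (len >= siz) ? siz - 1 : len;

        memcpy(dst, src, n);    /* copy what fits, always NUL-terminate */
        dst[n] = '\0';
    }
    return len;                 /* length the caller tried to create */
}
#endif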

View File

@@ -0,0 +1,7 @@
/*
 * src/include/pg_config_ext.h.in.  This is generated manually, not by
 * autoheader, since we want to limit which symbols get defined here.
 */
/* Define to the name of a signed 64-bit integer type. */
#define PG_INT64_TYPE long long int
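
Downstream headers consume this macro to get a portable 64-bit type; postgres_ext.h does essentially the typedef below. A small self-contained sketch (the main() is illustrative, not from the diff):

#include <stdio.h>

#define PG_INT64_TYPE long long int      /* from pg_config_ext.h above */

typedef PG_INT64_TYPE pg_int64;          /* as in src/include/postgres_ext.h */

int main(void)
{
    pg_int64 lsn = 1234567890123LL;

    /* INT64_MODIFIER in pg_config.h is "ll" for this choice of type */
    printf("%lld\n", (long long) lsn);
    return 0;
}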

View File

@@ -0,0 +1,34 @@
#if defined(OS_DARWIN)
/* src/include/port/darwin.h */
#define __darwin__ 1
#if HAVE_DECL_F_FULLFSYNC /* not present before macOS 10.3 */
#define HAVE_FSYNC_WRITETHROUGH
#endif
#else
/* src/include/port/linux.h */
/*
* As of July 2007, all known versions of the Linux kernel will sometimes
* return EIDRM for a shmctl() operation when EINVAL is correct (it happens
* when the low-order 15 bits of the supplied shm ID match the slot number
* assigned to a newer shmem segment). We deal with this by assuming that
* EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
* since in fact Linux has no excuse for ever returning EIDRM; it doesn't
* track removed segments in a way that would allow distinguishing them from
* private ones. But someday that code might get upgraded, and we'd have
* to have a kernel version test here.
*/
#define HAVE_LINUX_EIDRM_BUG
/*
* Set the default wal_sync_method to fdatasync. With recent Linux versions,
* xlogdefs.h's normal rules will prefer open_datasync, which (a) doesn't
* perform better and (b) causes outright failures on ext4 data=journal
* filesystems, because those don't support O_DIRECT.
*/
#define PLATFORM_DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
#endif
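
A hedged sketch of the workaround the HAVE_LINUX_EIDRM_BUG comment describes: treat EIDRM from shmctl() as if it were EINVAL, i.e. "no such segment". The function and variable names below are illustrative, not the actual PGSharedMemoryIsInUse() code:

#include <errno.h>
#include <stdbool.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static bool
shmem_segment_gone(int shmid)
{
    struct shmid_ds ds;

    if (shmctl(shmid, IPC_STAT, &ds) < 0)
    {
        if (errno == EINVAL)
            return true;
#ifdef HAVE_LINUX_EIDRM_BUG
        if (errno == EIDRM)     /* Linux may say EIDRM when it means EINVAL */
            return true;
#endif
    }
    return false;
}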

View File

@@ -0,0 +1,12 @@
#define PGBINDIR "/bin"
#define PGSHAREDIR "/share"
#define SYSCONFDIR "/etc"
#define INCLUDEDIR "/include"
#define PKGINCLUDEDIR "/include"
#define INCLUDEDIRSERVER "/include/server"
#define LIBDIR "/lib"
#define PKGLIBDIR "/lib"
#define LOCALEDIR "/share/locale"
#define DOCDIR "/doc"
#define HTMLDIR "/doc"
#define MANDIR "/man"

View File

contrib/simdjson vendored

@@ -1 +1 @@
-Subproject commit 6060be2fdf62edf4a8f51a8b0883d57d09397b30
+Subproject commit e341c8b43861b43de29c48ab65f292d997096953

View File

@@ -14,5 +14,6 @@ git config submodule."contrib/icu".update '!../sparse-checkout/update-icu.sh'
 git config submodule."contrib/boost".update '!../sparse-checkout/update-boost.sh'
 git config submodule."contrib/aws-s2n-tls".update '!../sparse-checkout/update-aws-s2n-tls.sh'
 git config submodule."contrib/protobuf".update '!../sparse-checkout/update-protobuf.sh'
+git config submodule."contrib/postgres".update '!../sparse-checkout/update-postgres.sh'
 git config submodule."contrib/libxml2".update '!../sparse-checkout/update-libxml2.sh'
 git config submodule."contrib/brotli".update '!../sparse-checkout/update-brotli.sh'

View File

@@ -0,0 +1,16 @@
#!/bin/sh
echo "Using sparse checkout for postgres"
FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
echo '!/*' > $FILES_TO_CHECKOUT
echo '/src/interfaces/libpq/*' >> $FILES_TO_CHECKOUT
echo '!/src/interfaces/libpq/*/*' >> $FILES_TO_CHECKOUT
echo '/src/common/*' >> $FILES_TO_CHECKOUT
echo '!/src/port/*/*' >> $FILES_TO_CHECKOUT
echo '/src/port/*' >> $FILES_TO_CHECKOUT
echo '/src/include/*' >> $FILES_TO_CHECKOUT
git config core.sparsecheckout true
git checkout $1
git read-tree -mu HEAD

contrib/sysroot vendored

@@ -1 +1 @@
-Subproject commit 5be834147d5b5dd77ca2b821f356982029320513
+Subproject commit 738138e665809a5b28c453983c5f48f23a340ed6

contrib/usearch vendored

@@ -1 +1 @@
-Subproject commit 7a8967cb442b08ca20c3dd781414378e65957d37
+Subproject commit d1d33eac94acd3b628e0b446c927ec3295ef63c7

View File

@@ -1,14 +1,9 @@
-set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
-set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
 set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
 add_library(_usearch INTERFACE)
-target_include_directories(_usearch SYSTEM INTERFACE
-    ${FP16_PROJECT_DIR}/include
-    ${SIMSIMD_PROJECT_DIR}/include
-    ${USEARCH_PROJECT_DIR}/include)
+target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/include)
+target_link_libraries(_usearch INTERFACE _fp16)
 target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)
 # target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)

View File

@@ -1,4 +1,11 @@
 # docker build -t clickhouse/docs-builder .
+FROM golang:alpine AS htmltest-builder
+
+ARG HTMLTEST_VERSION=0.17.0
+
+RUN CGO_ENABLED=0 go install github.com/wjdp/htmltest@v${HTMLTEST_VERSION} \
+    && mv "${GOPATH}/bin/htmltest" /usr/bin/htmltest
+
 # nodejs 17 prefers ipv6 and is broken in our environment
 FROM node:16-alpine
@@ -17,6 +24,13 @@ RUN yarn config set registry https://registry.npmjs.org \
     && yarn install \
     && yarn cache clean
+
+ENV HOME /opt/clickhouse-docs
+RUN mkdir /output_path \
+    && chmod -R a+w . /output_path \
+    && git config --global --add safe.directory /opt/clickhouse-docs
+
 COPY run.sh /run.sh
+COPY --from=htmltest-builder /usr/bin/htmltest /usr/bin/htmltest
 ENTRYPOINT ["/run.sh"]

View File

@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.4.13"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""

View File

@@ -97,6 +97,10 @@ cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUI
 # shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
 ninja $NINJA_FLAGS $BUILD_TARGET
+# We don't allow dirty files in the source directory after build
+git ls-files --others --exclude-standard | grep . && echo "^ Dirty files in the working copy after build" && exit 1
+git submodule foreach --quiet git ls-files --others --exclude-standard | grep . && echo "^ Dirty files in submodules after build" && exit 1
 ls -la ./programs
 ccache_status

View File

@@ -2,19 +2,29 @@
 # To run this script you must install docker and piddeptree python package
 #
-import subprocess
 import os
+import subprocess
 import sys
-def build_docker_deps(image_name, imagedir):
-    cmd = f"""docker run --entrypoint "/bin/bash" {image_name} -c "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze --warn silence | sed 's/ \+//g' | sort | uniq" > {imagedir}/requirements.txt"""
+def build_docker_deps(image_name: str, imagedir: str) -> None:
+    print("Fetch the newest manifest for", image_name)
+    pip_cmd = (
+        "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze "
+        "--warn silence --exclude pipdeptree"
+    )
+    # /=/!d - remove dependencies without pin
+    # ubuntu - ignore system packages
+    # \s - remove spaces
+    sed = r"sed '/==/!d; /==.*+ubuntu/d; s/\s//g'"
+    cmd = rf"""docker run --rm --entrypoint "/bin/bash" {image_name} -c "{pip_cmd} | {sed} | sort -u" > {imagedir}/requirements.txt"""
+    print("Running the command:", cmd)
     subprocess.check_call(cmd, shell=True)
 def check_docker_file_install_with_pip(filepath):
     image_name = None
-    with open(filepath, "r") as f:
+    with open(filepath, "r", encoding="utf-8") as f:
         for line in f:
             if "docker build" in line:
                 arr = line.split(" ")
@@ -25,7 +35,7 @@ def check_docker_file_install_with_pip(filepath):
     return image_name, False
-def process_affected_images(images_dir):
+def process_affected_images(images_dir: str) -> None:
     for root, _dirs, files in os.walk(images_dir):
         for f in files:
             if f == "Dockerfile":

View File

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04 AS glibc-donor
+FROM ubuntu:22.04 AS glibc-donor
 ARG TARGETARCH
 RUN arch=${TARGETARCH:-amd64} \
@@ -6,8 +6,11 @@ RUN arch=${TARGETARCH:-amd64} \
     amd64) rarch=x86_64 ;; \
     arm64) rarch=aarch64 ;; \
     esac \
-    && ln -s "${rarch}-linux-gnu" /lib/linux-gnu
+    && ln -s "${rarch}-linux-gnu" /lib/linux-gnu \
+    && case $arch in \
+    amd64) ln /lib/linux-gnu/ld-linux-x86-64.so.2 /lib/linux-gnu/ld-2.35.so ;; \
+    arm64) ln /lib/linux-gnu/ld-linux-aarch64.so.1 /lib/linux-gnu/ld-2.35.so ;; \
+    esac
 FROM alpine
@@ -17,7 +20,7 @@ ENV LANG=en_US.UTF-8 \
     TZ=UTC \
     CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
-COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
+COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.35.so /lib/
 COPY --from=glibc-donor /etc/nsswitch.conf /etc/
 COPY docker_related_config.xml /etc/clickhouse-server/config.d/
 COPY entrypoint.sh /entrypoint.sh
@@ -25,14 +28,14 @@ COPY entrypoint.sh /entrypoint.sh
 ARG TARGETARCH
 RUN arch=${TARGETARCH:-amd64} \
     && case $arch in \
-    amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
-    arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
+    amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.35.so /lib64/ld-linux-x86-64.so.2 ;; \
+    arm64) ln -sf /lib/ld-2.35.so /lib/ld-linux-aarch64.so.1 ;; \
     esac
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.4.13"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""

View File

@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.8.4.13"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 #docker-official-library:off

View File

@@ -1,9 +1,10 @@
 #!/usr/bin/env python3
-from argparse import ArgumentParser
-import os
-import jinja2
 import itertools
+import os
+from argparse import ArgumentParser
+
+import jinja2
 def removesuffix(text, suffix):

View File

@@ -1,7 +1,8 @@
+import datetime
 import os
 import subprocess
-import datetime
-from flask import Flask, flash, request, redirect, url_for
+
+from flask import Flask, flash, redirect, request, url_for
 def run_command(command, wait=False):

View File

@@ -48,7 +48,7 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
     && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
     && apt-get update \
     && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
-        docker-ce='5:23.*' \
+        docker-ce='5:23.*' docker-compose-plugin='2.29.*' \
     && rm -rf \
         /var/lib/apt/lists/* \
         /var/cache/debconf \

View File

@@ -0,0 +1 @@
[rabbitmq_consistent_hash_exchange].

View File

@@ -13,3 +13,5 @@ ssl_options.fail_if_no_peer_cert = false
 ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem
 ssl_options.certfile = /etc/rabbitmq/server-cert.pem
 ssl_options.keyfile = /etc/rabbitmq/server-key.pem
+
+vm_memory_high_watermark.absolute = 2GB

View File

@@ -1,15 +1,13 @@
 PyHDFS==0.3.1
-PyJWT==2.3.0
-PyMySQL==1.1.0
+PyJWT==2.4.0
+PyMySQL==1.1.1
 PyNaCl==1.5.0
-PyYAML==5.3.1
 SecretStorage==3.3.1
 argon2-cffi-bindings==21.2.0
 argon2-cffi==23.1.0
 async-timeout==4.0.3
 asyncio==3.4.3
-attrs==23.2.0
-avro==1.10.2
+avro==1.11.3
 azure-core==1.30.1
 azure-storage-blob==12.19.0
 bcrypt==4.1.3
@@ -24,18 +22,13 @@ cffi==1.16.0
 charset-normalizer==3.3.2
 click==8.1.7
 confluent-kafka==2.3.0
-cryptography==3.4.8
+cryptography==42.0.0
 dbus-python==1.2.18
-decorator==5.1.1
 delta-spark==2.3.0
-deltalake==0.16.0
 dict2xml==1.7.4
 dicttoxml==1.7.16
-distro-info==1.1+ubuntu0.2
-distro==1.7.0
-docker-compose==1.29.2
 docker==6.1.3
-dockerpty==0.4.1
-docopt==0.6.2
 exceptiongroup==1.2.1
 execnet==2.1.1
 geomet==0.2.1.post1
@@ -49,7 +42,6 @@ iniconfig==2.0.0
 isodate==0.6.1
 jeepney==0.7.1
 jmespath==1.0.1
-jsonschema==3.2.0
 jwcrypto==1.5.6
 kafka-python==2.0.2
 kazoo==2.9.0
@@ -63,23 +55,22 @@ lz4==4.3.3
 minio==7.2.3
 more-itertools==8.10.0
 nats-py==2.6.0
+numpy==2.1.0
 oauthlib==3.2.0
 packaging==24.0
 paramiko==3.4.0
 pika==1.2.0
 pip==24.1.1
+pipdeptree==2.23.0
 pluggy==1.5.0
 protobuf==4.25.2
 psycopg2-binary==2.9.6
 py4j==0.10.9.5
-py==1.11.0
+pyarrow-hotfix==0.6
 pyarrow==17.0.0
 pycparser==2.22
 pycryptodome==3.20.0
 pymongo==3.11.0
 pyparsing==2.4.7
-pyrsistent==0.20.0
 pyspark==3.3.2
 pyspnego==0.10.2
 pytest-order==1.0.0
@@ -89,28 +80,22 @@ pytest-reportlog==0.4.0
 pytest-timeout==2.2.0
 pytest-xdist==3.5.0
 pytest==7.4.4
-python-apt==2.4.0+ubuntu3
 python-dateutil==2.9.0.post0
-python-dotenv==0.21.1
 pytz==2023.3.post1
 redis==5.0.1
 requests-kerberos==0.14.0
 requests==2.31.0
-retry==0.9.2
 s3transfer==0.10.1
-setuptools==59.6.0
+setuptools==70.0.0
 simplejson==3.19.2
 six==1.16.0
 soupsieve==2.5
-texttable==1.7.0
 tomli==2.0.1
 typing_extensions==4.11.0
 tzlocal==2.1
 unattended-upgrades==0.1
 urllib3==2.0.7
 wadllib==1.3.6
-websocket-client==0.59.0
-wheel==0.37.1
+websocket-client==1.8.0
+wheel==0.38.1
 zipp==1.0.0
+deltalake==0.16.0

View File

@@ -1,9 +1,9 @@
 #!/usr/bin/env python3
-import os
-import logging
 import argparse
 import csv
+import logging
+import os
 def process_result(result_folder):

View File

@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
-import os
-import yaml
 import html
+import os
 import random
 import string
-from clickhouse_driver import Client
+import yaml
+from clickhouse_driver import Client
 client = Client(host="localhost", port=9000)
 settings = {

Some files were not shown because too many files have changed in this diff.