Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-28 18:42:26 +00:00)

Commit 0aa8dafe0a: Merge remote-tracking branch 'upstream/master' into HEAD
.github/ISSUE_TEMPLATE/85_bug-report.md (vendored, 6 changed lines)

@@ -17,7 +17,7 @@ assignees: ''
 > A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/).

-**Does it reproduce on recent release?**
+**Does it reproduce on the most recent release?**

 [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)

@@ -34,11 +34,11 @@ assignees: ''
 **How to reproduce**

 * Which ClickHouse server version to use
-* Which interface to use, if matters
+* Which interface to use, if it matters
 * Non-default settings, if any
 * `CREATE TABLE` statements for all tables involved
 * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
-* Queries to run that lead to unexpected result
+* Queries to run that lead to an unexpected result

 **Expected behavior**

.github/workflows/backport_branches.yml (vendored, 15 changed lines)

@@ -138,19 +138,26 @@ jobs:
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
-DockerServerImages:
+DockerServerImage:
 needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
 if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
-test_name: Docker server and keeper images
+test_name: Docker server image
 runner_type: style-checker
 data: ${{ needs.RunConfig.outputs.data }}
-checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
 run_command: |
-cd "$GITHUB_WORKSPACE/tests/ci"
 python3 docker_server.py --release-type head --no-push \
 --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
+DockerKeeperImage:
+needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Docker keeper image
+runner_type: style-checker
+data: ${{ needs.RunConfig.outputs.data }}
+run_command: |
 python3 docker_server.py --release-type head --no-push \
 --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
 ############################################################################################

.github/workflows/master.yml (vendored, 138 changed lines)

@@ -35,7 +35,7 @@ jobs:
 - name: PrepareRunConfig
 id: runconfig
 run: |
-python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --rebuild-all-binaries --outfile ${{ runner.temp }}/ci_run_data.json
+python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json

 echo "::group::CI configuration"
 python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
@@ -55,7 +55,6 @@ jobs:
 uses: ./.github/workflows/reusable_docker.yml
 with:
 data: ${{ needs.RunConfig.outputs.data }}
-set_latest: true
 StyleCheck:
 needs: [RunConfig, BuildDockers]
 if: ${{ !failure() && !cancelled() }}
@@ -98,6 +97,14 @@ jobs:
 build_name: package_release
 checkout_depth: 0
 data: ${{ needs.RunConfig.outputs.data }}
+BuilderDebReleaseCoverage:
+needs: [RunConfig, BuildDockers]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_build.yml
+with:
+build_name: package_release_coverage
+checkout_depth: 0
+data: ${{ needs.RunConfig.outputs.data }}
 BuilderDebAarch64:
 needs: [RunConfig, BuildDockers]
 if: ${{ !failure() && !cancelled() }}
@@ -242,20 +249,26 @@ jobs:
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
-DockerServerImages:
+DockerServerImage:
 needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
 if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
-test_name: Docker server and keeper images
+test_name: Docker server image
 runner_type: style-checker
 data: ${{ needs.RunConfig.outputs.data }}
-# FIXME: avoid using 0 checkout
-checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
 run_command: |
-cd "$GITHUB_WORKSPACE/tests/ci"
 python3 docker_server.py --release-type head \
 --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
+DockerKeeperImage:
+needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Docker keeper image
+runner_type: style-checker
+data: ${{ needs.RunConfig.outputs.data }}
+run_command: |
 python3 docker_server.py --release-type head \
 --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
 ############################################################################################
@@ -272,6 +285,7 @@ jobs:
 - BuilderDebDebug
 - BuilderDebMsan
 - BuilderDebRelease
+- BuilderDebReleaseCoverage
 - BuilderDebTsan
 - BuilderDebUBsan
 uses: ./.github/workflows/reusable_test.yml
@@ -313,7 +327,7 @@ jobs:
 run_command: |
 python3 build_report_check.py "$CHECK_NAME"
 MarkReleaseReady:
-if: ${{ !failure() && !cancelled() }}
+if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
 needs:
 - BuilderBinDarwin
 - BuilderBinDarwinAarch64
@@ -323,8 +337,6 @@ jobs:
 steps:
 - name: Check out repository code
 uses: ClickHouse/checkout@v1
-with:
-clear-repository: true
 - name: Mark Commit Release Ready
 run: |
 cd "$GITHUB_WORKSPACE/tests/ci"
@@ -363,14 +375,6 @@ jobs:
 test_name: Stateless tests (release)
 runner_type: func-tester
 data: ${{ needs.RunConfig.outputs.data }}
-FunctionalStatelessTestReleaseDatabaseOrdinary:
-needs: [RunConfig, BuilderDebRelease]
-if: ${{ !failure() && !cancelled() }}
-uses: ./.github/workflows/reusable_test.yml
-with:
-test_name: Stateless tests (release, DatabaseOrdinary)
-runner_type: func-tester
-data: ${{ needs.RunConfig.outputs.data }}
 FunctionalStatelessTestReleaseDatabaseReplicated:
 needs: [RunConfig, BuilderDebRelease]
 if: ${{ !failure() && !cancelled() }}
@@ -395,6 +399,22 @@ jobs:
 test_name: Stateless tests (release, s3 storage)
 runner_type: func-tester
 data: ${{ needs.RunConfig.outputs.data }}
+FunctionalStatelessTestS3Debug:
+needs: [RunConfig, BuilderDebDebug]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateless tests (debug, s3 storage)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
+FunctionalStatelessTestS3Tsan:
+needs: [RunConfig, BuilderDebTsan]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateless tests (tsan, s3 storage)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
 FunctionalStatelessTestAarch64:
 needs: [RunConfig, BuilderDebAarch64]
 if: ${{ !failure() && !cancelled() }}
@@ -503,6 +523,55 @@ jobs:
 test_name: Stateful tests (debug)
 runner_type: func-tester
 data: ${{ needs.RunConfig.outputs.data }}
+# Parallel replicas
+FunctionalStatefulTestDebugParallelReplicas:
+needs: [RunConfig, BuilderDebDebug]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateful tests (debug, ParallelReplicas)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
+FunctionalStatefulTestUBsanParallelReplicas:
+needs: [RunConfig, BuilderDebUBsan]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateful tests (ubsan, ParallelReplicas)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
+FunctionalStatefulTestMsanParallelReplicas:
+needs: [RunConfig, BuilderDebMsan]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateful tests (msan, ParallelReplicas)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
+FunctionalStatefulTestTsanParallelReplicas:
+needs: [RunConfig, BuilderDebTsan]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateful tests (tsan, ParallelReplicas)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
+FunctionalStatefulTestAsanParallelReplicas:
+needs: [RunConfig, BuilderDebAsan]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateful tests (asan, ParallelReplicas)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
+FunctionalStatefulTestReleaseParallelReplicas:
+needs: [RunConfig, BuilderDebRelease]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Stateful tests (release, ParallelReplicas)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
 ##############################################################################################
 ########################### ClickBench #######################################################
 ##############################################################################################
@@ -710,6 +779,28 @@ jobs:
 runner_type: func-tester-aarch64
 data: ${{ needs.RunConfig.outputs.data }}
 ##############################################################################################
+############################ SQLLOGIC TEST ###################################################
+##############################################################################################
+SQLLogicTestRelease:
+needs: [RunConfig, BuilderDebRelease]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Sqllogic test (release)
+runner_type: func-tester
+data: ${{ needs.RunConfig.outputs.data }}
+##############################################################################################
+##################################### SQL TEST ###############################################
+##############################################################################################
+SQLTest:
+needs: [RunConfig, BuilderDebRelease]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: SQLTest
+runner_type: fuzzer-unit-tester
+data: ${{ needs.RunConfig.outputs.data }}
+##############################################################################################
 ###################################### SQLANCER FUZZERS ######################################
 ##############################################################################################
 SQLancerTestRelease:
@@ -734,7 +825,6 @@ jobs:
 - MarkReleaseReady
 - FunctionalStatelessTestDebug
 - FunctionalStatelessTestRelease
-- FunctionalStatelessTestReleaseDatabaseOrdinary
 - FunctionalStatelessTestReleaseDatabaseReplicated
 - FunctionalStatelessTestReleaseAnalyzer
 - FunctionalStatelessTestReleaseS3
@@ -743,6 +833,8 @@ jobs:
 - FunctionalStatelessTestTsan
 - FunctionalStatelessTestMsan
 - FunctionalStatelessTestUBsan
+- FunctionalStatelessTestS3Debug
+- FunctionalStatelessTestS3Tsan
 - FunctionalStatefulTestDebug
 - FunctionalStatefulTestRelease
 - FunctionalStatefulTestAarch64
@@ -750,6 +842,12 @@ jobs:
 - FunctionalStatefulTestTsan
 - FunctionalStatefulTestMsan
 - FunctionalStatefulTestUBsan
+- FunctionalStatefulTestDebugParallelReplicas
+- FunctionalStatefulTestUBsanParallelReplicas
+- FunctionalStatefulTestMsanParallelReplicas
+- FunctionalStatefulTestTsanParallelReplicas
+- FunctionalStatefulTestAsanParallelReplicas
+- FunctionalStatefulTestReleaseParallelReplicas
 - StressTestDebug
 - StressTestAsan
 - StressTestTsan
@@ -775,6 +873,8 @@ jobs:
 - UnitTestsReleaseClang
 - SQLancerTestRelease
 - SQLancerTestDebug
+- SQLLogicTestRelease
+- SQLTest
 runs-on: [self-hosted, style-checker]
 steps:
 - name: Check out repository code

.github/workflows/nightly.yml (vendored, 2 changed lines)

@@ -28,7 +28,7 @@ jobs:
 id: runconfig
 run: |
 echo "::group::configure CI run"
-python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --rebuild-all-docker --outfile ${{ runner.temp }}/ci_run_data.json
+python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --outfile ${{ runner.temp }}/ci_run_data.json
 echo "::endgroup::"

 echo "::group::CI run configure results"

.github/workflows/pull_request.yml (vendored, 53 changed lines)

@@ -104,7 +104,7 @@ jobs:
 if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
-test_name: Fast tests
+test_name: Fast test
 runner_type: builder
 data: ${{ needs.RunConfig.outputs.data }}
 run_command: |
@@ -147,6 +147,14 @@ jobs:
 build_name: package_release
 checkout_depth: 0
 data: ${{ needs.RunConfig.outputs.data }}
+BuilderDebReleaseCoverage:
+needs: [RunConfig, FastTest]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_build.yml
+with:
+build_name: package_release_coverage
+checkout_depth: 0
+data: ${{ needs.RunConfig.outputs.data }}
 BuilderDebAarch64:
 needs: [RunConfig, FastTest]
 if: ${{ !failure() && !cancelled() }}
@@ -273,19 +281,26 @@ jobs:
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
-DockerServerImages:
+DockerServerImage:
 needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
 if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
-test_name: Docker server and keeper images
+test_name: Docker server image
 runner_type: style-checker
 data: ${{ needs.RunConfig.outputs.data }}
-checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
 run_command: |
-cd "$GITHUB_WORKSPACE/tests/ci"
 python3 docker_server.py --release-type head --no-push \
 --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
+DockerKeeperImage:
+needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Docker keeper image
+runner_type: style-checker
+data: ${{ needs.RunConfig.outputs.data }}
+run_command: |
 python3 docker_server.py --release-type head --no-push \
 --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
 ############################################################################################
@@ -302,6 +317,7 @@ jobs:
 - BuilderDebDebug
 - BuilderDebMsan
 - BuilderDebRelease
+- BuilderDebReleaseCoverage
 - BuilderDebTsan
 - BuilderDebUBsan
 uses: ./.github/workflows/reusable_test.yml
@@ -476,21 +492,9 @@ jobs:
 if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
-test_name: tests bugfix validate check
+test_name: Bugfix validation
 runner_type: func-tester
 data: ${{ needs.RunConfig.outputs.data }}
-additional_envs: |
-KILL_TIMEOUT=3600
-run_command: |
-TEMP_PATH="${TEMP_PATH}/integration" \
-python3 integration_test_check.py "Integration $CHECK_NAME" \
---validate-bugfix --post-commit-status=file || echo 'ignore exit code'
-
-TEMP_PATH="${TEMP_PATH}/stateless" \
-python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \
---validate-bugfix --post-commit-status=file || echo 'ignore exit code'
-
-python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv"
 ##############################################################################################
 ############################ FUNCTIONAl STATEFUL TESTS #######################################
 ##############################################################################################
@@ -778,6 +782,15 @@ jobs:
 test_name: Integration tests (release)
 runner_type: stress-tester
 data: ${{ needs.RunConfig.outputs.data }}
+IntegrationTestsAarch64:
+needs: [RunConfig, BuilderDebAarch64]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Integration tests (aarch64)
+# FIXME: there is no stress-tester for aarch64. func-tester-aarch64 is ok?
+runner_type: func-tester-aarch64
+data: ${{ needs.RunConfig.outputs.data }}
 IntegrationTestsFlakyCheck:
 needs: [RunConfig, BuilderDebAsan]
 if: ${{ !failure() && !cancelled() }}
@@ -874,6 +887,7 @@ jobs:
 - BuilderSpecialReport
 - DocsCheck
 - FastTest
+- TestsBugfixCheck
 - FunctionalStatelessTestDebug
 - FunctionalStatelessTestRelease
 - FunctionalStatelessTestReleaseDatabaseReplicated
@@ -917,6 +931,7 @@ jobs:
 - IntegrationTestsAnalyzerAsan
 - IntegrationTestsTsan
 - IntegrationTestsRelease
+- IntegrationTestsAarch64
 - IntegrationTestsFlakyCheck
 - PerformanceComparisonX86
 - PerformanceComparisonAarch
@@ -985,7 +1000,7 @@ jobs:
 ####################################### libFuzzer ###########################################
 #############################################################################################
 libFuzzer:
-if: ${{ !failure() && !cancelled() && contains(github.event.pull_request.labels.*.name, 'libFuzzer') }}
+if: ${{ !failure() && !cancelled() }}
 needs: [RunConfig, StyleCheck]
 uses: ./.github/workflows/libfuzzer.yml
 with:

.github/workflows/release_branches.yml (vendored, 41 changed lines)

@@ -41,7 +41,7 @@ jobs:
 id: runconfig
 run: |
 echo "::group::configure CI run"
-python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --rebuild-all-binaries --outfile ${{ runner.temp }}/ci_run_data.json
+python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
 echo "::endgroup::"
 echo "::group::CI run configure results"
 python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
@@ -91,6 +91,8 @@ jobs:
 build_name: package_release
 checkout_depth: 0
 data: ${{ needs.RunConfig.outputs.data }}
+# always rebuild on release branches to be able to publish from any commit
+force: true
 BuilderDebAarch64:
 needs: [RunConfig, BuildDockers]
 if: ${{ !failure() && !cancelled() }}
@@ -99,6 +101,8 @@ jobs:
 build_name: package_aarch64
 checkout_depth: 0
 data: ${{ needs.RunConfig.outputs.data }}
+# always rebuild on release branches to be able to publish from any commit
+force: true
 BuilderDebAsan:
 needs: [RunConfig, BuildDockers]
 if: ${{ !failure() && !cancelled() }}
@@ -142,6 +146,8 @@ jobs:
 build_name: binary_darwin
 checkout_depth: 0
 data: ${{ needs.RunConfig.outputs.data }}
+# always rebuild on release branches to be able to publish from any commit
+force: true
 BuilderBinDarwinAarch64:
 needs: [RunConfig, BuildDockers]
 if: ${{ !failure() && !cancelled() }}
@@ -150,22 +156,31 @@ jobs:
 build_name: binary_darwin_aarch64
 checkout_depth: 0
 data: ${{ needs.RunConfig.outputs.data }}
+# always rebuild on release branches to be able to publish from any commit
+force: true
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
-DockerServerImages:
+DockerServerImage:
 needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
 if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
-test_name: Docker server and keeper images
+test_name: Docker server image
 runner_type: style-checker
 data: ${{ needs.RunConfig.outputs.data }}
-checkout_depth: 0
 run_command: |
-cd "$GITHUB_WORKSPACE/tests/ci"
 python3 docker_server.py --release-type head --no-push \
 --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
+DockerKeeperImage:
+needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+if: ${{ !failure() && !cancelled() }}
+uses: ./.github/workflows/reusable_test.yml
+with:
+test_name: Docker keeper image
+runner_type: style-checker
+data: ${{ needs.RunConfig.outputs.data }}
+run_command: |
 python3 docker_server.py --release-type head --no-push \
 --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
 ############################################################################################
@@ -199,13 +214,8 @@ jobs:
 if: ${{ !cancelled() }}
 needs:
 - RunConfig
-- BuilderDebRelease
-- BuilderDebAarch64
-- BuilderDebAsan
-- BuilderDebTsan
-- BuilderDebUBsan
-- BuilderDebMsan
-- BuilderDebDebug
+- BuilderBinDarwin
+- BuilderBinDarwinAarch64
 uses: ./.github/workflows/reusable_test.yml
 with:
 test_name: ClickHouse special build check
@@ -218,7 +228,7 @@ jobs:
 run_command: |
 python3 build_report_check.py "$CHECK_NAME"
 MarkReleaseReady:
-if: ${{ !failure() && !cancelled() }}
+if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
 needs:
 - BuilderBinDarwin
 - BuilderBinDarwinAarch64
@@ -228,8 +238,6 @@ jobs:
 steps:
 - name: Check out repository code
 uses: ClickHouse/checkout@v1
-with:
-clear-repository: true
 - name: Mark Commit Release Ready
 run: |
 cd "$GITHUB_WORKSPACE/tests/ci"
@@ -456,7 +464,8 @@ jobs:
 FinishCheck:
 if: ${{ !failure() && !cancelled() }}
 needs:
-- DockerServerImages
+- DockerServerImage
+- DockerKeeperImage
 - BuilderReport
 - BuilderSpecialReport
 - MarkReleaseReady

.github/workflows/reusable_build.yml (vendored, 10 changed lines)

@@ -26,6 +26,10 @@ name: Build ClickHouse
 description: json ci data
 type: string
 required: true
+force:
+description: disallow job skipping
+type: boolean
+default: false
 additional_envs:
 description: additional ENV variables to setup the job
 type: string
@@ -33,7 +37,7 @@ name: Build ClickHouse
 jobs:
 Build:
 name: Build-${{inputs.build_name}}
-if: contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name)
+if: ${{ contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name) || inputs.force }}
 env:
 GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
 runs-on: [self-hosted, '${{inputs.runner_type}}']
@@ -78,13 +82,15 @@ jobs:
 python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
 --infile ${{ toJson(inputs.data) }} \
 --job-name "$BUILD_NAME" \
---run
+--run \
+${{ inputs.force && '--force' || '' }}
 - name: Post
 # it still be build report to upload for failed build job
 if: ${{ !cancelled() }}
 run: |
 python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
 - name: Mark as done
+if: ${{ !cancelled() }}
 run: |
 python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
 - name: Clean

.github/workflows/reusable_docker.yml (vendored, 16 changed lines)

@@ -46,7 +46,7 @@ jobs:
 needs: [DockerBuildAmd64, DockerBuildAarch64]
 runs-on: [self-hosted, style-checker]
 if: |
-!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]'
+!failure() && !cancelled() && (toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]' || inputs.set_latest)
 steps:
 - name: Check out repository code
 uses: ClickHouse/checkout@v1
@@ -55,14 +55,12 @@ jobs:
 - name: Build images
 run: |
 cd "$GITHUB_WORKSPACE/tests/ci"
+FLAG_LATEST=''
 if [ "${{ inputs.set_latest }}" == "true" ]; then
+FLAG_LATEST='--set-latest'
 echo "latest tag will be set for resulting manifests"
-python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
---image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
---missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
---set-latest
-else
-python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
---image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
---missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}'
 fi
+python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
+--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
+--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
+$FLAG_LATEST

.github/workflows/reusable_test.yml (vendored, 1 changed line)

@@ -107,6 +107,7 @@ jobs:
 run: |
 python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
 - name: Mark as done
+if: ${{ !cancelled() }}
 run: |
 python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
 - name: Clean

.github/workflows/tags_stable.yml (vendored, 2 changed lines)

@@ -55,7 +55,7 @@ jobs:
 python3 ./utils/security-generator/generate_security.py > SECURITY.md
 git diff HEAD
 - name: Create Pull Request
-uses: peter-evans/create-pull-request@v3
+uses: peter-evans/create-pull-request@v6
 with:
 author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
 token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}

.gitmessage (11 changed lines)

@@ -1,6 +1,6 @@


-### CI modificators (add a leading space to apply):
+### CI modificators (add a leading space to apply) ###

 ## To avoid a merge commit in CI:
 #no_merge_commit
@@ -8,12 +8,21 @@
 ## To discard CI cache:
 #no_ci_cache

+## To not test (only style check):
+#do_not_test
+
 ## To run specified set of tests in CI:
 #ci_set_<SET_NAME>
 #ci_set_reduced
+#ci_set_arm
+#ci_set_integration

 ## To run specified job in CI:
 #job_<JOB NAME>
 #job_stateless_tests_release
 #job_package_debug
 #job_integration_tests_asan
+
+## To run only specified batches for multi-batch job(s)
+#batch_2
+#btach_1_2_3
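For illustration only, a hypothetical commit message (not part of this diff) that uses the modificators documented above; per the template's own instruction, a tag only applies when its line is written with a leading space:

Fix a flaky integration test

 #ci_set_integration
 #batch_2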
.gitmodules (vendored, 2 changed lines)

@@ -99,7 +99,7 @@
 url = https://github.com/awslabs/aws-c-event-stream
 [submodule "aws-c-common"]
 path = contrib/aws-c-common
-url = https://github.com/ClickHouse/aws-c-common
+url = https://github.com/awslabs/aws-c-common.git
 [submodule "aws-checksums"]
 path = contrib/aws-checksums
 url = https://github.com/awslabs/aws-checksums

CHANGELOG.md (2305 changed lines): diff suppressed because it is too large.

@ -254,10 +254,17 @@ endif()
|
|||||||
|
|
||||||
include(cmake/cpu_features.cmake)
|
include(cmake/cpu_features.cmake)
|
||||||
|
|
||||||
# Asynchronous unwind tables are needed for Query Profiler.
|
|
||||||
# They are already by default on some platforms but possibly not on all platforms.
|
# Query Profiler doesn't work on MacOS for several reasons
|
||||||
# Enable it explicitly.
|
# - PHDR cache is not available
|
||||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
|
# - We use native functionality to get stacktraces which is not async signal safe
|
||||||
|
# and thus we don't need to generate asynchronous unwind tables
|
||||||
|
if (NOT OS_DARWIN)
|
||||||
|
# Asynchronous unwind tables are needed for Query Profiler.
|
||||||
|
# They are already by default on some platforms but possibly not on all platforms.
|
||||||
|
# Enable it explicitly.
|
||||||
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
|
||||||
|
endif()
|
||||||
|
|
||||||
# Reproducible builds.
|
# Reproducible builds.
|
||||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
||||||
@ -348,7 +355,7 @@ if (COMPILER_CLANG)
|
|||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
|
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
|
||||||
|
|
||||||
if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
|
if (NOT ENABLE_TESTS AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX)
|
||||||
# https://clang.llvm.org/docs/ThinLTO.html
|
# https://clang.llvm.org/docs/ThinLTO.html
|
||||||
# Applies to clang and linux only.
|
# Applies to clang and linux only.
|
||||||
# Disabled when building with tests or sanitizers.
|
# Disabled when building with tests or sanitizers.
|
||||||
@ -546,7 +553,7 @@ if (ENABLE_RUST)
|
|||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
|
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
|
||||||
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
|
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
|
||||||
else ()
|
else ()
|
||||||
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
|
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
|
||||||
|
@ -37,7 +37,7 @@ Keep an eye out for upcoming meetups around the world. Somewhere else you want u
|
|||||||
|
|
||||||
## Recent Recordings
|
## Recent Recordings
|
||||||
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
|
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
|
||||||
* **Recording available**: [**v23.10 Release Webinar**](https://www.youtube.com/watch?v=PGQS6uPb970) All the features of 23.10, one convenient video! Watch it now!
|
* **Recording available**: [**v24.1 Release Webinar**](https://www.youtube.com/watch?v=pBF9g0wGAGs) All the features of 24.1, one convenient video! Watch it now!
|
||||||
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
|
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
|
||||||
|
|
||||||
|
|
||||||
|
@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
|
|||||||
|
|
||||||
| Version | Supported |
|
| Version | Supported |
|
||||||
|:-|:-|
|
|:-|:-|
|
||||||
|
| 24.1 | ✔️ |
|
||||||
| 23.12 | ✔️ |
|
| 23.12 | ✔️ |
|
||||||
| 23.11 | ✔️ |
|
| 23.11 | ✔️ |
|
||||||
| 23.10 | ✔️ |
|
| 23.10 | ❌ |
|
||||||
| 23.9 | ❌ |
|
| 23.9 | ❌ |
|
||||||
| 23.8 | ✔️ |
|
| 23.8 | ✔️ |
|
||||||
| 23.7 | ❌ |
|
| 23.7 | ❌ |
|
||||||
|
@ -17,6 +17,7 @@ set (SRCS
|
|||||||
getMemoryAmount.cpp
|
getMemoryAmount.cpp
|
||||||
getPageSize.cpp
|
getPageSize.cpp
|
||||||
getThreadId.cpp
|
getThreadId.cpp
|
||||||
|
int8_to_string.cpp
|
||||||
JSON.cpp
|
JSON.cpp
|
||||||
mremap.cpp
|
mremap.cpp
|
||||||
phdr_cache.cpp
|
phdr_cache.cpp
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <bit>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <type_traits>
|
#include <type_traits>
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
#include "coverage.h"
|
#include "coverage.h"
|
||||||
|
#include <sys/mman.h>
|
||||||
|
|
||||||
#pragma GCC diagnostic ignored "-Wreserved-identifier"
|
#pragma GCC diagnostic ignored "-Wreserved-identifier"
|
||||||
|
|
||||||
@ -52,11 +53,21 @@ namespace
|
|||||||
uint32_t * guards_start = nullptr;
|
uint32_t * guards_start = nullptr;
|
||||||
uint32_t * guards_end = nullptr;
|
uint32_t * guards_end = nullptr;
|
||||||
|
|
||||||
uintptr_t * coverage_array = nullptr;
|
uintptr_t * current_coverage_array = nullptr;
|
||||||
|
uintptr_t * cumulative_coverage_array = nullptr;
|
||||||
size_t coverage_array_size = 0;
|
size_t coverage_array_size = 0;
|
||||||
|
|
||||||
uintptr_t * all_addresses_array = nullptr;
|
uintptr_t * all_addresses_array = nullptr;
|
||||||
size_t all_addresses_array_size = 0;
|
size_t all_addresses_array_size = 0;
|
||||||
|
|
||||||
|
uintptr_t * allocate(size_t size)
|
||||||
|
{
|
||||||
|
/// Note: mmap return zero-initialized memory, and we count on that.
|
||||||
|
void * map = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
||||||
|
if (MAP_FAILED == map)
|
||||||
|
return nullptr;
|
||||||
|
return static_cast<uintptr_t*>(map);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
extern "C"
|
extern "C"
|
||||||
@ -79,7 +90,8 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t * start, uint32_t * stop)
|
|||||||
coverage_array_size = stop - start;
|
coverage_array_size = stop - start;
|
||||||
|
|
||||||
/// Note: we will leak this.
|
/// Note: we will leak this.
|
||||||
coverage_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
|
current_coverage_array = allocate(sizeof(uintptr_t) * coverage_array_size);
|
||||||
|
cumulative_coverage_array = allocate(sizeof(uintptr_t) * coverage_array_size);
|
||||||
|
|
||||||
resetCoverage();
|
resetCoverage();
|
||||||
}
|
}
|
||||||
@ -92,8 +104,8 @@ void __sanitizer_cov_pcs_init(const uintptr_t * pcs_begin, const uintptr_t * pcs
|
|||||||
return;
|
return;
|
||||||
pc_table_initialized = true;
|
pc_table_initialized = true;
|
||||||
|
|
||||||
all_addresses_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
|
|
||||||
all_addresses_array_size = pcs_end - pcs_begin;
|
all_addresses_array_size = pcs_end - pcs_begin;
|
||||||
|
all_addresses_array = allocate(sizeof(uintptr_t) * all_addresses_array_size);
|
||||||
|
|
||||||
/// They are not a real pointers, but also contain a flag in the most significant bit,
|
/// They are not a real pointers, but also contain a flag in the most significant bit,
|
||||||
/// in which we are not interested for now. Reset it.
|
/// in which we are not interested for now. Reset it.
|
||||||
@ -115,17 +127,24 @@ void __sanitizer_cov_trace_pc_guard(uint32_t * guard)
|
|||||||
/// The values of `*guard` are as you set them in
|
/// The values of `*guard` are as you set them in
|
||||||
/// __sanitizer_cov_trace_pc_guard_init and so you can make them consecutive
|
/// __sanitizer_cov_trace_pc_guard_init and so you can make them consecutive
|
||||||
/// and use them to dereference an array or a bit vector.
|
/// and use them to dereference an array or a bit vector.
|
||||||
void * pc = __builtin_return_address(0);
|
intptr_t pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
|
||||||
|
|
||||||
coverage_array[guard - guards_start] = reinterpret_cast<uintptr_t>(pc);
|
current_coverage_array[guard - guards_start] = pc;
|
||||||
|
cumulative_coverage_array[guard - guards_start] = pc;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
__attribute__((no_sanitize("coverage")))
|
__attribute__((no_sanitize("coverage")))
|
||||||
std::span<const uintptr_t> getCoverage()
|
std::span<const uintptr_t> getCurrentCoverage()
|
||||||
{
|
{
|
||||||
return {coverage_array, coverage_array_size};
|
return {current_coverage_array, coverage_array_size};
|
||||||
|
}
|
||||||
|
|
||||||
|
__attribute__((no_sanitize("coverage")))
|
||||||
|
std::span<const uintptr_t> getCumulativeCoverage()
|
||||||
|
{
|
||||||
|
return {cumulative_coverage_array, coverage_array_size};
|
||||||
}
|
}
|
||||||
|
|
||||||
__attribute__((no_sanitize("coverage")))
|
__attribute__((no_sanitize("coverage")))
|
||||||
@ -137,7 +156,7 @@ std::span<const uintptr_t> getAllInstrumentedAddresses()
|
|||||||
__attribute__((no_sanitize("coverage")))
|
__attribute__((no_sanitize("coverage")))
|
||||||
void resetCoverage()
|
void resetCoverage()
|
||||||
{
|
{
|
||||||
memset(coverage_array, 0, coverage_array_size * sizeof(*coverage_array));
|
memset(current_coverage_array, 0, coverage_array_size * sizeof(*current_coverage_array));
|
||||||
|
|
||||||
/// The guard defines whether the __sanitizer_cov_trace_pc_guard should be called.
|
/// The guard defines whether the __sanitizer_cov_trace_pc_guard should be called.
|
||||||
/// For example, you can unset it after first invocation to prevent excessive work.
|
/// For example, you can unset it after first invocation to prevent excessive work.
|
||||||
|
@@ -15,7 +15,10 @@ void dumpCoverageReportIfPossible();
 /// Get accumulated unique program addresses of the instrumented parts of the code,
 /// seen so far after program startup or after previous reset.
 /// The returned span will be represented as a sparse map, containing mostly zeros, which you should filter away.
-std::span<const uintptr_t> getCoverage();
+std::span<const uintptr_t> getCurrentCoverage();
+
+/// Similar but not being reset.
+std::span<const uintptr_t> getCumulativeCoverage();

 /// Get all instrumented addresses that could be in the coverage.
 std::span<const uintptr_t> getAllInstrumentedAddresses();
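For illustration only (not part of this change): a caller of the declarations above has to filter the sparse span, since most entries are zero. The helper name below is hypothetical, and the coverage header path is omitted on purpose.

    #include <cstdint>
    #include <span>
    #include <unordered_set>

    /// Hypothetical helper: collect the non-zero addresses seen since the last reset.
    /// Assumes the coverage declarations above are visible in this translation unit.
    std::unordered_set<uintptr_t> collectHitAddresses()
    {
        std::unordered_set<uintptr_t> hits;
        for (uintptr_t address : getCurrentCoverage())
            if (address != 0)   /// the span is a sparse map, mostly zeros
                hits.insert(address);
        return hits;
    }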
@@ -1,8 +1,11 @@
-#include <stdexcept>
-#include <fstream>
 #include <base/getMemoryAmount.h>
+
 #include <base/getPageSize.h>

+#include <fstream>
+#include <sstream>
+#include <stdexcept>
+
 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/param.h>
@@ -11,6 +14,80 @@
 #endif

+
+namespace
+{
+
+std::optional<uint64_t> getCgroupsV2MemoryLimit()
+{
+#if defined(OS_LINUX)
+    const std::filesystem::path default_cgroups_mount = "/sys/fs/cgroup";
+
+    /// This file exists iff the host has cgroups v2 enabled.
+    std::ifstream controllers_file(default_cgroups_mount / "cgroup.controllers");
+    if (!controllers_file.is_open())
+        return {};
+
+    /// Make sure that the memory controller is enabled.
+    /// - cgroup.controllers defines which controllers *can* be enabled.
+    /// - cgroup.subtree_control defines which controllers *are* enabled.
+    /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
+    /// Caveat: nested groups may disable controllers. For simplicity, check only the top-level group.
+    /// ReadBufferFromFile subtree_control_file(default_cgroups_mount / "cgroup.subtree_control");
+    /// std::string subtree_control;
+    /// readString(subtree_control, subtree_control_file);
+    /// if (subtree_control.find("memory") == std::string::npos)
+    ///     return {};
+    std::ifstream subtree_control_file(default_cgroups_mount / "cgroup.subtree_control");
+    std::stringstream subtree_control_buf;
+    subtree_control_buf << subtree_control_file.rdbuf();
+    std::string subtree_control = subtree_control_buf.str();
+    if (subtree_control.find("memory") == std::string::npos)
+        return {};
+
+    /// Identify the cgroup the process belongs to
+    /// All PIDs assigned to a cgroup are in /sys/fs/cgroups/{cgroup_name}/cgroup.procs
+    /// A simpler way to get the membership is:
+    std::ifstream cgroup_name_file("/proc/self/cgroup");
+    if (!cgroup_name_file.is_open())
+        return {};
+
+    std::stringstream cgroup_name_buf;
+    cgroup_name_buf << cgroup_name_file.rdbuf();
+    std::string cgroup_name = cgroup_name_buf.str();
+    if (!cgroup_name.empty() && cgroup_name.back() == '\n')
+        cgroup_name.pop_back(); /// remove trailing newline, if any
+    /// With cgroups v2, there will be a *single* line with prefix "0::/"
+    const std::string v2_prefix = "0::/";
+    if (!cgroup_name.starts_with(v2_prefix))
+        return {};
+    cgroup_name = cgroup_name.substr(v2_prefix.length());
+
+    std::filesystem::path current_cgroup = cgroup_name.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup_name);
+
+    /// Open the bottom-most nested memory limit setting file. If there is no such file at the current
+    /// level, try again at the parent level as memory settings are inherited.
+    while (current_cgroup != default_cgroups_mount.parent_path())
+    {
+        std::ifstream setting_file(current_cgroup / "memory.max");
+        if (setting_file.is_open())
+        {
+            uint64_t value;
+            if (setting_file >> value)
+                return {value};
+            else
+                return {}; /// e.g. the cgroups default "max"
+        }
+        current_cgroup = current_cgroup.parent_path();
+    }
+
+    return {};
+#else
+    return {};
+#endif
+}
+
+}
+
 /** Returns the size of physical memory (RAM) in bytes.
  * Returns 0 on unsupported platform
  */
@@ -26,34 +103,27 @@ uint64_t getMemoryAmountOrZero()

     uint64_t memory_amount = num_pages * page_size;

-#if defined(OS_LINUX)
-    // Try to lookup at the Cgroup limit
-
-    // CGroups v2
-    std::ifstream cgroupv2_limit("/sys/fs/cgroup/memory.max");
-    if (cgroupv2_limit.is_open())
-    {
-        uint64_t memory_limit = 0;
-        cgroupv2_limit >> memory_limit;
-        if (memory_limit > 0 && memory_limit < memory_amount)
-            memory_amount = memory_limit;
-    }
+    /// Respect the memory limit set by cgroups v2.
+    auto limit_v2 = getCgroupsV2MemoryLimit();
+    if (limit_v2.has_value() && *limit_v2 < memory_amount)
+        memory_amount = *limit_v2;
     else
     {
-        // CGroups v1
-        std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
-        if (cgroup_limit.is_open())
+        /// Cgroups v1 were replaced by v2 in 2015. The only reason we keep supporting v1 is that the transition to v2
+        /// has been slow. Caveat : Hierarchical groups as in v2 are not supported for v1, the location of the memory
+        /// limit (virtual) file is hard-coded.
+        /// TODO: check at the end of 2024 if we can get rid of v1.
+        std::ifstream limit_file_v1("/sys/fs/cgroup/memory/memory.limit_in_bytes");
+        if (limit_file_v1.is_open())
         {
-            uint64_t memory_limit = 0; // in case of read error
-            cgroup_limit >> memory_limit;
-            if (memory_limit > 0 && memory_limit < memory_amount)
-                memory_amount = memory_limit;
+            uint64_t limit_v1;
+            if (limit_file_v1 >> limit_v1)
+                if (limit_v1 < memory_amount)
+                    memory_amount = limit_v1;
         }
     }
-#endif

     return memory_amount;
 }
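A minimal usage sketch (illustrative, not part of the diff): getMemoryAmountOrZero() already returns the smaller of physical RAM and the cgroup limit, so a caller only has to handle the zero case. The reporting helper below is hypothetical.

    #include <base/getMemoryAmount.h>
    #include <cstdio>

    /// Hypothetical diagnostic: report the memory amount the process may actually use.
    void reportUsableMemory()
    {
        uint64_t bytes = getMemoryAmountOrZero();   /// 0 on unsupported platforms
        if (bytes == 0)
            std::printf("memory amount is unknown on this platform\n");
        else
            std::printf("usable memory: %llu bytes\n", static_cast<unsigned long long>(bytes));
    }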
base/base/int8_to_string.cpp (new file, 9 lines)
@@ -0,0 +1,9 @@
+#include <base/int8_to_string.h>
+
+namespace std
+{
+std::string to_string(Int8 v) /// NOLINT (cert-dcl58-cpp)
+{
+    return to_string(int8_t{v});
+}
+}
base/base/int8_to_string.h (new file, 17 lines)
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <base/defines.h>
+#include <base/types.h>
+
+#include <fmt/format.h>
+
+template <>
+struct fmt::formatter<Int8> : fmt::formatter<int8_t>
+{
+};
+
+
+namespace std
+{
+std::string to_string(Int8 v); /// NOLINT (cert-dcl58-cpp)
+}
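A usage sketch (illustrative only): with the formatter specialization and the std::to_string overload above, an Int8 backed by _BitInt(8) prints like an ordinary integer. The values are made up.

    #include <base/int8_to_string.h>
    #include <fmt/format.h>
    #include <iostream>

    int main()
    {
        Int8 x{-5};
        std::cout << std::to_string(x) << '\n';      /// new std::to_string(Int8) overload
        std::cout << fmt::format("{}", x) << '\n';   /// fmt::formatter<Int8> inherits from formatter<int8_t>
    }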
@@ -64,19 +64,14 @@ using ComparatorWrapper = Comparator;

 #include <miniselect/floyd_rivest_select.h>

-template <typename RandomIt>
-void nth_element(RandomIt first, RandomIt nth, RandomIt last)
+template <typename RandomIt, typename Compare>
+void nth_element(RandomIt first, RandomIt nth, RandomIt last, Compare compare)
 {
-    using value_type = typename std::iterator_traits<RandomIt>::value_type;
-    using comparator = std::less<value_type>;
-
-    comparator compare;
-    ComparatorWrapper<comparator> compare_wrapper = compare;
-
 #ifndef NDEBUG
     ::shuffle(first, last);
 #endif

+    ComparatorWrapper<Compare> compare_wrapper = compare;
     ::miniselect::floyd_rivest_select(first, nth, last, compare_wrapper);

 #ifndef NDEBUG
@@ -87,6 +82,15 @@ void nth_element(RandomIt first, RandomIt nth, RandomIt last)
 #endif
 }

+template <typename RandomIt>
+void nth_element(RandomIt first, RandomIt nth, RandomIt last)
+{
+    using value_type = typename std::iterator_traits<RandomIt>::value_type;
+    using comparator = std::less<value_type>;
+
+    ::nth_element(first, nth, last, comparator());
+}
+
 template <typename RandomIt, typename Compare>
 void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
 {
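Illustrative usage of the new comparator-taking overload (a sketch under the assumption that the header above is included; the example itself is not from the diff): selecting a median with a custom ordering.

    #include <cstdlib>
    #include <vector>

    /// Hypothetical example: find the median element by absolute value.
    int medianByAbs(std::vector<int> values)
    {
        auto abs_less = [](int lhs, int rhs) { return std::abs(lhs) < std::abs(rhs); };
        ::nth_element(values.begin(), values.begin() + values.size() / 2, values.end(), abs_less);
        return values[values.size() / 2];
    }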
@@ -3,14 +3,29 @@
 #include <cstdint>
 #include <string>

-/// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713
+/// Using char8_t more strict aliasing (https://stackoverflow.com/a/57453713)
 using UInt8 = char8_t;

+/// Same for using signed _BitInt(8) (there isn't a signed char8_t, which would be more convenient)
+/// See https://godbolt.org/z/fafnWEnnf
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wbit-int-extension"
+using Int8 = signed _BitInt(8);
+#pragma clang diagnostic pop
+
+namespace std
+{
+    template <>
+    struct hash<Int8> /// NOLINT (cert-dcl58-cpp)
+    {
+        size_t operator()(const Int8 x) const { return std::hash<int8_t>()(int8_t{x}); }
+    };
+}
+
 using UInt16 = uint16_t;
 using UInt32 = uint32_t;
 using UInt64 = uint64_t;

-using Int8 = int8_t;
 using Int16 = int16_t;
 using Int32 = int32_t;
 using Int64 = int64_t;
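Why the std::hash specialization matters, as a sketch (illustrative; assumes Int8 is now the _BitInt(8) alias defined above and that the types header is included): without it, standard containers keyed on Int8 would fail to instantiate.

    #include <unordered_set>

    /// With hash<Int8> specialized above, Int8 works directly as a hash-container key.
    bool containsValue(const std::unordered_set<Int8> & seen, Int8 value)
    {
        return seen.contains(value);
    }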
@@ -6,6 +6,7 @@

 #include "throwError.h"

+#include <bit>
 #include <cmath>
 #include <cfloat>
 #include <cassert>
@@ -22,6 +22,7 @@
 #include <cstddef>
 #include <map>
 #include <vector>

 #include "Poco/Channel.h"
 #include "Poco/Format.h"
 #include "Poco/Foundation.h"
@@ -33,7 +34,8 @@ namespace Poco


 class Exception;
+class Logger;
+using LoggerPtr = std::shared_ptr<Logger>;

 class Foundation_API Logger : public Channel
     /// Logger is a special Channel that acts as the main
@@ -870,21 +872,21 @@ public:
     /// If the Logger does not yet exist, it is created, based
     /// on its parent logger.

-    static Logger & unsafeGet(const std::string & name);
-    /// Returns a reference to the Logger with the given name.
+    static LoggerPtr getShared(const std::string & name, bool should_be_owned_by_shared_ptr_if_created = true);
+    /// Returns a shared pointer to the Logger with the given name.
     /// If the Logger does not yet exist, it is created, based
     /// on its parent logger.
-    ///
-    /// WARNING: This method is not thread safe. You should
-    /// probably use get() instead.
-    /// The only time this method should be used is during
-    /// program initialization, when only one thread is running.

     static Logger & create(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
     /// Creates and returns a reference to a Logger with the
     /// given name. The Logger's Channel and log level as set as
     /// specified.

+    static LoggerPtr createShared(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
+    /// Creates and returns a shared pointer to a Logger with the
+    /// given name. The Logger's Channel and log level as set as
+    /// specified.
+
     static Logger & root();
     /// Returns a reference to the root logger, which is the ultimate
     /// ancestor of all Loggers.
@@ -893,13 +895,6 @@ public:
     /// Returns a pointer to the Logger with the given name if it
     /// exists, or a null pointer otherwise.

-    static void destroy(const std::string & name);
-    /// Destroys the logger with the specified name. Does nothing
-    /// if the logger is not found.
-    ///
-    /// After a logger has been destroyed, all references to it
-    /// become invalid.
-
     static void shutdown();
     /// Shuts down the logging framework and releases all
     /// Loggers.
@@ -928,9 +923,17 @@ public:

     static const std::string ROOT; /// The name of the root logger ("").

-protected:
-    typedef std::map<std::string, Logger *> LoggerMap;
+public:
+    struct LoggerEntry
+    {
+        Poco::Logger * logger;
+        bool owned_by_shared_ptr = false;
+    };
+
+    using LoggerMap = std::unordered_map<std::string, LoggerEntry>;
+    using LoggerMapIterator = LoggerMap::iterator;

+protected:
     Logger(const std::string & name, Channel * pChannel, int level);
     ~Logger();

@@ -938,11 +941,16 @@ protected:
     void log(const std::string & text, Message::Priority prio, const char * file, int line);

     static std::string format(const std::string & fmt, int argc, std::string argv[]);
-    static Logger & parent(const std::string & name);
-    static void add(Logger * pLogger);
-    static Logger * find(const std::string & name);

 private:
+    static std::pair<Logger::LoggerMapIterator, bool> unsafeGet(const std::string & name, bool get_shared);
+    static Logger * unsafeGetRawPtr(const std::string & name);
+    static std::pair<LoggerMapIterator, bool> unsafeCreate(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
+    static Logger & parent(const std::string & name);
+    static std::pair<LoggerMapIterator, bool> add(Logger * pLogger);
+    static std::optional<LoggerMapIterator> find(const std::string & name);
+    static Logger * findRawPtr(const std::string & name);
+
     Logger();
     Logger(const Logger &);
     Logger & operator=(const Logger &);
@@ -950,9 +958,6 @@ private:
     std::string _name;
     Channel * _pChannel;
     std::atomic_int _level;
-
-    static LoggerMap * _pLoggerMap;
-    static Mutex _mapMtx;
 };
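A brief usage sketch (illustrative; the component name is made up): getShared hands out a LoggerPtr, and when the logger was created through this call the shared pointer participates in its lifetime.

    #include "Poco/Logger.h"

    void configureComponentLogger()
    {
        /// Created on first use; the returned shared pointer may own the logger.
        Poco::LoggerPtr log = Poco::Logger::getShared("MyComponent");
        log->information("component initialized");
    }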
@@ -38,15 +38,15 @@ public:
     /// Creates the RefCountedObject.
     /// The initial reference count is one.

-    void duplicate() const;
-    /// Increments the object's reference count.
+    size_t duplicate() const;
+    /// Increments the object's reference count, returns reference count before call.

-    void release() const throw();
+    size_t release() const throw();
     /// Decrements the object's reference count
     /// and deletes the object if the count
-    /// reaches zero.
+    /// reaches zero, returns reference count before call.

-    int referenceCount() const;
+    size_t referenceCount() const;
     /// Returns the reference count.

 protected:
@@ -57,36 +57,40 @@ private:
     RefCountedObject(const RefCountedObject &);
     RefCountedObject & operator=(const RefCountedObject &);

-    mutable AtomicCounter _counter;
+    mutable std::atomic<size_t> _counter;
 };


 //
 // inlines
 //
-inline int RefCountedObject::referenceCount() const
+inline size_t RefCountedObject::referenceCount() const
 {
-    return _counter.value();
+    return _counter.load(std::memory_order_acquire);
 }


-inline void RefCountedObject::duplicate() const
+inline size_t RefCountedObject::duplicate() const
 {
-    ++_counter;
+    return _counter.fetch_add(1, std::memory_order_acq_rel);
 }


-inline void RefCountedObject::release() const throw()
+inline size_t RefCountedObject::release() const throw()
 {
+    size_t reference_count_before = _counter.fetch_sub(1, std::memory_order_acq_rel);
+
     try
     {
-        if (--_counter == 0)
+        if (reference_count_before == 1)
             delete this;
     }
     catch (...)
     {
         poco_unexpected();
     }
+
+    return reference_count_before;
 }
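The key detail above is that fetch_sub returns the value before the decrement, so the object is deleted exactly when the previous count was 1. A standalone sketch of the same idiom (illustrative only; the type is hypothetical and must be heap-allocated):

    #include <atomic>
    #include <cstddef>

    struct Counted
    {
        mutable std::atomic<size_t> counter{1};

        /// Returns the count before the decrement; deletes the object on the last release.
        size_t release() const
        {
            size_t before = counter.fetch_sub(1, std::memory_order_acq_rel);
            if (before == 1)
                delete this;   /// only valid for heap-allocated instances
            return before;
        }

        virtual ~Counted() = default;
    };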
@@ -20,12 +20,31 @@
 #include "Poco/NumberParser.h"
 #include "Poco/String.h"

+#include <cassert>
+#include <mutex>
+
+namespace
+{
+
+std::mutex & getLoggerMutex()
+{
+    auto get_logger_mutex_placeholder_memory = []()
+    {
+        static char buffer[sizeof(std::mutex)]{};
+        return buffer;
+    };
+
+    static std::mutex * logger_mutex = new (get_logger_mutex_placeholder_memory()) std::mutex();
+    return *logger_mutex;
+}
+
+Poco::Logger::LoggerMap * _pLoggerMap = nullptr;
+
+}
+
 namespace Poco {


-Logger::LoggerMap* Logger::_pLoggerMap = 0;
-Mutex Logger::_mapMtx;
 const std::string Logger::ROOT;
@@ -73,7 +92,7 @@ void Logger::setProperty(const std::string& name, const std::string& value)
         setChannel(LoggingRegistry::defaultRegistry().channelForName(value));
     else if (name == "level")
         setLevel(value);
     else
         Channel::setProperty(name, value);
 }
@@ -112,17 +131,17 @@ void Logger::dump(const std::string& msg, const void* buffer, std::size_t length

 void Logger::setLevel(const std::string& name, int level)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     if (_pLoggerMap)
     {
         std::string::size_type len = name.length();
-        for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
+        for (auto & it : *_pLoggerMap)
         {
             if (len == 0 ||
-                (it->first.compare(0, len, name) == 0 && (it->first.length() == len || it->first[len] == '.')))
+                (it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.')))
             {
-                it->second->setLevel(level);
+                it.second.logger->setLevel(level);
             }
         }
     }
@@ -131,17 +150,17 @@ void Logger::setLevel(const std::string& name, int level)

 void Logger::setChannel(const std::string& name, Channel* pChannel)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     if (_pLoggerMap)
     {
         std::string::size_type len = name.length();
-        for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
+        for (auto & it : *_pLoggerMap)
         {
             if (len == 0 ||
-                (it->first.compare(0, len, name) == 0 && (it->first.length() == len || it->first[len] == '.')))
+                (it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.')))
             {
-                it->second->setChannel(pChannel);
+                it.second.logger->setChannel(pChannel);
             }
         }
     }
@@ -150,17 +169,17 @@ void Logger::setChannel(const std::string& name, Channel* pChannel)

 void Logger::setProperty(const std::string& loggerName, const std::string& propertyName, const std::string& value)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     if (_pLoggerMap)
     {
         std::string::size_type len = loggerName.length();
-        for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
+        for (auto & it : *_pLoggerMap)
         {
             if (len == 0 ||
-                (it->first.compare(0, len, loggerName) == 0 && (it->first.length() == len || it->first[len] == '.')))
+                (it.first.compare(0, len, loggerName) == 0 && (it.first.length() == len || it.first[len] == '.')))
             {
-                it->second->setProperty(propertyName, value);
+                it.second.logger->setProperty(propertyName, value);
             }
         }
     }
@@ -280,108 +299,200 @@ void Logger::formatDump(std::string& message, const void* buffer, std::size_t length
 }


-Logger& Logger::get(const std::string& name)
-{
-    Mutex::ScopedLock lock(_mapMtx);
-
-    return unsafeGet(name);
-}
-
-
-Logger& Logger::unsafeGet(const std::string& name)
-{
-    Logger* pLogger = find(name);
-    if (!pLogger)
-    {
-        if (name == ROOT)
-        {
-            pLogger = new Logger(name, 0, Message::PRIO_INFORMATION);
-        }
-        else
-        {
-            Logger& par = parent(name);
-            pLogger = new Logger(name, par.getChannel(), par.getLevel());
-        }
-        add(pLogger);
-    }
-    return *pLogger;
-}
+namespace
+{
+
+struct LoggerDeleter
+{
+    void operator()(Poco::Logger * logger)
+    {
+        std::lock_guard<std::mutex> lock(getLoggerMutex());
+
+        /// If logger infrastructure is destroyed just decrement logger reference count
+        if (!_pLoggerMap)
+        {
+            logger->release();
+            return;
+        }
+
+        auto it = _pLoggerMap->find(logger->name());
+        assert(it != _pLoggerMap->end());
+
+        /** If reference count is 1, this means this shared pointer owns logger
+          * and need destroy it.
+          */
+        size_t reference_count_before_release = logger->release();
+        if (reference_count_before_release == 1)
+        {
+            assert(it->second.owned_by_shared_ptr);
+            _pLoggerMap->erase(it);
+        }
+    }
+};
+
+inline LoggerPtr makeLoggerPtr(Logger & logger, bool owned_by_shared_ptr)
+{
+    if (owned_by_shared_ptr)
+        return LoggerPtr(&logger, LoggerDeleter());
+
+    return LoggerPtr(std::shared_ptr<void>{}, &logger);
+}
+
+}
+
+
+Logger& Logger::get(const std::string& name)
+{
+    std::lock_guard<std::mutex> lock(getLoggerMutex());
+
+    auto [it, inserted] = unsafeGet(name, false /*get_shared*/);
+    return *it->second.logger;
+}
+
+
+LoggerPtr Logger::getShared(const std::string & name, bool should_be_owned_by_shared_ptr_if_created)
+{
+    std::lock_guard<std::mutex> lock(getLoggerMutex());
+    auto [it, inserted] = unsafeGet(name, true /*get_shared*/);
+
+    /** If during `unsafeGet` logger was created, then this shared pointer owns it.
+      * If logger was already created, then this shared pointer does not own it.
+      */
+    if (inserted && should_be_owned_by_shared_ptr_if_created)
+        it->second.owned_by_shared_ptr = true;
+
+    return makeLoggerPtr(*it->second.logger, it->second.owned_by_shared_ptr);
+}
+
+
+std::pair<Logger::LoggerMapIterator, bool> Logger::unsafeGet(const std::string& name, bool get_shared)
+{
+    std::optional<Logger::LoggerMapIterator> optional_logger_it = find(name);
+
+    if (optional_logger_it)
+    {
+        auto & logger_it = *optional_logger_it;
+
+        if (logger_it->second.owned_by_shared_ptr)
+        {
+            logger_it->second.logger->duplicate();
+
+            if (!get_shared)
+                logger_it->second.owned_by_shared_ptr = false;
+        }
+    }
+
+    if (!optional_logger_it)
+    {
+        Logger * logger = nullptr;
+
+        if (name == ROOT)
+        {
+            logger = new Logger(name, nullptr, Message::PRIO_INFORMATION);
+        }
+        else
+        {
+            Logger& par = parent(name);
+            logger = new Logger(name, par.getChannel(), par.getLevel());
+        }
+
+        return add(logger);
+    }
+
+    return std::make_pair(*optional_logger_it, false);
+}
+
+
+Logger * Logger::unsafeGetRawPtr(const std::string & name)
+{
+    return unsafeGet(name, false /*get_shared*/).first->second.logger;
+}


 Logger& Logger::create(const std::string& name, Channel* pChannel, int level)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

-    if (find(name)) throw ExistsException();
-    Logger* pLogger = new Logger(name, pChannel, level);
-    add(pLogger);
-    return *pLogger;
+    return *unsafeCreate(name, pChannel, level).first->second.logger;
+}
+
+
+LoggerPtr Logger::createShared(const std::string & name, Channel * pChannel, int level)
+{
+    std::lock_guard<std::mutex> lock(getLoggerMutex());
+
+    auto [it, inserted] = unsafeCreate(name, pChannel, level);
+    it->second.owned_by_shared_ptr = true;
+
+    return makeLoggerPtr(*it->second.logger, it->second.owned_by_shared_ptr);
 }


 Logger& Logger::root()
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

-    return unsafeGet(ROOT);
+    return *unsafeGetRawPtr(ROOT);
 }


 Logger* Logger::has(const std::string& name)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

-    return find(name);
+    auto optional_it = find(name);
+    if (!optional_it)
+        return nullptr;
+
+    return (*optional_it)->second.logger;
 }


 void Logger::shutdown()
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     if (_pLoggerMap)
     {
-        for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it)
+        for (auto & it : *_pLoggerMap)
         {
-            it->second->release();
+            if (it.second.owned_by_shared_ptr)
+                continue;
+
+            it.second.logger->release();
         }

         delete _pLoggerMap;
-        _pLoggerMap = 0;
+        _pLoggerMap = nullptr;
     }
 }


-Logger* Logger::find(const std::string& name)
+std::optional<Logger::LoggerMapIterator> Logger::find(const std::string& name)
 {
     if (_pLoggerMap)
     {
         LoggerMap::iterator it = _pLoggerMap->find(name);
         if (it != _pLoggerMap->end())
-            return it->second;
+            return it;
+
+        return {};
     }
-    return 0;
+
+    return {};
 }


-void Logger::destroy(const std::string& name)
+Logger * Logger::findRawPtr(const std::string & name)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    auto optional_it = find(name);
+    if (!optional_it)
+        return nullptr;

-    if (_pLoggerMap)
-    {
-        LoggerMap::iterator it = _pLoggerMap->find(name);
-        if (it != _pLoggerMap->end())
-        {
-            it->second->release();
-            _pLoggerMap->erase(it);
-        }
-    }
+    return (*optional_it)->second.logger;
 }


 void Logger::names(std::vector<std::string>& names)
 {
-    Mutex::ScopedLock lock(_mapMtx);
+    std::lock_guard<std::mutex> lock(getLoggerMutex());

     names.clear();
     if (_pLoggerMap)
@@ -394,19 +505,27 @@ void Logger::names(std::vector<std::string>& names)
 }


+std::pair<Logger::LoggerMapIterator, bool> Logger::unsafeCreate(const std::string & name, Channel * pChannel, int level)
+{
+    if (find(name)) throw ExistsException();
+    Logger* pLogger = new Logger(name, pChannel, level);
+    return add(pLogger);
+}
+
+
 Logger& Logger::parent(const std::string& name)
 {
     std::string::size_type pos = name.rfind('.');
     if (pos != std::string::npos)
     {
         std::string pname = name.substr(0, pos);
-        Logger* pParent = find(pname);
+        Logger* pParent = findRawPtr(pname);
         if (pParent)
             return *pParent;
         else
             return parent(pname);
     }
-    else return unsafeGet(ROOT);
+    else return *unsafeGetRawPtr(ROOT);
 }
@@ -474,11 +593,14 @@ namespace
 }


-void Logger::add(Logger* pLogger)
+std::pair<Logger::LoggerMapIterator, bool> Logger::add(Logger* pLogger)
 {
     if (!_pLoggerMap)
-        _pLoggerMap = new LoggerMap;
+        _pLoggerMap = new Logger::LoggerMap;

-    _pLoggerMap->insert(LoggerMap::value_type(pLogger->name(), pLogger));
+    auto result = _pLoggerMap->emplace(pLogger->name(), LoggerEntry{pLogger, false /*owned_by_shared_ptr*/});
+    assert(result.second);
+    return result;
 }
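A note on makeLoggerPtr above: when the logger is not owned by the shared pointer, the code uses the shared_ptr aliasing constructor with an empty control block, which yields a non-owning handle. A standalone sketch of that idiom (illustrative only; Widget and global_widget are made up):

    #include <memory>

    struct Widget { int value = 42; };

    Widget global_widget;

    /// The aliasing constructor with an empty shared_ptr<void> produces a shared_ptr
    /// that points at an existing object without ever deleting it.
    std::shared_ptr<Widget> nonOwningHandle()
    {
        return std::shared_ptr<Widget>(std::shared_ptr<void>{}, &global_widget);
    }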
@@ -2,11 +2,11 @@

 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54482)
+SET(VERSION_REVISION 54483)
 SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 1)
+SET(VERSION_MINOR 2)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH a2faa65b080a587026c86844f3a20c74d23a86f8)
-SET(VERSION_DESCRIBE v24.1.1.1-testing)
-SET(VERSION_STRING 24.1.1.1)
+SET(VERSION_GITHASH 5a024dfc0936e062770d0cfaad0805b57c1fba17)
+SET(VERSION_DESCRIBE v24.2.1.1-testing)
+SET(VERSION_STRING 24.2.1.1)
 # end of autochange
@@ -63,14 +63,14 @@ endif()
 option(WITH_COVERAGE "Instrumentation for code coverage with default implementation" OFF)

 if (WITH_COVERAGE)
-    message (INFORMATION "Enabled instrumentation for code coverage")
+    message (STATUS "Enabled instrumentation for code coverage")
     set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
 endif()

 option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)

 if (SANITIZE_COVERAGE)
-    message (INFORMATION "Enabled instrumentation for code coverage")
+    message (STATUS "Enabled instrumentation for code coverage")

     # We set this define for whole build to indicate that at least some parts are compiled with coverage.
     # And to expose it in system.build_options.
@@ -79,7 +79,10 @@ if (SANITIZE_COVERAGE)

     # But the actual coverage will be enabled on per-library basis: for ClickHouse code, but not for 3rd-party.
     set (COVERAGE_FLAGS "-fsanitize-coverage=trace-pc-guard,pc-table")
-endif()

 set (WITHOUT_COVERAGE_FLAGS "-fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table")
 set (WITHOUT_COVERAGE_FLAGS_LIST -fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table)
+else()
+    set (WITHOUT_COVERAGE_FLAGS "")
+    set (WITHOUT_COVERAGE_FLAGS_LIST "")
+endif()
contrib/aws (vendored): Subproject commit ca02358dcc7ce3ab733dd4cbcc32734eecfa4ee3 -> 9eb5097a0abfa837722cca7a5114a25837817bf2
contrib/aws-c-auth (vendored): Subproject commit 97133a2b5dbca1ccdf88cd6f44f39d0531d27d12 -> baeffa791d9d1cf61460662a6d9ac2186aaf05df
contrib/aws-c-cal (vendored): Subproject commit 85dd7664b786a389c6fb1a6f031ab4bb2282133d -> 9453687ff5493ba94eaccf8851200565c4364c77
contrib/aws-c-common (vendored): Subproject commit 45dcb2849c891dba2100b270b4676765c92949ff -> 80f21b3cac5ac51c6b8a62c7d2a5ef58a75195ee
contrib/aws-c-compression (vendored): Subproject commit b517b7decd0dac30be2162f5186c250221c53aff -> 99ec79ee2970f1a045d4ced1501b97ee521f2f85
contrib/aws-c-event-stream (vendored): Subproject commit 2f9b60c42f90840ec11822acda3d8cdfa97a773d -> 08f24e384e5be20bcffa42b49213d24dad7881ae
contrib/aws-c-http (vendored): Subproject commit dd34461987947672444d0bc872c5a733dfdb9711 -> a082f8a2067e4a31db73f1d4ffd702a8dc0f7089
contrib/aws-c-io (vendored): Subproject commit d58ed4f272b1cb4f89ac9196526ceebe5f2b0d89 -> 11ce3c750a1dac7b04069fc5bff89e97e91bad4d
contrib/aws-c-mqtt (vendored): Subproject commit 33c3455cec82b16feb940e12006cefd7b3ef4194 -> 6d36cd3726233cb757468d0ea26f6cd8dad151ec
contrib/aws-c-s3 (vendored): Subproject commit d7bfe602d6925948f1fff95784e3613cca6a3900 -> de36fee8fe7ab02f10987877ae94a805bf440c1f
contrib/aws-c-sdkutils (vendored): Subproject commit 208a701fa01e99c7c8cc3dcebc8317da71362972 -> fd8c0ba2e233997eaaefe82fb818b8b444b956d3
contrib/aws-checksums (vendored): Subproject commit ad53be196a25bbefa3700a01187fdce573a7d2d0 -> 321b805559c8e911be5bddba13fcbd222a3e2d3a
@@ -25,6 +25,7 @@ include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsFeatureTests.cmake")
 include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadAffinity.cmake")
 include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadName.cmake")
 include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsSIMD.cmake")
+include("${ClickHouse_SOURCE_DIR}/contrib/aws-crt-cpp/cmake/AwsGetVersion.cmake")


 # Gather sources and options.
@@ -35,6 +36,8 @@ set(AWS_PUBLIC_COMPILE_DEFS)
 set(AWS_PRIVATE_COMPILE_DEFS)
 set(AWS_PRIVATE_LIBS)

+list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DINTEL_NO_ITTNOTIFY_API")
+
 if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
     list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DDEBUG_BUILD")
 endif()
@@ -85,14 +88,20 @@ file(GLOB AWS_SDK_CORE_SRC
     "${AWS_SDK_CORE_DIR}/source/external/cjson/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/external/tinyxml2/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/http/*.cpp"
+    "${AWS_SDK_CORE_DIR}/source/http/crt/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/http/standard/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/internal/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/monitoring/*.cpp"
+    "${AWS_SDK_CORE_DIR}/source/net/*.cpp"
+    "${AWS_SDK_CORE_DIR}/source/net/linux-shared/*.cpp"
+    "${AWS_SDK_CORE_DIR}/source/platform/linux-shared/*.cpp"
+    "${AWS_SDK_CORE_DIR}/source/smithy/tracing/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/utils/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/utils/base64/*.cpp"
+    "${AWS_SDK_CORE_DIR}/source/utils/component-registry/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/utils/crypto/*.cpp"
-    "${AWS_SDK_CORE_DIR}/source/utils/crypto/openssl/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/utils/crypto/factory/*.cpp"
+    "${AWS_SDK_CORE_DIR}/source/utils/crypto/openssl/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/utils/event/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/utils/json/*.cpp"
     "${AWS_SDK_CORE_DIR}/source/utils/logging/*.cpp"
@@ -115,9 +124,8 @@ OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
 configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
     "${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)

-list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MAJOR=1")
-list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MINOR=10")
-list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_PATCH=36")
+aws_get_version(AWS_CRT_CPP_VERSION_MAJOR AWS_CRT_CPP_VERSION_MINOR AWS_CRT_CPP_VERSION_PATCH FULL_VERSION GIT_HASH)
+configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${AWS_CRT_DIR}/include/aws/crt/Config.h" @ONLY)

 list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})
@@ -176,6 +184,7 @@ file(GLOB AWS_COMMON_SRC
     "${AWS_COMMON_DIR}/source/*.c"
     "${AWS_COMMON_DIR}/source/external/*.c"
     "${AWS_COMMON_DIR}/source/posix/*.c"
+    "${AWS_COMMON_DIR}/source/linux/*.c"
 )

 file(GLOB AWS_COMMON_ARCH_SRC
contrib/aws-crt-cpp (vendored): Subproject commit 8a301b7e842f1daed478090c869207300972379f -> f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0
contrib/aws-s2n-tls (vendored): Subproject commit 71f4794b7580cf780eb4aca77d69eded5d3c7bb4 -> 9a1e75454023e952b366ce1eab9c54007250119f
@@ -1,8 +1,5 @@
 if (NOT ENABLE_LIBRARIES)
     set(DEFAULT_ENABLE_RUST FALSE)
-elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "aarch64"))
-    message(STATUS "Rust is not available on aarch64-apple-darwin")
-    set(DEFAULT_ENABLE_RUST FALSE)
 else()
     list (APPEND CMAKE_MODULE_PATH "${ClickHouse_SOURCE_DIR}/contrib/corrosion/cmake")
     find_package(Rust)
@@ -19,27 +16,30 @@ message(STATUS "Checking Rust toolchain for current target")

 # See https://doc.rust-lang.org/nightly/rustc/platform-support.html

-if((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
-    set(Rust_CARGO_TARGET "x86_64-unknown-linux-musl")
-elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64")
-    set(Rust_CARGO_TARGET "x86_64-unknown-linux-gnu")
-elseif((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-aarch64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
-    set(Rust_CARGO_TARGET "aarch64-unknown-linux-musl")
-elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-aarch64")
-    set(Rust_CARGO_TARGET "aarch64-unknown-linux-gnu")
-elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
-    set(Rust_CARGO_TARGET "x86_64-apple-darwin")
-elseif((CMAKE_TOOLCHAIN_FILE MATCHES "freebsd") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
-    set(Rust_CARGO_TARGET "x86_64-unknown-freebsd")
-elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-riscv64")
-    set(Rust_CARGO_TARGET "riscv64gc-unknown-linux-gnu")
-endif()
-
-if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
-    set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
-endif()
-
-message(STATUS "Switched Rust target to ${Rust_CARGO_TARGET}")
+if(DEFINED CMAKE_TOOLCHAIN_FILE)
+    if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
+        set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
+    elseif((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
+        set(Rust_CARGO_TARGET "x86_64-unknown-linux-musl")
+    elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64")
+        set(Rust_CARGO_TARGET "x86_64-unknown-linux-gnu")
+    elseif((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-aarch64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
+        set(Rust_CARGO_TARGET "aarch64-unknown-linux-musl")
+    elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-aarch64")
+        set(Rust_CARGO_TARGET "aarch64-unknown-linux-gnu")
+    elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
+        set(Rust_CARGO_TARGET "x86_64-apple-darwin")
+    elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "aarch64"))
+        set(Rust_CARGO_TARGET "aarch64-apple-darwin")
+    elseif((CMAKE_TOOLCHAIN_FILE MATCHES "freebsd") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
+        set(Rust_CARGO_TARGET "x86_64-unknown-freebsd")
+    elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-riscv64")
+        set(Rust_CARGO_TARGET "riscv64gc-unknown-linux-gnu")
+    else()
+        message(FATAL_ERROR "Unsupported rust target")
+    endif()
+
+    message(STATUS "Switched Rust target to ${Rust_CARGO_TARGET}")
+endif ()

 # FindRust.cmake
 list(APPEND CMAKE_MODULE_PATH "${ClickHouse_SOURCE_DIR}/contrib/corrosion/cmake")
contrib/curl (vendored): Subproject commit d755a5f7c009dd63a61b2c745180d8ba937cbfeb -> 5ce164e0e9290c96eb7d502173426c0a135ec008
contrib/libssh (vendored): Subproject commit 2c76332ef56d90f55965ab24da6b6dbcbef29c4c -> ed4011b91873836713576475a98cd132cd834539
@@ -1,4 +1,4 @@
-option (ENABLE_SSH "Enable support for SSH keys and protocol" ON)
+option (ENABLE_SSH "Enable support for SSH keys and protocol" ${ENABLE_LIBRARIES})

 if (NOT ENABLE_SSH)
     message(STATUS "Not using SSH")
@@ -8,24 +8,12 @@ endif()
 set(LIB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libssh")
 set(LIB_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/libssh")

-project(libssh VERSION 0.9.7 LANGUAGES C)
-
-# global needed variable
-set(APPLICATION_NAME ${PROJECT_NAME})
-
-# SOVERSION scheme: CURRENT.AGE.REVISION
-# If there was an incompatible interface change:
-# Increment CURRENT. Set AGE and REVISION to 0
-# If there was a compatible interface change:
-# Increment AGE. Set REVISION to 0
-# If the source code was changed, but there were no interface changes:
-# Increment REVISION.
-set(LIBRARY_VERSION "4.8.7")
+# Set CMake variables which are used in libssh_version.h.cmake
+project(libssh VERSION 0.9.8 LANGUAGES C)
+set(LIBRARY_VERSION "4.8.8")
 set(LIBRARY_SOVERSION "4")

-# Copy library files to a lib sub-directory
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${LIB_BINARY_DIR}/lib")
-
 set(CMAKE_THREAD_PREFER_PTHREADS ON)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
@@ -33,7 +21,87 @@ set(WITH_ZLIB OFF)
 set(WITH_SYMBOL_VERSIONING OFF)
 set(WITH_SERVER ON)

-include(IncludeSources.cmake)
+set(libssh_SRCS
+    ${LIB_SOURCE_DIR}/src/agent.c
+    ${LIB_SOURCE_DIR}/src/auth.c
+    ${LIB_SOURCE_DIR}/src/base64.c
+    ${LIB_SOURCE_DIR}/src/bignum.c
+    ${LIB_SOURCE_DIR}/src/buffer.c
+    ${LIB_SOURCE_DIR}/src/callbacks.c
+    ${LIB_SOURCE_DIR}/src/channels.c
+    ${LIB_SOURCE_DIR}/src/client.c
+    ${LIB_SOURCE_DIR}/src/config.c
+    ${LIB_SOURCE_DIR}/src/connect.c
+    ${LIB_SOURCE_DIR}/src/connector.c
+    ${LIB_SOURCE_DIR}/src/curve25519.c
+    ${LIB_SOURCE_DIR}/src/dh.c
+    ${LIB_SOURCE_DIR}/src/ecdh.c
+    ${LIB_SOURCE_DIR}/src/error.c
+    ${LIB_SOURCE_DIR}/src/getpass.c
+    ${LIB_SOURCE_DIR}/src/init.c
+    ${LIB_SOURCE_DIR}/src/kdf.c
+    ${LIB_SOURCE_DIR}/src/kex.c
+    ${LIB_SOURCE_DIR}/src/known_hosts.c
+    ${LIB_SOURCE_DIR}/src/knownhosts.c
+    ${LIB_SOURCE_DIR}/src/legacy.c
+    ${LIB_SOURCE_DIR}/src/log.c
+    ${LIB_SOURCE_DIR}/src/match.c
+    ${LIB_SOURCE_DIR}/src/messages.c
+    ${LIB_SOURCE_DIR}/src/misc.c
+    ${LIB_SOURCE_DIR}/src/options.c
+    ${LIB_SOURCE_DIR}/src/packet.c
+    ${LIB_SOURCE_DIR}/src/packet_cb.c
+    ${LIB_SOURCE_DIR}/src/packet_crypt.c
+    ${LIB_SOURCE_DIR}/src/pcap.c
+    ${LIB_SOURCE_DIR}/src/pki.c
+    ${LIB_SOURCE_DIR}/src/pki_container_openssh.c
+    ${LIB_SOURCE_DIR}/src/poll.c
+    ${LIB_SOURCE_DIR}/src/session.c
+    ${LIB_SOURCE_DIR}/src/scp.c
+    ${LIB_SOURCE_DIR}/src/socket.c
+    ${LIB_SOURCE_DIR}/src/string.c
+    ${LIB_SOURCE_DIR}/src/threads.c
+    ${LIB_SOURCE_DIR}/src/wrapper.c
+    ${LIB_SOURCE_DIR}/src/external/bcrypt_pbkdf.c
+    ${LIB_SOURCE_DIR}/src/external/blowfish.c
+    ${LIB_SOURCE_DIR}/src/external/chacha.c
+    ${LIB_SOURCE_DIR}/src/external/poly1305.c
+    ${LIB_SOURCE_DIR}/src/chachapoly.c
+    ${LIB_SOURCE_DIR}/src/config_parser.c
+    ${LIB_SOURCE_DIR}/src/token.c
+    ${LIB_SOURCE_DIR}/src/pki_ed25519_common.c
+
+    ${LIB_SOURCE_DIR}/src/threads/noop.c
+    ${LIB_SOURCE_DIR}/src/threads/pthread.c
+
+    # LIBCRYPT specific
+    ${libssh_SRCS}
+    ${LIB_SOURCE_DIR}/src/threads/libcrypto.c
+    ${LIB_SOURCE_DIR}/src/pki_crypto.c
+    ${LIB_SOURCE_DIR}/src/ecdh_crypto.c
+    ${LIB_SOURCE_DIR}/src/libcrypto.c
+    ${LIB_SOURCE_DIR}/src/dh_crypto.c
+
+    ${LIB_SOURCE_DIR}/src/options.c
+    ${LIB_SOURCE_DIR}/src/server.c
+    ${LIB_SOURCE_DIR}/src/bind.c
+    ${LIB_SOURCE_DIR}/src/bind_config.c
+)
+
+if (NOT (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC))
+    add_compile_definitions(USE_BORINGSSL=1)
+endif()
+
+configure_file(${LIB_SOURCE_DIR}/include/libssh/libssh_version.h.cmake ${LIB_BINARY_DIR}/include/libssh/libssh_version.h @ONLY)
+
+add_library(_ssh STATIC ${libssh_SRCS})
+add_library(ch_contrib::ssh ALIAS _ssh)
+
+target_link_libraries(_ssh PRIVATE OpenSSL::Crypto)
+
+target_include_directories(_ssh PUBLIC "${LIB_SOURCE_DIR}/include" "${LIB_BINARY_DIR}/include")
+
+# These headers need to be generated using the native build system on each platform.
 if (OS_LINUX)
     if (ARCH_AMD64)
         if (USE_MUSL)
@@ -63,7 +131,3 @@ elseif (OS_FREEBSD)
 else ()
     message(FATAL_ERROR "Platform is not supported")
 endif()
-
-configure_file(${LIB_SOURCE_DIR}/include/libssh/libssh_version.h.cmake
-    ${LIB_BINARY_DIR}/include/libssh/libssh_version.h
-    @ONLY)
@@ -1,98 +0,0 @@ (file removed)
-set(LIBSSH_LINK_LIBRARIES
-    ${LIBSSH_LINK_LIBRARIES}
-    OpenSSL::Crypto
-)
-
-set(libssh_SRCS
-    # the same per-file libssh source listing now inlined in the CMakeLists above,
-    # split across several set(libssh_SRCS ${libssh_SRCS} ...) blocks:
-    # core sources, src/external/*, threads/noop.c, threads/pthread.c,
-    # the LIBCRYPT-specific sources, and options.c/server.c/bind.c/bind_config.c
-)
-
-if (NOT (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC))
-    add_compile_definitions(USE_BORINGSSL=1)
-endif()
-
-add_library(_ssh STATIC ${libssh_SRCS})
-
-target_include_directories(_ssh PRIVATE ${LIB_BINARY_DIR})
-target_include_directories(_ssh PUBLIC "${LIB_SOURCE_DIR}/include" "${LIB_BINARY_DIR}/include")
-target_link_libraries(_ssh
-    PRIVATE ${LIBSSH_LINK_LIBRARIES})
-
-add_library(ch_contrib::ssh ALIAS _ssh)
-
-target_compile_options(_ssh
-    PRIVATE
-        ${DEFAULT_C_COMPILE_FLAGS}
-        -D_GNU_SOURCE)
@ -1,6 +1,10 @@
#include <libunwind.h>

/// On MacOS this function will be replaced with a dynamic symbol
/// from the system library.
#if !defined(OS_DARWIN)
int backtrace(void ** buffer, int size)
{
    return unw_backtrace(buffer, size);
}
#endif
2 contrib/libuv vendored
@ -1 +1 @@
Subproject commit 3a85b2eb3d83f369b8a8cafd329d7e9dc28f60cf
Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a

2 contrib/llvm-project vendored
@ -1 +1 @@
Subproject commit 2568a7cd1297c7c3044b0f3cc0c23a6f6444d856
Subproject commit d2142eed98046a47ff7112e3cc1e197c8a5cd80f
@ -1,5 +1,6 @@
if (APPLE OR SANITIZE STREQUAL "undefined" OR SANITIZE STREQUAL "memory")
if (APPLE OR SANITIZE STREQUAL "memory")
    # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
    # llvm-tblgen, that is used during LLVM build, will throw MSAN errors when running (breaking the build)
    # TODO: Retest when upgrading LLVM or build only llvm-tblgen without sanitizers
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
    set (ENABLE_DWARF_PARSER_DEFAULT OFF)
else()
2 contrib/lz4 vendored
@ -1 +1 @@
Subproject commit 92ebf1870b9acbefc0e7970409a181954a10ff40
Subproject commit ce45a9dbdb059511a3e9576b19db3e7f1a4f172e

2 contrib/simdjson vendored
@ -1 +1 @@
Subproject commit 1075e8609c4afa253162d441437af929c29e31bb
Subproject commit 6060be2fdf62edf4a8f51a8b0883d57d09397b30

2 contrib/update-submodules.sh vendored
@ -24,7 +24,7 @@ git config --file .gitmodules --get-regexp '.*path' | sed 's/[^ ]* //' | xargs -
# We don't want to depend on any third-party CMake files.
# To check it, find and delete them.
grep -o -P '"contrib/[^"]+"' .gitmodules |
    grep -v -P 'contrib/(llvm-project|google-protobuf|grpc|abseil-cpp|corrosion)' |
    grep -v -P 'contrib/(llvm-project|google-protobuf|grpc|abseil-cpp|corrosion|aws-crt-cpp)' |
    xargs -I@ find @ \
        -'(' -name 'CMakeLists.txt' -or -name '*.cmake' -')' -and -not -name '*.h.cmake' \
        -delete
@ -62,7 +62,6 @@
        "dependent": []
    },
    "docker/test/integration/runner": {
        "only_amd64": true,
        "name": "clickhouse/integration-tests-runner",
        "dependent": []
    },
@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.12.2.59"
ARG VERSION="24.1.5.6"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""
@ -72,7 +72,7 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
    zstd \
    zip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# Download toolchain and SDK for Darwin
RUN curl -sL -O https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz
@ -115,12 +115,17 @@ def run_docker_image_with_env(
    subprocess.check_call(cmd, shell=True)


def is_release_build(debug_build: bool, package_type: str, sanitizer: str) -> bool:
    return not debug_build and package_type == "deb" and sanitizer == ""
def is_release_build(
    debug_build: bool, package_type: str, sanitizer: str, coverage: bool
) -> bool:
    return (
        not debug_build and package_type == "deb" and sanitizer == "" and not coverage
    )


def parse_env_variables(
    debug_build: bool,
    coverage: bool,
    compiler: str,
    sanitizer: str,
    package_type: str,
@ -261,7 +266,7 @@ def parse_env_variables(
        build_target = (
            f"{build_target} clickhouse-odbc-bridge clickhouse-library-bridge"
        )
    if is_release_build(debug_build, package_type, sanitizer):
    if is_release_build(debug_build, package_type, sanitizer, coverage):
        cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
        result.append("WITH_PERFORMANCE=1")
        if is_cross_arm:
@ -287,6 +292,9 @@ def parse_env_variables(
    else:
        result.append("BUILD_TYPE=None")

    if coverage:
        cmake_flags.append("-DSANITIZE_COVERAGE=1 -DBUILD_STANDALONE_KEEPER=0")

    if not cache:
        cmake_flags.append("-DCOMPILER_CACHE=disabled")

@ -415,6 +423,11 @@ def parse_args() -> argparse.Namespace:
        choices=("address", "thread", "memory", "undefined", ""),
        default="",
    )
    parser.add_argument(
        "--coverage",
        action="store_true",
        help="enable granular coverage with introspection",
    )

    parser.add_argument("--clang-tidy", action="store_true")
    parser.add_argument(
@ -507,6 +520,7 @@ def main() -> None:

    env_prepared = parse_env_variables(
        args.debug_build,
        args.coverage,
        args.compiler,
        args.sanitizer,
        args.package_type,
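A brief illustration (not part of the commit) of how the new `--coverage` option in docker/packager/packager fits together: the flag flows into `parse_env_variables()`, a coverage build stops counting as a release build, and the extra CMake flag is appended. A minimal Python sketch, with hypothetical input values:

```python
# Minimal sketch of the coverage plumbing added above; all inputs are hypothetical.
def is_release_build(debug_build: bool, package_type: str, sanitizer: str, coverage: bool) -> bool:
    return not debug_build and package_type == "deb" and sanitizer == "" and not coverage


cmake_flags = []
coverage = True  # as if --coverage was passed on the command line

if coverage:
    cmake_flags.append("-DSANITIZE_COVERAGE=1 -DBUILD_STANDALONE_KEEPER=0")

# A coverage build is deliberately not treated as a release build,
# so SPLIT_DEBUG_SYMBOLS / WITH_PERFORMANCE are not applied to it.
assert not is_release_build(False, "deb", "", coverage)
print(cmake_flags)  # ['-DSANITIZE_COVERAGE=1 -DBUILD_STANDALONE_KEEPER=0']
```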
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.12.2.59"
ARG VERSION="24.1.5.6"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""
@ -23,14 +23,11 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
    tzdata \
    wget \
    && apt-get clean \
    && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
        /tmp/*
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.12.2.59"
ARG VERSION="24.1.5.6"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
@ -118,13 +118,19 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
EOT
fi

CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS="${CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS:-}"

# checking $DATA_DIR for initialization
if [ -d "${DATA_DIR%/}/data" ]; then
    DATABASE_ALREADY_EXISTS='true'
fi

# only run initialization on an empty data directory
# run initialization if flag CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS is not empty or data directory is empty
if [ -z "${DATABASE_ALREADY_EXISTS}" ]; then
if [[ -n "${CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS}" || -z "${DATABASE_ALREADY_EXISTS}" ]]; then
    RUN_INITDB_SCRIPTS='true'
fi

if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
    if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
        # port is needed to check if clickhouse-server is ready for connections
        HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port --try)"
@ -13,7 +13,10 @@ RUN apt-get update \
    zstd \
    locales \
    sudo \
    --yes --no-install-recommends
    --yes --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# Sanitizer options for services (clickhouse-server)
# Set resident memory limit for TSAN to 45GiB (46080MiB) to avoid OOMs in Stress tests
@ -17,16 +17,20 @@ CLICKHOUSE_CI_LOGS_CLUSTER=${CLICKHOUSE_CI_LOGS_CLUSTER:-system_logs_export}

EXTRA_COLUMNS=${EXTRA_COLUMNS:-"pull_request_number UInt32, commit_sha String, check_start_time DateTime('UTC'), check_name LowCardinality(String), instance_type LowCardinality(String), instance_id String, INDEX ix_pr (pull_request_number) TYPE set(100), INDEX ix_commit (commit_sha) TYPE set(100), INDEX ix_check_time (check_start_time) TYPE minmax, "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:-"CAST(0 AS UInt32) AS pull_request_number, '' AS commit_sha, now() AS check_start_time, toLowCardinality('') AS check_name, toLowCardinality('') AS instance_type, '' AS instance_id"}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name"}

# trace_log needs more columns for symbolization
EXTRA_COLUMNS_TRACE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), lines Array(LowCardinality(String)), "
EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> demangle(addressToSymbol(x)), trace)::Array(LowCardinality(String)) AS symbols, arrayMap(x -> addressToLine(x), trace)::Array(LowCardinality(String)) AS lines"

# coverage_log needs more columns for symbolization, but only symbol names (the line numbers are too heavy to calculate)
EXTRA_COLUMNS_COVERAGE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), "
EXTRA_COLUMNS_EXPRESSION_COVERAGE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> demangle(addressToSymbol(x)), coverage)::Array(LowCardinality(String)) AS symbols"

function __set_connection_args
{
    # It's impossible to use generous $CONNECTION_ARGS string, it's unsafe from word splitting perspective.
    # It's impossible to use a generic $CONNECTION_ARGS string, it's unsafe from word splitting perspective.
    # That's why we must stick to the generated option
    CONNECTION_ARGS=(
        --receive_timeout=45 --send_timeout=45 --secure
@ -129,6 +133,19 @@ function setup_logs_replication
    debug_or_sanitizer_build=$(clickhouse-client -q "WITH ((SELECT value FROM system.build_options WHERE name='BUILD_TYPE') AS build, (SELECT value FROM system.build_options WHERE name='CXX_FLAGS') as flags) SELECT build='Debug' OR flags LIKE '%fsanitize%'")
    echo "Build is debug or sanitizer: $debug_or_sanitizer_build"

    # We will pre-create a table system.coverage_log.
    # It is normally created by clickhouse-test rather than the server,
    # so we will create it in advance to make it be picked up by the next commands:

    clickhouse-client --query "
        CREATE TABLE IF NOT EXISTS system.coverage_log
        (
            time DateTime COMMENT 'The time of test run',
            test_name String COMMENT 'The name of the test',
            coverage Array(UInt64) COMMENT 'An array of addresses of the code (a subset of addresses instrumented for coverage) that were encountered during the test run'
        ) ENGINE = Null COMMENT 'Contains information about per-test coverage from the CI, but used only for exporting to the CI cluster'
    "

    # For each system log table:
    echo 'Create %_log tables'
    clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
@ -139,11 +156,16 @@ function setup_logs_replication
        # Do not try to resolve stack traces in case of debug/sanitizers
        # build, since it is too slow (flushing of trace_log can take ~1min
        # with such MV attached)
        if [[ "$debug_or_sanitizer_build" = 1 ]]; then
        if [[ "$debug_or_sanitizer_build" = 1 ]]
        then
            EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}"
        else
            EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}"
        fi
    elif [[ "$table" = "coverage_log" ]]
    then
        EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS_COVERAGE_LOG}"
        EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_COVERAGE_LOG}"
    else
        EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS}"
        EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}"
@ -160,7 +182,7 @@ function setup_logs_replication
        # Create the destination table with adapted name and structure:
        statement=$(clickhouse-client --format TSVRaw --query "SHOW CREATE TABLE system.${table}" | sed -r -e '
            s/^\($/('"$EXTRA_COLUMNS_FOR_TABLE"'/;
            s/ORDER BY \(/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"'/;
            s/^ORDER BY (([^\(].+?)|\((.+?)\))$/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"', \2\3)/;
            s/^CREATE TABLE system\.\w+_log$/CREATE TABLE IF NOT EXISTS '"$table"'_'"$hash"'/;
            /^TTL /d
        ')
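As a side note, the reworked `ORDER BY` rewrite above now handles both parenthesized and bare sorting keys, whereas the old rule only matched the `ORDER BY (` form. A small Python sketch of the same transformation, assuming `EXTRA_ORDER_BY_COLUMNS` is `check_name` (the real script does this with `sed -r`):

```python
import re

# Mirrors the new sed rule: prepend the extra ORDER BY column regardless of
# whether the original sorting key is wrapped in parentheses.
ORDER_BY_RE = re.compile(r"^ORDER BY (([^(].+?)|\((.+?)\))$")

def add_order_by_columns(line: str, extra: str = "check_name") -> str:
    return ORDER_BY_RE.sub(lambda m: f"ORDER BY ({extra}, {m.group(2) or m.group(3)})", line)

print(add_order_by_columns("ORDER BY (event_date, event_time)"))  # ORDER BY (check_name, event_date, event_time)
print(add_order_by_columns("ORDER BY event_time"))                # ORDER BY (check_name, event_time)
```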
@ -168,7 +190,7 @@ function setup_logs_replication
        echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2

        echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
            --distributed_ddl_task_timeout=30 \
            --distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \
            "${CONNECTION_ARGS[@]}" || continue

        echo "Creating table system.${table}_sender" >&2
@ -20,9 +20,11 @@ RUN apt-get update \
    pv \
    jq \
    zstd \
    --yes --no-install-recommends
    --yes --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install numpy scipy pandas Jinja2
RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3

ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
@ -31,12 +33,14 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \
    && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
    && rm -rf /tmp/clickhouse-odbc-tmp \
    && rm -rf /tmp/clickhouse-odbc-tmp

# Give suid to gdb to grant it attach permissions
# chmod 777 to make the container user independent
RUN chmod u+s /usr/bin/gdb \
    && mkdir -p /var/lib/clickhouse \
    && chmod 777 /var/lib/clickhouse

# chmod 777 to make the container user independent

ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
@ -211,6 +211,17 @@ function build
    echo "build_clickhouse_fasttest_binary: [ OK ] $BUILD_SECONDS_ELAPSED sec." \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee "$FASTTEST_OUTPUT/test_result.txt"

    (
        # This query should fail, and print stacktrace with proper symbol names (even on a stripped binary)
        clickhouse_output=$(programs/clickhouse-stripped --stacktrace -q 'select' 2>&1 || :)
        if [[ $clickhouse_output =~ DB::LocalServer::main ]]; then
            echo "stripped_clickhouse_shows_symbols_names: [ OK ] 0 sec."
        else
            echo -e "stripped_clickhouse_shows_symbols_names: [ FAIL ] 0 sec. - clickhouse output:\n\n$clickhouse_output\n"
        fi
    ) | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_result.txt"

    if [ "$COPY_CLICKHOUSE_BINARY_TO_OUTPUT" -eq "1" ]; then
        mkdir -p "$FASTTEST_OUTPUT/binaries/"
        cp programs/clickhouse "$FASTTEST_OUTPUT/binaries/clickhouse"
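For local experimentation, the new stripped-binary symbol check above could be approximated outside the fasttest script roughly as follows. This is only a sketch: the binary path `programs/clickhouse-stripped` and the expected `DB::LocalServer::main` frame come from the script itself, everything else is an assumption.

```python
import subprocess

# Run the stripped binary with --stacktrace on a deliberately invalid query and
# check that the printed stack trace still contains demangled symbol names.
proc = subprocess.run(
    ["programs/clickhouse-stripped", "--stacktrace", "-q", "select"],
    capture_output=True,
    text=True,
)
output = proc.stdout + proc.stderr
if "DB::LocalServer::main" in output:
    print("stripped_clickhouse_shows_symbols_names: [ OK ]")
else:
    print("stripped_clickhouse_shows_symbols_names: [ FAIL ]")
    print(output)
```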
@ -29,7 +29,7 @@ RUN apt-get update \
    wget \
    && apt-get autoremove --yes \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install Jinja2
@ -389,8 +389,8 @@ fi
rg --text -F '<Fatal>' server.log > fatal.log ||:
dmesg -T > dmesg.log ||:

zstd --threads=0 server.log
zstd --threads=0 --rm server.log
zstd --threads=0 fuzzer.log
zstd --threads=0 --rm fuzzer.log

cat > report.html <<EOF ||:
<!DOCTYPE html>
@ -10,13 +10,13 @@ ENV \
    init=/lib/systemd/systemd

# install systemd packages
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        sudo \
        systemd \
    && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists
    \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# configure systemd
# remove systemd 'wants' triggers
@ -1,31 +1,27 @@
FROM ubuntu:20.04
MAINTAINER lgbo-ustc <lgbo.ustc@gmail.com>

RUN apt-get update
RUN apt-get install -y wget openjdk-8-jre
RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-3.1.0/hadoop-3.1.0.tar.gz && \
    tar -xf hadoop-3.1.0.tar.gz && rm -rf hadoop-3.1.0.tar.gz
RUN wget https://apache.apache.org/dist/hive/hive-2.3.9/apache-hive-2.3.9-bin.tar.gz && \
    tar -xf apache-hive-2.3.9-bin.tar.gz && rm -rf apache-hive-2.3.9-bin.tar.gz
RUN apt install -y vim
RUN apt install -y openssh-server openssh-client
RUN apt install -y mysql-server
RUN mkdir -p /root/.ssh && \
    ssh-keygen -t rsa -b 2048 -P '' -f /root/.ssh/id_rsa && \
    cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys && \
    cp /root/.ssh/id_rsa /etc/ssh/ssh_host_rsa_key && \
    cp /root/.ssh/id_rsa.pub /etc/ssh/ssh_host_rsa_key.pub
RUN wget https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.27.tar.gz &&\
    tar -xf mysql-connector-java-8.0.27.tar.gz && \
    mv mysql-connector-java-8.0.27/mysql-connector-java-8.0.27.jar /apache-hive-2.3.9-bin/lib/ && \
    rm -rf mysql-connector-java-8.0.27.tar.gz mysql-connector-java-8.0.27
RUN apt install -y iputils-ping net-tools
RUN apt-get update \
    && apt-get install -y wget openjdk-8-jre \
    && wget https://archive.apache.org/dist/hadoop/common/hadoop-3.1.0/hadoop-3.1.0.tar.gz \
    && tar -xf hadoop-3.1.0.tar.gz && rm -rf hadoop-3.1.0.tar.gz \
    && wget https://apache.apache.org/dist/hive/hive-2.3.9/apache-hive-2.3.9-bin.tar.gz \
    && tar -xf apache-hive-2.3.9-bin.tar.gz && rm -rf apache-hive-2.3.9-bin.tar.gz \
    && apt install -y vim \
    && apt install -y openssh-server openssh-client \
    && apt install -y mysql-server \
    && mkdir -p /root/.ssh \
    && ssh-keygen -t rsa -b 2048 -P '' -f /root/.ssh/id_rsa \
    && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys \
    && cp /root/.ssh/id_rsa /etc/ssh/ssh_host_rsa_key \
    && cp /root/.ssh/id_rsa.pub /etc/ssh/ssh_host_rsa_key.pub \
    && wget https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.27.tar.gz \
    && tar -xf mysql-connector-java-8.0.27.tar.gz \
    && mv mysql-connector-java-8.0.27/mysql-connector-java-8.0.27.jar /apache-hive-2.3.9-bin/lib/ \
    && rm -rf mysql-connector-java-8.0.27.tar.gz mysql-connector-java-8.0.27 \
    && apt install -y iputils-ping net-tools \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

ENV JAVA_HOME=/usr
ENV HADOOP_HOME=/hadoop-3.1.0
@ -44,4 +40,3 @@ COPY demo_data.txt /
ENV PATH=/apache-hive-2.3.9-bin/bin:/hadoop-3.1.0/bin:/hadoop-3.1.0/sbin:$PATH
RUN service ssh start && sed s/HOSTNAME/$HOSTNAME/ /hadoop-3.1.0/etc/hadoop/core-site.xml.template > /hadoop-3.1.0/etc/hadoop/core-site.xml && hdfs namenode -format
COPY start.sh /
@ -3,14 +3,10 @@

FROM ubuntu:18.04

RUN apt-get update && \
    apt-get install -y software-properties-common build-essential openjdk-8-jdk curl
RUN rm -rf \
    /var/lib/apt/lists/* \
    /var/cache/debconf \
    /tmp/* \
RUN apt-get clean
RUN apt-get update \
    && apt-get install -y software-properties-common build-essential openjdk-8-jdk curl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

ARG ver=42.2.12
RUN curl -L -o /postgresql-java-${ver}.jar https://repo1.maven.org/maven2/org/postgresql/postgresql/${ver}/postgresql-${ver}.jar
@ -37,11 +37,8 @@ RUN apt-get update \
    libkrb5-dev \
    krb5-user \
    g++ \
    && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
        /tmp/* \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
@ -62,47 +59,49 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && dockerd --version; docker --version

# kazoo 2.10.0 is broken
# https://s3.amazonaws.com/clickhouse-test-reports/59337/524625a1d2f4cc608a3f1059e3df2c30f353a649/integration_tests__asan__analyzer__[5_6].html
RUN python3 -m pip install --no-cache-dir \
    PyMySQL \
    PyMySQL==1.1.0 \
    aerospike==11.1.0 \
    asyncio==3.4.3 \
    asyncio \
    avro==1.10.2 \
    azure-storage-blob \
    azure-storage-blob==12.19.0 \
    boto3 \
    boto3==1.34.24 \
    cassandra-driver \
    cassandra-driver==3.29.0 \
    confluent-kafka==1.9.2 \
    confluent-kafka==2.3.0 \
    delta-spark==2.3.0 \
    dict2xml \
    dict2xml==1.7.4 \
    dicttoxml \
    dicttoxml==1.7.16 \
    docker==6.1.3 \
    docker-compose==1.29.2 \
    grpcio \
    grpcio==1.60.0 \
    grpcio-tools \
    grpcio-tools==1.60.0 \
    kafka-python \
    kafka-python==2.0.2 \
    kazoo \
    lz4==4.3.3 \
    lz4 \
    minio==7.2.3 \
    minio \
    nats-py==2.6.0 \
    nats-py \
    protobuf==4.25.2 \
    protobuf \
    kazoo==2.9.0 \
    psycopg2-binary==2.9.6 \
    pyhdfs \
    pyhdfs==0.3.1 \
    pymongo==3.11.0 \
    pyspark==3.3.2 \
    pytest \
    pytest==7.4.4 \
    pytest-order==1.0.0 \
    pytest-random \
    pytest-random==0.2 \
    pytest-repeat \
    pytest-repeat==0.9.3 \
    pytest-timeout \
    pytest-timeout==2.2.0 \
    pytest-xdist \
    pytest-xdist==3.5.0 \
    pytz \
    pytest-reportlog==0.4.0 \
    pytz==2023.3.post1 \
    pyyaml==5.3.1 \
    redis \
    redis==5.0.1 \
    requests-kerberos \
    requests-kerberos==0.14.0 \
    tzlocal==2.1 \
    retry \
    retry==0.9.2 \
    bs4 \
    bs4==0.0.2 \
    lxml \
    lxml==5.1.0 \
    urllib3
    urllib3==2.0.7
# bs4, lxml are for cloud tests, do not delete

# Hudi supports only spark 3.3.*, not 3.4
@ -23,13 +23,15 @@ if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
    > /sys/fs/cgroup/cgroup.subtree_control
fi

# In case of test hung it is convenient to use pytest --pdb to debug it,
# and on hung you can simply press Ctrl-C and it will spawn a python pdb,
# but on SIGINT dockerd will exit, so ignore it to preserve the daemon.
trap '' INT
# Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed
# unless --tls=false or --tlsverify=false is set
dockerd --host=unix:///var/run/docker.sock --tls=false --host=tcp://0.0.0.0:2375 --default-address-pool base=172.17.0.0/12,size=24 &>/ClickHouse/tests/integration/dockerd.log &
#
# In case of test hung it is convenient to use pytest --pdb to debug it,
# and on hung you can simply press Ctrl-C and it will spawn a python pdb,
# but on SIGINT dockerd will exit, so we spawn new session to ignore SIGINT by
# docker.
# Note, that if you will run it via runner, it will send SIGINT to docker anyway.
setsid dockerd --host=unix:///var/run/docker.sock --tls=false --host=tcp://0.0.0.0:2375 --default-address-pool base=172.17.0.0/12,size=24 &>/ClickHouse/tests/integration/dockerd.log &

set +e
reties=0
@ -24,7 +24,10 @@ RUN mkdir "/root/.ssh"
RUN touch "/root/.ssh/known_hosts"

# install java
RUN apt-get update && apt-get install default-jre default-jdk libjna-java libjna-jni ssh gnuplot graphviz --yes --no-install-recommends
RUN apt-get update && \
    apt-get install default-jre default-jdk libjna-java libjna-jni ssh gnuplot graphviz --yes --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# install clojure
RUN curl -O "https://download.clojure.org/install/linux-install-${CLOJURE_VERSION}.sh" && \
@ -27,7 +27,7 @@ RUN apt-get update \
    wget \
    && apt-get autoremove --yes \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install Jinja2
@ -37,7 +37,7 @@ RUN apt-get update \
    && apt-get purge --yes python3-dev g++ \
    && apt-get autoremove --yes \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY run.sh /
@ -31,7 +31,9 @@ RUN mkdir "/root/.ssh"
RUN touch "/root/.ssh/known_hosts"

# install java
RUN apt-get update && apt-get install default-jre default-jdk libjna-java libjna-jni ssh gnuplot graphviz --yes --no-install-recommends
RUN apt-get update && apt-get install default-jre default-jdk libjna-java libjna-jni ssh gnuplot graphviz --yes --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# install clojure
RUN curl -O "https://download.clojure.org/install/linux-install-${CLOJURE_VERSION}.sh" && \
@ -5,9 +5,10 @@ FROM ubuntu:22.04
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

RUN apt-get update --yes && \
    env DEBIAN_FRONTEND=noninteractive apt-get install wget git default-jdk maven python3 --yes --no-install-recommends && \
    apt-get clean
RUN apt-get update --yes \
    && env DEBIAN_FRONTEND=noninteractive apt-get install wget git default-jdk maven python3 --yes --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# We need to get the repository's HEAD each time despite, so we invalidate layers' cache
ARG CACHE_INVALIDATOR=0
@ -15,7 +15,8 @@ RUN apt-get update --yes \
    unixodbc-dev \
    odbcinst \
    sudo \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install \
    numpy \
@ -11,7 +11,8 @@ RUN apt-get update --yes \
    python3-dev \
    python3-pip \
    sudo \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install \
    pyyaml \
@ -9,7 +9,8 @@ RUN apt-get update -y \
    python3-requests \
    nodejs \
    npm \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY create.sql /
COPY run.sh /
@ -44,9 +44,10 @@ RUN apt-get update -y \
    pv \
    zip \
    p7zip-full \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install numpy scipy pandas Jinja2 pyarrow
RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3 pyarrow==15.0.0

RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
@ -73,7 +74,6 @@ RUN arch=${TARGETARCH:-amd64} \
    && wget "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -O ./mc \
    && chmod +x ./mc ./minio

RUN wget --no-verbose 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
    && tar -xvf hadoop-3.3.1.tar.gz \
    && rm -rf hadoop-3.3.1.tar.gz
@ -9,6 +9,8 @@ FROM ubuntu:20.04 as clickhouse-test-runner-base
VOLUME /packages

CMD apt-get update ;\
    DEBIAN_FRONTEND=noninteractive \
    apt install -y /packages/clickhouse-common-static_*.deb \
    /packages/clickhouse-client_*.deb
    /packages/clickhouse-client_*.deb \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
@ -185,11 +185,15 @@ function run_tests()

    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--replicated-database')
        # Too many tests fail for DatabaseReplicated in parallel.
        ADDITIONAL_OPTIONS+=('--jobs')
        ADDITIONAL_OPTIONS+=('2')
    elif [[ 1 == $(clickhouse-client --query "SELECT value LIKE '%SANITIZE_COVERAGE%' FROM system.build_options WHERE name = 'CXX_FLAGS'") ]]; then
        # Coverage on a per-test basis could only be collected sequentially.
        # Do not set the --jobs parameter.
        echo "Running tests with coverage collection."
    else
        # Too many tests fail for DatabaseReplicated in parallel. All other
        # configurations are OK.
        # All other configurations are OK.
        ADDITIONAL_OPTIONS+=('--jobs')
        ADDITIONAL_OPTIONS+=('8')
    fi
@ -246,16 +250,19 @@ clickhouse-client -q "system flush logs" ||:
stop_logs_replication

# Try to get logs while server is running
successfuly_saved=0
failed_to_save_logs=0
for table in query_log zookeeper_log trace_log transactions_info_log
for table in query_log zookeeper_log trace_log transactions_info_log metric_log
do
    clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst
    successfuly_saved=$?
    err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst; } 2>&1 )
    echo "$err"
    [[ "0" != "${#err}" ]] && failed_to_save_logs=1
    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst
        successfuly_saved=$((successfuly_saved | $?))
        clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst
        successfuly_saved=$((successfuly_saved | $?))
        err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
        err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
    fi
done
@ -280,7 +287,7 @@ fi


# If server crashed dump system logs with clickhouse-local
if [ $successfuly_saved -ne 0 ]; then
if [ $failed_to_save_logs -ne 0 ]; then
    # Compress tables.
    #
    # NOTE:
@ -288,12 +295,12 @@ if [ $successfuly_saved -ne 0 ]; then
    # directly
    # - even though ci auto-compress some files (but not *.tsv) it does this only
    # for files >64MB, we want this files to be compressed explicitly
    for table in query_log zookeeper_log trace_log transactions_info_log
    for table in query_log zookeeper_log trace_log transactions_info_log metric_log
    do
        clickhouse-local "$data_path_config" --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
        clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
        if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
            clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
            clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
            clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
            clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
        fi
    done
fi
|
@ -78,6 +78,8 @@ function configure()
|
|||||||
randomize_config_boolean_value use_compression zookeeper
|
randomize_config_boolean_value use_compression zookeeper
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
randomize_config_boolean_value allow_experimental_block_number_column block_number
|
||||||
|
|
||||||
# for clickhouse-server (via service)
|
# for clickhouse-server (via service)
|
||||||
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
|
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
|
||||||
# for clickhouse-client
|
# for clickhouse-client
|
||||||
|
@ -19,7 +19,8 @@ RUN apt-get update -y \
    openssl \
    netcat-openbsd \
    brotli \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY run.sh /
@ -21,7 +21,8 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    locales \
    && pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
    && apt-get clean \
    && rm -rf /root/.cache/pip
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
    && rm -rf /root/.cache/pip

RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
ENV LC_ALL en_US.UTF-8
|
@ -19,7 +19,8 @@ RUN apt-get update -y \
|
|||||||
openssl \
|
openssl \
|
||||||
netcat-openbsd \
|
netcat-openbsd \
|
||||||
brotli \
|
brotli \
|
||||||
&& apt-get clean
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
|
||||||
|
|
||||||
COPY run.sh /
|
COPY run.sh /
|
||||||
|
|
||||||
|
@ -122,6 +122,7 @@ rm /etc/clickhouse-server/config.d/merge_tree.xml
|
|||||||
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
|
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
|
||||||
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
|
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
|
||||||
rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
|
rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
|
||||||
|
rm /etc/clickhouse-server/config.d/block_number.xml
|
||||||
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
||||||
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
|
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
|
||||||
rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
|
rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
|
||||||
|
@ -5,7 +5,6 @@ FROM ubuntu:22.04
|
|||||||
ARG apt_archive="http://archive.ubuntu.com"
|
ARG apt_archive="http://archive.ubuntu.com"
|
||||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||||
|
|
||||||
# 15.0.2
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=17
|
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=17
|
||||||
|
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
@ -27,9 +26,10 @@ RUN apt-get update \
    && export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
    && echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
        /etc/apt/sources.list \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# Install cmake 3.20+ for rust support
# Install cmake 3.20+ for Rust support
# Used https://askubuntu.com/a/1157132 as reference
RUN curl -s https://apt.kitware.com/keys/kitware-archive-latest.asc | \
    gpg --dearmor - > /etc/apt/trusted.gpg.d/kitware.gpg && \
@ -60,9 +60,10 @@ RUN apt-get update \
    software-properties-common \
    tzdata \
    --yes --no-install-recommends \
    && apt-get clean
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# This symlink required by gcc to find lld compiler
# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# for external_symbolizer_path
RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
@ -107,5 +108,4 @@ RUN arch=${TARGETARCH:-amd64} \
    && mv "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl/sccache" /usr/bin \
    && rm "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl" -r

COPY process_functional_tests_result.py /
31 docs/changelogs/v23.11.5.29-stable.md Normal file
@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.11.5.29-stable (d83b108deca) FIXME as compared to v23.11.4.24-stable (e79d840d7fe)

#### Improvement
* Backported in [#58815](https://github.com/ClickHouse/ClickHouse/issues/58815): Add `SYSTEM JEMALLOC PURGE` for purging unused jemalloc pages, `SYSTEM JEMALLOC [ ENABLE | DISABLE | FLUSH ] PROFILE` for controlling jemalloc profile if the profiler is enabled. Add jemalloc-related 4LW command in Keeper: `jmst` for dumping jemalloc stats, `jmfp`, `jmep`, `jmdp` for controlling jemalloc profile if the profiler is enabled. [#58665](https://github.com/ClickHouse/ClickHouse/pull/58665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#59234](https://github.com/ClickHouse/ClickHouse/issues/59234): Allow to ignore schema evolution in Iceberg table engine and read all data using schema specified by the user on table creation or latest schema parsed from metadata on table creation. This is done under a setting `iceberg_engine_ignore_schema_evolution` that is disabled by default. Note that enabling this setting can lead to incorrect result as in case of evolved schema all data files will be read using the same schema. [#59133](https://github.com/ClickHouse/ClickHouse/pull/59133) ([Kruglov Pavel](https://github.com/Avogar)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix a stupid case of intersecting parts [#58482](https://github.com/ClickHouse/ClickHouse/pull/58482) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix stream partitioning in parallel window functions [#58739](https://github.com/ClickHouse/ClickHouse/pull/58739) ([Dmitry Novik](https://github.com/novikd)).
* Fix double destroy call on exception throw in addBatchLookupTable8 [#58745](https://github.com/ClickHouse/ClickHouse/pull/58745) ([Raúl Marín](https://github.com/Algunenano)).
* Fix JSONExtract function for LowCardinality(Nullable) columns [#58808](https://github.com/ClickHouse/ClickHouse/pull/58808) ([vdimir](https://github.com/vdimir)).
* Fix: LIMIT BY and LIMIT in distributed query [#59153](https://github.com/ClickHouse/ClickHouse/pull/59153) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix not-ready set for system.tables [#59351](https://github.com/ClickHouse/ClickHouse/pull/59351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix translate() with FixedString input [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* refine error message [#57991](https://github.com/ClickHouse/ClickHouse/pull/57991) ([Han Fei](https://github.com/hanfei1991)).
* Fix rare race in external sort/aggregation with temporary data in cache [#58013](https://github.com/ClickHouse/ClickHouse/pull/58013) ([Anton Popov](https://github.com/CurtizJ)).
* Follow-up to [#58482](https://github.com/ClickHouse/ClickHouse/issues/58482) [#58574](https://github.com/ClickHouse/ClickHouse/pull/58574) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix possible race in ManyAggregatedData dtor. [#58624](https://github.com/ClickHouse/ClickHouse/pull/58624) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Decrease log level for one log message [#59168](https://github.com/ClickHouse/ClickHouse/pull/59168) ([Kseniia Sumarokova](https://github.com/kssenii)).
36
docs/changelogs/v23.12.3.40-stable.md
Normal file
@ -0,0 +1,36 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.12.3.40-stable (a594704ae75) FIXME as compared to v23.12.2.59-stable (17ab210e761)

#### Improvement
* Backported in [#58660](https://github.com/ClickHouse/ClickHouse/issues/58660): When executing some queries, which require a lot of streams for reading data, the error `"Paste JOIN requires sorted tables only"` was previously thrown. Now the number of streams is resized to 1 in that case. [#58608](https://github.com/ClickHouse/ClickHouse/pull/58608) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Backported in [#58817](https://github.com/ClickHouse/ClickHouse/issues/58817): Add `SYSTEM JEMALLOC PURGE` for purging unused jemalloc pages, `SYSTEM JEMALLOC [ ENABLE | DISABLE | FLUSH ] PROFILE` for controlling jemalloc profile if the profiler is enabled. Add jemalloc-related 4LW command in Keeper: `jmst` for dumping jemalloc stats, `jmfp`, `jmep`, `jmdp` for controlling jemalloc profile if the profiler is enabled. [#58665](https://github.com/ClickHouse/ClickHouse/pull/58665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#59235](https://github.com/ClickHouse/ClickHouse/issues/59235): Allow to ignore schema evolution in Iceberg table engine and read all data using schema specified by the user on table creation or latest schema parsed from metadata on table creation. This is done under a setting `iceberg_engine_ignore_schema_evolution` that is disabled by default. Note that enabling this setting can lead to incorrect result as in case of evolved schema all data files will be read using the same schema. [#59133](https://github.com/ClickHouse/ClickHouse/pull/59133) ([Kruglov Pavel](https://github.com/Avogar)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Delay reading from StorageKafka to allow multiple reads in materialized views [#58477](https://github.com/ClickHouse/ClickHouse/pull/58477) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix a stupid case of intersecting parts [#58482](https://github.com/ClickHouse/ClickHouse/pull/58482) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable max_joined_block_rows in ConcurrentHashJoin [#58595](https://github.com/ClickHouse/ClickHouse/pull/58595) ([vdimir](https://github.com/vdimir)).
* Fix stream partitioning in parallel window functions [#58739](https://github.com/ClickHouse/ClickHouse/pull/58739) ([Dmitry Novik](https://github.com/novikd)).
* Fix double destroy call on exception throw in addBatchLookupTable8 [#58745](https://github.com/ClickHouse/ClickHouse/pull/58745) ([Raúl Marín](https://github.com/Algunenano)).
* Fix JSONExtract function for LowCardinality(Nullable) columns [#58808](https://github.com/ClickHouse/ClickHouse/pull/58808) ([vdimir](https://github.com/vdimir)).
* Multiple read file log storage in mv [#58877](https://github.com/ClickHouse/ClickHouse/pull/58877) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix: LIMIT BY and LIMIT in distributed query [#59153](https://github.com/ClickHouse/ClickHouse/pull/59153) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix not-ready set for system.tables [#59351](https://github.com/ClickHouse/ClickHouse/pull/59351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix translate() with FixedString input [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Follow-up to [#58482](https://github.com/ClickHouse/ClickHouse/issues/58482) [#58574](https://github.com/ClickHouse/ClickHouse/pull/58574) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix possible race in ManyAggregatedData dtor. [#58624](https://github.com/ClickHouse/ClickHouse/pull/58624) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change log level for super important message in Keeper [#59010](https://github.com/ClickHouse/ClickHouse/pull/59010) ([alesapin](https://github.com/alesapin)).
* Decrease log level for one log message [#59168](https://github.com/ClickHouse/ClickHouse/pull/59168) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix fasttest by pinning pip dependencies [#59256](https://github.com/ClickHouse/ClickHouse/pull/59256) ([Azat Khuzhin](https://github.com/azat)).
* No debug symbols in Rust [#59306](https://github.com/ClickHouse/ClickHouse/pull/59306) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
21
docs/changelogs/v23.12.4.15-stable.md
Normal file
@ -0,0 +1,21 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.12.4.15-stable (4233d111d20) FIXME as compared to v23.12.3.40-stable (a594704ae75)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix incorrect result of arrayElement / map[] on empty value [#59594](https://github.com/ClickHouse/ClickHouse/pull/59594) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash in topK when merging empty states [#59603](https://github.com/ClickHouse/ClickHouse/pull/59603) ([Raúl Marín](https://github.com/Algunenano)).
* Fix distributed table with a constant sharding key [#59606](https://github.com/ClickHouse/ClickHouse/pull/59606) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix 02720_row_policy_column_with_dots [#59453](https://github.com/ClickHouse/ClickHouse/pull/59453) ([Duc Canh Le](https://github.com/canhld94)).
* Pin python dependencies in stateless tests [#59663](https://github.com/ClickHouse/ClickHouse/pull/59663) ([Raúl Marín](https://github.com/Algunenano)).
@ -11,6 +11,7 @@ sidebar_label: 2023
* Remove the `status_info` configuration option and dictionaries status from the default Prometheus handler. [#54090](https://github.com/ClickHouse/ClickHouse/pull/54090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The experimental parts metadata cache is removed from the codebase. [#54215](https://github.com/ClickHouse/ClickHouse/pull/54215) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable setting `input_format_json_try_infer_numbers_from_strings` by default, so we don't try to infer numbers from strings in JSON formats by default to avoid possible parsing errors when sample data contains strings that look like a number. [#55099](https://github.com/ClickHouse/ClickHouse/pull/55099) ([Kruglov Pavel](https://github.com/Avogar)).
* IPv6 bloom filter indexes created prior to March 2023 are not compatible with the current version and have to be rebuilt. [#54200](https://github.com/ClickHouse/ClickHouse/pull/54200) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).

#### New Feature
* Added new type of authentication based on SSH keys. It works only for the Native TCP protocol. [#41109](https://github.com/ClickHouse/ClickHouse/pull/41109) ([George Gamezardashvili](https://github.com/InfJoker)).
438
docs/changelogs/v24.1.1.2048-stable.md
Normal file
@ -0,0 +1,438 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.1.1.2048-stable (5a024dfc093) FIXME as compared to v23.12.1.1368-stable (a2faa65b080)

#### Backward Incompatible Change
* The setting `print_pretty_type_names` is turned on by default. You can turn it off to keep the old behavior or `SET compatibility = '23.12'`. [#57726](https://github.com/ClickHouse/ClickHouse/pull/57726) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The MergeTree setting `clean_deleted_rows` is deprecated, it has no effect anymore. The `CLEANUP` keyword for `OPTIMIZE` is not allowed by default (unless `allow_experimental_replacing_merge_with_cleanup` is enabled). [#58316](https://github.com/ClickHouse/ClickHouse/pull/58316) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* The function `reverseDNSQuery` is no longer available. This closes [#58368](https://github.com/ClickHouse/ClickHouse/issues/58368). [#58369](https://github.com/ClickHouse/ClickHouse/pull/58369) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Enable various changes to improve the access control in the configuration file. These changes affect the behavior, and you check the `config.xml` in the `access_control_improvements` section. In case you are not confident, keep the values in the configuration file as they were in the previous version. [#58584](https://github.com/ClickHouse/ClickHouse/pull/58584) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Allow queries without aliases for subqueries for `PASTE JOIN`. [#58654](https://github.com/ClickHouse/ClickHouse/pull/58654) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Fix sumMapFiltered with NaN values. NaN values are now placed at the end (instead of randomly) and considered different from any values. `-0` is now also treated as equal to `0`; since 0 values are discarded, `-0` values are discarded too. [#58959](https://github.com/ClickHouse/ClickHouse/pull/58959) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* The function `visibleWidth` will behave according to the docs. In previous versions, it simply counted code points after string serialization, like the `lengthUTF8` function, but didn't consider zero-width and combining characters, full-width characters, tabs, and deletes. Now the behavior is changed accordingly. If you want to keep the old behavior, set `function_visible_width_behavior` to `0`, or set `compatibility` to `23.12` or lower. [#59022](https://github.com/ClickHouse/ClickHouse/pull/59022) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Kusto dialect is disabled until these two bugs are fixed: [#59037](https://github.com/ClickHouse/ClickHouse/issues/59037) and [#59036](https://github.com/ClickHouse/ClickHouse/issues/59036). [#59305](https://github.com/ClickHouse/ClickHouse/pull/59305) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Allow partitions from tables with different partition expressions to be attached when the destination table partition expression doesn't re-partition/ split the part. [#39507](https://github.com/ClickHouse/ClickHouse/pull/39507) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||||
|
* Added statement `SYSTEM RELOAD ASYNCHRONOUS METRICS` which updates the asynchronous metrics. Mostly useful for testing and development. [#53710](https://github.com/ClickHouse/ClickHouse/pull/53710) ([Robert Schulze](https://github.com/rschu1ze)).
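
A minimal usage sketch: force a refresh and then read a few rows from the existing `system.asynchronous_metrics` table (the `LIMIT` is only for illustration).

```sql
-- Refresh asynchronous metrics on demand instead of waiting for the periodic update,
-- then inspect a few of them.
SYSTEM RELOAD ASYNCHRONOUS METRICS;

SELECT metric, value
FROM system.asynchronous_metrics
ORDER BY metric
LIMIT 3;
```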
|
||||||
|
* Certain settings (currently `min_compress_block_size` and `max_compress_block_size`) can now be specified at column-level where they take precedence over the corresponding table-level setting. Example: `CREATE TABLE tab (col String SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840)) ENGINE = MergeTree ORDER BY tuple();`. [#55201](https://github.com/ClickHouse/ClickHouse/pull/55201) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Add `quantileDDSketch` aggregate function as well as the corresponding `quantilesDDSketch` and `medianDDSketch`. It is based on the DDSketch algorithm (https://www.vldb.org/pvldb/vol12/p2195-masson.pdf). [#56342](https://github.com/ClickHouse/ClickHouse/pull/56342) ([Srikanth Chekuri](https://github.com/srikanthccv)).
|
||||||
|
* Added function `seriesDecomposeSTL()` which decomposes a time series into a season, a trend and a residual component. [#57078](https://github.com/ClickHouse/ClickHouse/pull/57078) ([Bhavna Jindal](https://github.com/bhavnajindal)).
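
An illustrative call, assuming the first argument is the series and the second is the season length; the input values here are arbitrary demo data.

```sql
-- Decompose a short series with a period of 3 into its components.
SELECT seriesDecomposeSTL(
    [10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34],
    3) AS components;
```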
|
||||||
|
* Introduced MySQL Binlog Client for MaterializedMySQL: One binlog connection for many databases. [#57323](https://github.com/ClickHouse/ClickHouse/pull/57323) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Intel QuickAssist Technology (QAT) provides hardware-accelerated compression and cryptography. ClickHouse got a new compression codec `ZSTD_QAT` which utilizes QAT for zstd compression. The codec uses [Intel's QATlib](https://github.com/intel/qatlib) and [Intel's QAT ZSTD Plugin](https://github.com/intel/QAT-ZSTD-Plugin). Right now, only compression can be accelerated in hardware (a software fallback kicks in if QAT could not be initialized); decompression always runs in software. [#57509](https://github.com/ClickHouse/ClickHouse/pull/57509) ([jasperzhu](https://github.com/jinjunzh)).
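
A sketch of declaring the new codec on a column; the optional argument is assumed to work like the level argument of the regular `ZSTD` codec, and the table name is a placeholder.

```sql
-- Hypothetical table using the hardware-accelerated codec; decompression still runs in software.
CREATE TABLE qat_codec_demo
(
    message String CODEC(ZSTD_QAT(1))
)
ENGINE = MergeTree
ORDER BY tuple();
```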
|
||||||
|
* Implement a new way of generating object storage keys for s3 disks. The format can now be defined in terms of `re2` regex syntax with the `key_template` option in the disk description. [#57663](https://github.com/ClickHouse/ClickHouse/pull/57663) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Table system.dropped_tables_parts contains parts of system.dropped_tables tables (dropped but not yet removed tables). [#58038](https://github.com/ClickHouse/ClickHouse/pull/58038) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Implement Variant data type that represents a union of other data types. Type `Variant(T1, T2, ..., TN)` means that each row of this type has a value of either type `T1` or `T2` or ... or `TN` or none of them (`NULL` value). Variant type is available under a setting `allow_experimental_variant_type`. Reference: [#54864](https://github.com/ClickHouse/ClickHouse/issues/54864). [#58047](https://github.com/ClickHouse/ClickHouse/pull/58047) ([Kruglov Pavel](https://github.com/Avogar)).
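
A minimal sketch of the experimental type; the table name is a placeholder and the setting from the entry is assumed to be required before the type can be used.

```sql
SET allow_experimental_variant_type = 1;

CREATE TABLE variant_demo (v Variant(UInt64, String, Array(UInt64))) ENGINE = Memory;

-- Each row stores a value of one of the declared types, or NULL.
INSERT INTO variant_demo VALUES (NULL), (42), ('Hello, World!'), ([1, 2, 3]);

SELECT v FROM variant_demo;
```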
|
||||||
|
* Add settings `max_materialized_views_size_for_table` to limit the number of materialized views attached to a table. [#58068](https://github.com/ClickHouse/ClickHouse/pull/58068) ([zhongyuankai](https://github.com/zhongyuankai)).
|
||||||
|
* `clickhouse-format` improvements: support INSERT queries with `VALUES`, support comments (use `--comments` to output them), and support a `--max_line_length` option to format only long queries on multiple lines. [#58246](https://github.com/ClickHouse/ClickHouse/pull/58246) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Added `null_status_on_timeout_only_active` and `throw_only_active` modes for `distributed_ddl_output_mode` that allow to avoid waiting for inactive replicas. [#58350](https://github.com/ClickHouse/ClickHouse/pull/58350) ([Alexander Tokmakov](https://github.com/tavplubix)).
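
A sketch of one of the new modes; the cluster name `my_cluster` and the table definition are placeholders.

```sql
-- Do not wait for replicas that are currently inactive when running distributed DDL.
SET distributed_ddl_output_mode = 'throw_only_active';

CREATE TABLE default.ddl_demo ON CLUSTER my_cluster
(
    id UInt64
)
ENGINE = MergeTree
ORDER BY id;
```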
|
||||||
|
* Add table `system.database_engines`. [#58390](https://github.com/ClickHouse/ClickHouse/pull/58390) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||||
|
* Added a FROM <Replicas> modifier for the SYSTEM SYNC REPLICA LIGHTWEIGHT query. The FROM modifier ensures we wait for fetches and drop-ranges only for the specified source replicas, as well as any replica not in zookeeper or with an empty source_replica. [#58393](https://github.com/ClickHouse/ClickHouse/pull/58393) ([Jayme Bird](https://github.com/jaymebrd)).
|
||||||
|
* Add function `arrayShingles()` to compute subarrays, e.g. `arrayShingles([1, 2, 3, 4, 5], 3)` returns `[[1,2,3],[2,3,4],[3,4,5]]`. [#58396](https://github.com/ClickHouse/ClickHouse/pull/58396) ([Zheng Miao](https://github.com/zenmiao7)).
|
||||||
|
* Added functions `punycodeEncode()`, `punycodeDecode()`, `idnaEncode()` and `idnaDecode()` which are useful for translating international domain names to an ASCII representation according to the IDNA standard. [#58454](https://github.com/ClickHouse/ClickHouse/pull/58454) ([Robert Schulze](https://github.com/rschu1ze)).
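
Illustrative calls to the new functions; the inputs are arbitrary examples.

```sql
-- Encode/decode a label with Punycode and a full domain name with IDNA.
SELECT
    punycodeEncode('münchen')        AS puny_encoded,
    punycodeDecode('mnchen-3ya')     AS puny_decoded,
    idnaEncode('münchen.de')         AS idna_encoded,
    idnaDecode('xn--mnchen-3ya.de')  AS idna_decoded;
```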
|
||||||
|
* Added string similarity functions `damerauLevenshteinDistance()`, `jaroSimilarity()` and `jaroWinklerSimilarity()`. [#58531](https://github.com/ClickHouse/ClickHouse/pull/58531) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add two settings `output_format_compression_level` to change output compression level and `output_format_compression_zstd_window_log` to explicitly set compression window size and enable long-range mode for zstd compression if output compression method is `zstd`. Applied for `INTO OUTFILE` and when writing to table functions `file`, `url`, `hdfs`, `s3`, and `azureBlobStorage`. [#58539](https://github.com/ClickHouse/ClickHouse/pull/58539) ([Duc Canh Le](https://github.com/canhld94)).
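
A sketch of writing a zstd-compressed file with a higher level and long-range matching enabled; the file name is a placeholder and the compression method is assumed to be picked from the `.zst` extension as usual for `INTO OUTFILE`.

```sql
SET output_format_compression_level = 19;
SET output_format_compression_zstd_window_log = 27;

-- Write one million rows into a zstd-compressed TSV file.
SELECT number
FROM numbers(1000000)
INTO OUTFILE 'numbers.tsv.zst';
```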
|
||||||
|
* Automatically disable ANSI escape sequences in Pretty formats if the output is not a terminal. Add new `auto` mode to setting `output_format_pretty_color`. [#58614](https://github.com/ClickHouse/ClickHouse/pull/58614) ([Shaun Struwig](https://github.com/Blargian)).
|
||||||
|
* Added setting `update_insert_deduplication_token_in_dependent_materialized_views`. This setting allows to update insert deduplication token with table identifier during insert in dependent materialized views. Closes [#59165](https://github.com/ClickHouse/ClickHouse/issues/59165). [#59238](https://github.com/ClickHouse/ClickHouse/pull/59238) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* More cache-friendly final implementation. Note on the behaviour change: previously queries with `FINAL` modifier that read with a single stream (e.g. `max_threads=1`) produced sorted output without explicitly provided `ORDER BY` clause. This behaviour no longer exists when `enable_vertical_final = true` (and it is so by default). [#54366](https://github.com/ClickHouse/ClickHouse/pull/54366) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Optimize the arrayElement function when the input is array(map)/array(array(num))/array(array(string))/array(bigint)/array(decimal). The current implementation causes too many reallocs. The optimization speeds things up by ~6x, especially when the input type is array(map). [#56403](https://github.com/ClickHouse/ClickHouse/pull/56403) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Bypass `Poco::BasicBufferedStreamBuf` abstraction when reading from S3 (namely `ReadBufferFromIStream`) to avoid extra copying of data. [#56961](https://github.com/ClickHouse/ClickHouse/pull/56961) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Read a column only once while reading more than one subcolumn from it in Compact parts. [#57631](https://github.com/ClickHouse/ClickHouse/pull/57631) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Rewrite the AST of sum(column + literal) function. [#57853](https://github.com/ClickHouse/ClickHouse/pull/57853) ([Jiebin Sun](https://github.com/jiebinn)).
|
||||||
|
* The evaluation of function `match()` now utilizes skipping indices `ngrambf_v1` and `tokenbf_v1`. [#57882](https://github.com/ClickHouse/ClickHouse/pull/57882) ([凌涛](https://github.com/lingtaolf)).
|
||||||
|
* Default coordinator for parallel replicas is rewritten for better cache locality (same mark ranges are almost always assigned to the same replicas). Consistent hashing is used also during work stealing, so better tail latency is expected. It has been tested for linear scalability on a hundred of replicas. [#57968](https://github.com/ClickHouse/ClickHouse/pull/57968) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* MergeTree FINAL to not compare rows from same non-L0 part. [#58142](https://github.com/ClickHouse/ClickHouse/pull/58142) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Speed up iota calls (filling array with consecutive numbers). [#58271](https://github.com/ClickHouse/ClickHouse/pull/58271) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* The evaluation of function `match()` now utilizes inverted indices. [#58284](https://github.com/ClickHouse/ClickHouse/pull/58284) ([凌涛](https://github.com/lingtaolf)).
|
||||||
|
* Speedup MIN/MAX for non numeric types. [#58334](https://github.com/ClickHouse/ClickHouse/pull/58334) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Enable JIT compilation for aggregation without a key. Closes [#41461](https://github.com/ClickHouse/ClickHouse/issues/41461). Originally [#53757](https://github.com/ClickHouse/ClickHouse/issues/53757). [#58440](https://github.com/ClickHouse/ClickHouse/pull/58440) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The performance experiments of **OnTime** on the Intel server with up to AVX2 (and BMI2) support show that this change could effectively improve the QPS of **Q2** and **Q3** by **5.0%** and **3.7%** through reducing the cycle ratio of the hotspot, **_DB::MergeTreeRangeReader::ReadResult::optimize_**, **from 11.48% to 1.09%** and **from 8.09% to 0.67%** respectively while having no impact on others. [#58800](https://github.com/ClickHouse/ClickHouse/pull/58800) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||||
|
* Use one thread less in `clickhouse-local`. [#58968](https://github.com/ClickHouse/ClickHouse/pull/58968) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Large aggregation states of `uniqExact` will be merged in parallel in distributed queries. [#59009](https://github.com/ClickHouse/ClickHouse/pull/59009) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Lower memory usage after reading from `MergeTree` tables. [#59290](https://github.com/ClickHouse/ClickHouse/pull/59290) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Lower memory usage in vertical merges. [#59340](https://github.com/ClickHouse/ClickHouse/pull/59340) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Enable MySQL/MariaDB on macOS. This closes [#21191](https://github.com/ClickHouse/ClickHouse/issues/21191). [#46316](https://github.com/ClickHouse/ClickHouse/pull/46316) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Do not interpret numbers with leading zeroes as octals. [#55575](https://github.com/ClickHouse/ClickHouse/pull/55575) ([Joanna Hulboj](https://github.com/jh0x)).
|
||||||
|
* Replace HTTP outgoing buffering based on std ostream with CH Buffer. Add bytes counting metrics for interfaces. [#56064](https://github.com/ClickHouse/ClickHouse/pull/56064) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Disable `max_rows_in_set_to_optimize_join` by default. [#56396](https://github.com/ClickHouse/ClickHouse/pull/56396) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Add `<host_name>` config parameter that allows avoiding resolving hostnames in DDLWorker. This mitigates the possibility of the queue being stuck in case of a change in cluster definition. Closes [#57573](https://github.com/ClickHouse/ClickHouse/issues/57573). [#57603](https://github.com/ClickHouse/ClickHouse/pull/57603) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Increase `load_metadata_threads` to 16 for the filesystem cache. It will make the server start up faster. [#57732](https://github.com/ClickHouse/ClickHouse/pull/57732) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve the `multiIf` function performance when the type is Nullable. [#57745](https://github.com/ClickHouse/ClickHouse/pull/57745) ([KevinyhZou](https://github.com/KevinyhZou)).
|
||||||
|
* Add ability to throttle merges/mutations (`max_mutations_bandwidth_for_server`/`max_merges_bandwidth_for_server`). [#57877](https://github.com/ClickHouse/ClickHouse/pull/57877) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Replaced undocumented (boolean) column `is_hot_reloadable` in system table `system.server_settings` by (Enum8) column `changeable_without_restart` with possible values `No`, `Yes`, `IncreaseOnly` and `DecreaseOnly`. Also documented the column. [#58029](https://github.com/ClickHouse/ClickHouse/pull/58029) ([skyoct](https://github.com/skyoct)).
|
||||||
|
* ClusterDiscovery supports setting username and password, close [#58063](https://github.com/ClickHouse/ClickHouse/issues/58063). [#58123](https://github.com/ClickHouse/ClickHouse/pull/58123) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Support query parameters in ALTER TABLE ... PART. [#58297](https://github.com/ClickHouse/ClickHouse/pull/58297) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Create consumers for Kafka tables on the fly (but keep them for some period, `kafka_consumers_pool_ttl_ms`, since last used). This should fix the problem with statistics for `system.kafka_consumers` (which were not updated when nobody reads from the Kafka table, which also leads to a live memory leak and slow table detach), and this PR also enables stats for `system.kafka_consumers` by default again. [#58310](https://github.com/ClickHouse/ClickHouse/pull/58310) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Added `sparkBar` as an alias to `sparkbar`. [#58335](https://github.com/ClickHouse/ClickHouse/pull/58335) ([凌涛](https://github.com/lingtaolf)).
|
||||||
|
* Avoid sending ComposeObject requests after upload to GCS. [#58343](https://github.com/ClickHouse/ClickHouse/pull/58343) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Correctly handle keys with dot in the name in configurations XMLs. [#58354](https://github.com/ClickHouse/ClickHouse/pull/58354) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Added comments (brief descriptions) to all columns of system tables. There are several reasons for this: we use system tables a lot and sometimes it can be very difficult for a developer to understand the purpose and the meaning of a particular column; we change (add new ones or modify existing) system tables a lot and the documentation for them is always outdated, for example take a look at the documentation page for [`system.parts`](https://clickhouse.com/docs/en/operations/system-tables/parts), which misses a lot of columns; and we would like to eventually generate documentation directly from ClickHouse. [#58356](https://github.com/ClickHouse/ClickHouse/pull/58356) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Allow to configure any kind of object storage with any kind of metadata type. [#58357](https://github.com/ClickHouse/ClickHouse/pull/58357) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Make function `format` return constant on constant arguments. This closes [#58355](https://github.com/ClickHouse/ClickHouse/issues/58355). [#58358](https://github.com/ClickHouse/ClickHouse/pull/58358) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Attach all system tables in `clickhouse-local`, including `system.parts`. This closes [#58312](https://github.com/ClickHouse/ClickHouse/issues/58312). [#58359](https://github.com/ClickHouse/ClickHouse/pull/58359) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Support for `Enum` data types in function `transform`. This closes [#58241](https://github.com/ClickHouse/ClickHouse/issues/58241). [#58360](https://github.com/ClickHouse/ClickHouse/pull/58360) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Allow registering database engines independently. [#58365](https://github.com/ClickHouse/ClickHouse/pull/58365) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||||
|
* Adding a setting `max_estimated_execution_time` to separate `max_execution_time` and `max_estimated_execution_time`. [#58402](https://github.com/ClickHouse/ClickHouse/pull/58402) ([Zhang Yifan](https://github.com/zhangyifan27)).
|
||||||
|
* Allow registering interpreters independently. [#58443](https://github.com/ClickHouse/ClickHouse/pull/58443) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||||
|
* Provide hint when an invalid database engine name is used. [#58444](https://github.com/ClickHouse/ClickHouse/pull/58444) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||||
|
* Avoid huge memory consumption during Keeper startup for more cases. [#58455](https://github.com/ClickHouse/ClickHouse/pull/58455) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Add settings for better control of indexes type in Arrow dictionary. Use signed integer type for indexes by default as Arrow recommends. Closes [#57401](https://github.com/ClickHouse/ClickHouse/issues/57401). [#58519](https://github.com/ClickHouse/ClickHouse/pull/58519) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Added function `sqidDecode()` which decodes [Sqids](https://sqids.org/). [#58544](https://github.com/ClickHouse/ClickHouse/pull/58544) ([Robert Schulze](https://github.com/rschu1ze)).
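
A round-trip sketch, assuming the pre-existing `sqid()` encoder is available to produce an input for the new decoder.

```sql
-- Encode a few numbers into a Sqid and decode them back.
SELECT sqidDecode(sqid(1, 2, 3)) AS decoded_numbers;
```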
|
||||||
|
* Allow to read Bool values into String in JSON input formats. It's done under a setting `input_format_json_read_bools_as_strings` that is enabled by default. [#58561](https://github.com/ClickHouse/ClickHouse/pull/58561) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Implement [#58575](https://github.com/ClickHouse/ClickHouse/issues/58575) Support `CLICKHOUSE_PASSWORD_FILE ` environment variable when running the docker image. [#58583](https://github.com/ClickHouse/ClickHouse/pull/58583) ([Eyal Halpern Shalev](https://github.com/Eyal-Shalev)).
|
||||||
|
* When executing some queries, which require a lot of streams for reading data, the error `"Paste JOIN requires sorted tables only"` was previously thrown. Now the number of streams is resized to 1 in that case. [#58608](https://github.com/ClickHouse/ClickHouse/pull/58608) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Add `SYSTEM JEMALLOC PURGE` for purging unused jemalloc pages, `SYSTEM JEMALLOC [ ENABLE | DISABLE | FLUSH ] PROFILE` for controlling jemalloc profile if the profiler is enabled. Add jemalloc-related 4LW command in Keeper: `jmst` for dumping jemalloc stats, `jmfp`, `jmep`, `jmdp` for controlling jemalloc profile if the profiler is enabled. [#58665](https://github.com/ClickHouse/ClickHouse/pull/58665) ([Antonio Andelic](https://github.com/antonio2368)).
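
The server-side statements from this entry, shown together; the profile commands are assumed to be no-ops unless jemalloc profiling is enabled.

```sql
-- Return unused jemalloc pages to the OS.
SYSTEM JEMALLOC PURGE;

-- Control the jemalloc heap profiler (only effective if the profiler is enabled).
SYSTEM JEMALLOC ENABLE PROFILE;
SYSTEM JEMALLOC FLUSH PROFILE;
SYSTEM JEMALLOC DISABLE PROFILE;
```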
|
||||||
|
* Better message for INVALID_IDENTIFIER error. [#58703](https://github.com/ClickHouse/ClickHouse/pull/58703) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Improved handling of signed numeric literals in normalizeQuery. [#58710](https://github.com/ClickHouse/ClickHouse/pull/58710) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||||
|
* Support Point data type for MySQL. [#58721](https://github.com/ClickHouse/ClickHouse/pull/58721) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* When comparing a Float32 column and a const string, read the string as Float32 (instead of Float64). [#58724](https://github.com/ClickHouse/ClickHouse/pull/58724) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Improve S3 compatibility, add Ecloud EOS storage support. [#58786](https://github.com/ClickHouse/ClickHouse/pull/58786) ([xleoken](https://github.com/xleoken)).
|
||||||
|
* Allow `KILL QUERY` to cancel backups / restores. This PR also makes running backups and restores visible in `system.processes`. Also there is a new setting in the server configuration now - `shutdown_wait_backups_and_restores` (default=true) which makes the server either wait on shutdown for all running backups and restores to finish or just cancel them. [#58804](https://github.com/ClickHouse/ClickHouse/pull/58804) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Avro format support Zstd codec. Closes [#58735](https://github.com/ClickHouse/ClickHouse/issues/58735). [#58805](https://github.com/ClickHouse/ClickHouse/pull/58805) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* MySQL interface gained support for `net_write_timeout` and `net_read_timeout` settings. `net_write_timeout` is translated into the native `send_timeout` ClickHouse setting and, similarly, `net_read_timeout` into `receive_timeout`. Fixed an issue where it was possible to set MySQL `sql_select_limit` setting only if the entire statement was in upper case. [#58835](https://github.com/ClickHouse/ClickHouse/pull/58835) ([Serge Klochkov](https://github.com/slvrtrn)).
|
||||||
|
* Fixing a problem described in [#58719](https://github.com/ClickHouse/ClickHouse/issues/58719). [#58841](https://github.com/ClickHouse/ClickHouse/pull/58841) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Make sure that for custom (created from SQL) disks either `filesystem_caches_path` (a common directory prefix for all filesystem caches) or `custom_cached_disks_base_directory` (a common directory prefix only for filesystem caches created from custom disks) is specified in the server config. `custom_cached_disks_base_directory` has higher priority for custom disks over `filesystem_caches_path`, which is used if the former one is absent. The filesystem cache setting `path` must lie inside that directory, otherwise an exception will be thrown preventing the disk from being created. This does not affect disks created on an older version when the server was upgraded; in that case the exception is not thrown, to allow the server to start successfully. `custom_cached_disks_base_directory` is added to the default server config as `/var/lib/clickhouse/caches/`. Closes [#57825](https://github.com/ClickHouse/ClickHouse/issues/57825). [#58869](https://github.com/ClickHouse/ClickHouse/pull/58869) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* MySQL interface gained compatibility with `SHOW WARNINGS`/`SHOW COUNT(*) WARNINGS` queries, though the returned result is always an empty set. [#58929](https://github.com/ClickHouse/ClickHouse/pull/58929) ([Serge Klochkov](https://github.com/slvrtrn)).
|
||||||
|
* Skip unavailable replicas when executing parallel distributed `INSERT SELECT`. [#58931](https://github.com/ClickHouse/ClickHouse/pull/58931) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Display word-descriptive log level while enabling structured log formatting in json. [#58936](https://github.com/ClickHouse/ClickHouse/pull/58936) ([Tim Liou](https://github.com/wheatdog)).
|
||||||
|
* MySQL interface gained support for `CAST(x AS SIGNED)` and `CAST(x AS UNSIGNED)` statements via data type aliases: `SIGNED` for Int64, and `UNSIGNED` for UInt64. This improves compatibility with BI tools such as Looker Studio. [#58954](https://github.com/ClickHouse/ClickHouse/pull/58954) ([Serge Klochkov](https://github.com/slvrtrn)).
|
||||||
|
* Function `seriesDecomposeSTL()` now returns a baseline component as season + trend components. [#58961](https://github.com/ClickHouse/ClickHouse/pull/58961) ([Bhavna Jindal](https://github.com/bhavnajindal)).
|
||||||
|
* Fix memory management in copyDataToS3File. [#58962](https://github.com/ClickHouse/ClickHouse/pull/58962) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Change working directory to data path in docker container. [#58975](https://github.com/ClickHouse/ClickHouse/pull/58975) ([cangyin](https://github.com/cangyin)).
|
||||||
|
* Added setting for Azure Blob Storage `azure_max_unexpected_write_error_retries` , can also be set from config under azure section. [#59001](https://github.com/ClickHouse/ClickHouse/pull/59001) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Keeper improvement: reduce Keeper's memory usage for stored nodes. [#59002](https://github.com/ClickHouse/ClickHouse/pull/59002) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Allow server to start with broken data lake table. Closes [#58625](https://github.com/ClickHouse/ClickHouse/issues/58625). [#59080](https://github.com/ClickHouse/ClickHouse/pull/59080) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fixes https://github.com/ClickHouse/ClickHouse/pull/59120#issuecomment-1906177350. [#59122](https://github.com/ClickHouse/ClickHouse/pull/59122) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||||
|
* The state of URL's #hash in the dashboard is now compressed using [lz-string](https://github.com/pieroxy/lz-string). The default size of the state string is compressed from 6856B to 2823B. [#59124](https://github.com/ClickHouse/ClickHouse/pull/59124) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Allow to ignore schema evolution in Iceberg table engine and read all data using schema specified by the user on table creation or latest schema parsed from metadata on table creation. This is done under a setting `iceberg_engine_ignore_schema_evolution` that is disabled by default. Note that enabling this setting can lead to incorrect result as in case of evolved schema all data files will be read using the same schema. [#59133](https://github.com/ClickHouse/ClickHouse/pull/59133) ([Kruglov Pavel](https://github.com/Avogar)).
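
A usage sketch with the setting enabled; the endpoint and credentials passed to the `iceberg` table function are hypothetical placeholders.

```sql
-- Read an Iceberg table using the creation-time schema even if the schema has evolved since.
SET iceberg_engine_ignore_schema_evolution = 1;

SELECT *
FROM iceberg('https://storage.example.com/warehouse/events', 'ACCESS_KEY_ID', 'SECRET_ACCESS_KEY')
LIMIT 10;
```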
|
||||||
|
* Prohibit mutable operations (`INSERT`/`ALTER`/`OPTIMIZE`/...) on read-only/write-once storages with a proper `TABLE_IS_READ_ONLY` error (to avoid leftovers). Avoid leaving left-overs on write-once disks (`format_version.txt`) on `CREATE`/`ATTACH`. Ignore `DROP` for `ReplicatedMergeTree` (so as for `MergeTree`). Fix iterating over `s3_plain` (`MetadataStorageFromPlainObjectStorage::iterateDirectory`). Note read-only is `web` disk, and write-once is `s3_plain`. [#59170](https://github.com/ClickHouse/ClickHouse/pull/59170) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* MySQL interface gained support for `net_write_timeout` and `net_read_timeout` settings. `net_write_timeout` is translated into the native `send_timeout` ClickHouse setting and, similarly, `net_read_timeout` into `receive_timeout`. Fixed an issue where it was possible to set MySQL `sql_select_limit` setting only if the entire statement was in upper case. [#59293](https://github.com/ClickHouse/ClickHouse/pull/59293) ([Serge Klochkov](https://github.com/slvrtrn)).
|
||||||
|
* Fix bug in experimental `_block_number` column which could lead to logical error during complex combination of `ALTER`s and `merge`s. Fixes [#56202](https://github.com/ClickHouse/ClickHouse/issues/56202). Replaces [#58601](https://github.com/ClickHouse/ClickHouse/issues/58601). CC @SmitaRKulkarni. [#59295](https://github.com/ClickHouse/ClickHouse/pull/59295) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Play UI understands when an exception is returned inside JSON. Adjustment for [#52853](https://github.com/ClickHouse/ClickHouse/issues/52853). [#59303](https://github.com/ClickHouse/ClickHouse/pull/59303) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* `/binary` HTTP handler allows to specify user, host, and optionally, password in the query string. [#59311](https://github.com/ClickHouse/ClickHouse/pull/59311) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Support backups for compressed in-memory tables. This closes [#57893](https://github.com/ClickHouse/ClickHouse/issues/57893). [#59315](https://github.com/ClickHouse/ClickHouse/pull/59315) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve exception message of function regexp_extract, close [#56393](https://github.com/ClickHouse/ClickHouse/issues/56393). [#59319](https://github.com/ClickHouse/ClickHouse/pull/59319) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Support the FORMAT clause in BACKUP and RESTORE queries. [#59338](https://github.com/ClickHouse/ClickHouse/pull/59338) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Function `concatWithSeparator()` now supports arbitrary argument types (instead of only `String` and `FixedString` arguments). For example, `SELECT concatWithSeparator('.', 'number', 1)` now returns `number.1`. [#59341](https://github.com/ClickHouse/ClickHouse/pull/59341) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Improve aliases for clickhouse binary (now `ch`/`clickhouse` is `clickhouse-local` or `clickhouse` depends on the arguments) and add bash completion for new aliases. [#58344](https://github.com/ClickHouse/ClickHouse/pull/58344) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add settings changes check to CI to check that all settings changes are reflected in settings changes history. [#58555](https://github.com/ClickHouse/ClickHouse/pull/58555) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Use tables directly attached from S3 in stateful tests. [#58791](https://github.com/ClickHouse/ClickHouse/pull/58791) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Save the whole `fuzzer.log` as an archive instead of the last 100k lines. `tail -n 100000` often removes lines with table definitions. [#58821](https://github.com/ClickHouse/ClickHouse/pull/58821) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Enable Rust on OSX ARM64 (this will add fuzzy search in the client with skim and the prql language; though I don't think there are people who host ClickHouse on darwin, so it is mostly for fuzzy search in the client, I would say). [#59272](https://github.com/ClickHouse/ClickHouse/pull/59272) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
|
||||||
|
* Add join keys conversion for nested lowcardinality [#51550](https://github.com/ClickHouse/ClickHouse/pull/51550) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Flatten only true Nested type if flatten_nested=1, not all Array(Tuple) [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix a bug with projections and the aggregate_functions_null_for_empty setting during insertion. [#56944](https://github.com/ClickHouse/ClickHouse/pull/56944) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fixed potential exception due to stale profile UUID [#57263](https://github.com/ClickHouse/ClickHouse/pull/57263) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||||
|
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Ignore MVs with dropped target table during pushing to views [#57520](https://github.com/ClickHouse/ClickHouse/pull/57520) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* [RFC] Eliminate possible race between ALTER_METADATA and MERGE_PARTS [#57755](https://github.com/ClickHouse/ClickHouse/pull/57755) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix the exprs order bug in group by with rollup [#57786](https://github.com/ClickHouse/ClickHouse/pull/57786) ([Chen768959](https://github.com/Chen768959)).
|
||||||
|
* Fix lost blobs after dropping a replica with broken detached parts [#58333](https://github.com/ClickHouse/ClickHouse/pull/58333) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Allow users to work with symlinks in user_files_path (again) [#58447](https://github.com/ClickHouse/ClickHouse/pull/58447) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix segfault when graphite table does not have agg function [#58453](https://github.com/ClickHouse/ClickHouse/pull/58453) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Delay reading from StorageKafka to allow multiple reads in materialized views [#58477](https://github.com/ClickHouse/ClickHouse/pull/58477) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Fix a stupid case of intersecting parts [#58482](https://github.com/ClickHouse/ClickHouse/pull/58482) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* MergeTreePrefetchedReadPool disable for LIMIT only queries [#58505](https://github.com/ClickHouse/ClickHouse/pull/58505) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Enable ordinary databases while restoration [#58520](https://github.com/ClickHouse/ClickHouse/pull/58520) ([Jihyuk Bok](https://github.com/tomahawk28)).
|
||||||
|
* Fix hive threadpool read ORC/Parquet/... Failed [#58537](https://github.com/ClickHouse/ClickHouse/pull/58537) ([sunny](https://github.com/sunny19930321)).
|
||||||
|
* Hide credentials in system.backup_log base_backup_name column [#58550](https://github.com/ClickHouse/ClickHouse/pull/58550) ([Daniel Pozo Escalona](https://github.com/danipozo)).
|
||||||
|
* toStartOfInterval rounding for milli-/microsecond values [#58557](https://github.com/ClickHouse/ClickHouse/pull/58557) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Disable max_joined_block_rows in ConcurrentHashJoin [#58595](https://github.com/ClickHouse/ClickHouse/pull/58595) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix join using nullable in old analyzer [#58596](https://github.com/ClickHouse/ClickHouse/pull/58596) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* `makeDateTime64()`: Allow non-const fraction argument [#58597](https://github.com/ClickHouse/ClickHouse/pull/58597) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix possible NULL dereference during symbolizing inline frames [#58607](https://github.com/ClickHouse/ClickHouse/pull/58607) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Improve isolation of query cache entries under re-created users or role switches [#58611](https://github.com/ClickHouse/ClickHouse/pull/58611) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix broken partition key analysis when doing projection optimization [#58638](https://github.com/ClickHouse/ClickHouse/pull/58638) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Query cache: Fix per-user quota [#58731](https://github.com/ClickHouse/ClickHouse/pull/58731) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix stream partitioning in parallel window functions [#58739](https://github.com/ClickHouse/ClickHouse/pull/58739) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Fix double destroy call on exception throw in addBatchLookupTable8 [#58745](https://github.com/ClickHouse/ClickHouse/pull/58745) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Don't process requests in Keeper during shutdown [#58765](https://github.com/ClickHouse/ClickHouse/pull/58765) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix Segfault in `SlabsPolygonIndex::find` [#58771](https://github.com/ClickHouse/ClickHouse/pull/58771) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Fix JSONExtract function for LowCardinality(Nullable) columns [#58808](https://github.com/ClickHouse/ClickHouse/pull/58808) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Table CREATE DROP Poco::Logger memory leak fix [#58831](https://github.com/ClickHouse/ClickHouse/pull/58831) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix HTTP compressors finalization [#58846](https://github.com/ClickHouse/ClickHouse/pull/58846) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Multiple read file log storage in mv [#58877](https://github.com/ClickHouse/ClickHouse/pull/58877) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Restriction for the access key id for s3. [#58900](https://github.com/ClickHouse/ClickHouse/pull/58900) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||||
|
* Fix possible crash in clickhouse-local during loading suggestions [#58907](https://github.com/ClickHouse/ClickHouse/pull/58907) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix crash when indexHint() is used [#58911](https://github.com/ClickHouse/ClickHouse/pull/58911) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Fix StorageURL forgetting headers on server restart [#58933](https://github.com/ClickHouse/ClickHouse/pull/58933) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Analyzer: fix storage replacement with insertion block [#58958](https://github.com/ClickHouse/ClickHouse/pull/58958) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Fix seek in ReadBufferFromZipArchive [#58966](https://github.com/ClickHouse/ClickHouse/pull/58966) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* `DROP INDEX` of inverted index now removes all relevant files from persistence [#59040](https://github.com/ClickHouse/ClickHouse/pull/59040) ([mochi](https://github.com/MochiXu)).
|
||||||
|
* Fix data race on query_factories_info [#59049](https://github.com/ClickHouse/ClickHouse/pull/59049) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Disable "Too many redirects" error retry [#59099](https://github.com/ClickHouse/ClickHouse/pull/59099) ([skyoct](https://github.com/skyoct)).
|
||||||
|
* Fix aggregation issue in mixed x86_64 and ARM clusters [#59132](https://github.com/ClickHouse/ClickHouse/pull/59132) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
|
* Fix not started database shutdown deadlock [#59137](https://github.com/ClickHouse/ClickHouse/pull/59137) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Fix: LIMIT BY and LIMIT in distributed query [#59153](https://github.com/ClickHouse/ClickHouse/pull/59153) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix crash with nullable timezone for `toString` [#59190](https://github.com/ClickHouse/ClickHouse/pull/59190) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Fix abort in iceberg metadata on bad file paths [#59275](https://github.com/ClickHouse/ClickHouse/pull/59275) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix architecture name in select of Rust target [#59307](https://github.com/ClickHouse/ClickHouse/pull/59307) ([p1rattttt](https://github.com/p1rattttt)).
|
||||||
|
* Fix not-ready set for system.tables [#59351](https://github.com/ClickHouse/ClickHouse/pull/59351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix lazy initialization in RabbitMQ [#59352](https://github.com/ClickHouse/ClickHouse/pull/59352) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
|
||||||
|
#### NO CL ENTRY
|
||||||
|
|
||||||
|
* NO CL ENTRY: 'Revert "Refreshable materialized views (takeover)"'. [#58296](https://github.com/ClickHouse/ClickHouse/pull/58296) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* NO CL ENTRY: 'Revert "Fix an error in the release script - it didn't allow to make 23.12."'. [#58381](https://github.com/ClickHouse/ClickHouse/pull/58381) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* NO CL ENTRY: 'Revert "Use CH Buffer for HTTP out stream, add metrics for interfaces"'. [#58450](https://github.com/ClickHouse/ClickHouse/pull/58450) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* NO CL ENTRY: 'Second attempt: Use CH Buffer for HTTP out stream, add metrics for interfaces'. [#58475](https://github.com/ClickHouse/ClickHouse/pull/58475) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* NO CL ENTRY: 'Revert "Merging [#53757](https://github.com/ClickHouse/ClickHouse/issues/53757)"'. [#58542](https://github.com/ClickHouse/ClickHouse/pull/58542) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* NO CL ENTRY: 'Revert "Add support for MySQL `net_write_timeout` and `net_read_timeout` settings"'. [#58872](https://github.com/ClickHouse/ClickHouse/pull/58872) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* NO CL ENTRY: 'Revert "Extend performance test norm_dist.xml"'. [#58989](https://github.com/ClickHouse/ClickHouse/pull/58989) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* NO CL ENTRY: 'Revert "Add a test for [#47892](https://github.com/ClickHouse/ClickHouse/issues/47892)"'. [#58990](https://github.com/ClickHouse/ClickHouse/pull/58990) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* NO CL ENTRY: 'Revert "Allow parallel replicas for JOIN with analyzer [part 1]."'. [#59059](https://github.com/ClickHouse/ClickHouse/pull/59059) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* NO CL ENTRY: 'Revert "Consume leading zeroes when parsing a number in ConstantExpressionTemplate"'. [#59070](https://github.com/ClickHouse/ClickHouse/pull/59070) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* NO CL ENTRY: 'Revert "Revert "Allow parallel replicas for JOIN with analyzer [part 1].""'. [#59076](https://github.com/ClickHouse/ClickHouse/pull/59076) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* NO CL ENTRY: 'Revert "Allow to attach partition from table with different partition expression when destination partition expression doesn't re-partition"'. [#59120](https://github.com/ClickHouse/ClickHouse/pull/59120) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* NO CL ENTRY: 'DisksApp.cpp: fix typo (specifiged → specified)'. [#59140](https://github.com/ClickHouse/ClickHouse/pull/59140) ([Nikolay Edigaryev](https://github.com/edigaryev)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Analyzer: Fix resolving subcolumns in JOIN [#49703](https://github.com/ClickHouse/ClickHouse/pull/49703) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Analyzer: always qualify execution names [#53705](https://github.com/ClickHouse/ClickHouse/pull/53705) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Insert quorum: check host node version in addition [#55528](https://github.com/ClickHouse/ClickHouse/pull/55528) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Remove more old code of projection analysis [#55579](https://github.com/ClickHouse/ClickHouse/pull/55579) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Better exception messages in input formats [#57053](https://github.com/ClickHouse/ClickHouse/pull/57053) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Parallel replicas custom key: skip unavailable replicas [#57235](https://github.com/ClickHouse/ClickHouse/pull/57235) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Small change in log message in MergeTreeDataMergerMutator [#57550](https://github.com/ClickHouse/ClickHouse/pull/57550) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* fs cache: small optimization [#57615](https://github.com/ClickHouse/ClickHouse/pull/57615) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Customizable dependency failure handling for AsyncLoader [#57697](https://github.com/ClickHouse/ClickHouse/pull/57697) ([Sergei Trifonov](https://github.com/serxa)).
* Bring test back [#57700](https://github.com/ClickHouse/ClickHouse/pull/57700) ([Nikita Taranov](https://github.com/nickitat)).
* Change default database name in clickhouse-local to 'default' [#57774](https://github.com/ClickHouse/ClickHouse/pull/57774) ([Kruglov Pavel](https://github.com/Avogar)).
* Add option `--show-whitespaces-in-diff` to clickhouse-test [#57870](https://github.com/ClickHouse/ClickHouse/pull/57870) ([vdimir](https://github.com/vdimir)).
* Update `query_masking_rules` when reloading the config, attempt 2 [#57993](https://github.com/ClickHouse/ClickHouse/pull/57993) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Remove unneeded parameter `use_external_buffer` from `AsynchronousReadBuffer*` [#58077](https://github.com/ClickHouse/ClickHouse/pull/58077) ([Nikita Taranov](https://github.com/nickitat)).
* Print another message in Bugfix check if the internal check has failed [#58091](https://github.com/ClickHouse/ClickHouse/pull/58091) ([vdimir](https://github.com/vdimir)).
* Refactor StorageMerge virtual columns filtering. [#58255](https://github.com/ClickHouse/ClickHouse/pull/58255) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Analyzer: fix tuple comparison when result is always null [#58266](https://github.com/ClickHouse/ClickHouse/pull/58266) ([vdimir](https://github.com/vdimir)).
* Fix an error in the release script: it didn't allow making 23.12. [#58288](https://github.com/ClickHouse/ClickHouse/pull/58288) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.12.1.1368-stable [#58290](https://github.com/ClickHouse/ClickHouse/pull/58290) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix test_storage_s3_queue/test.py::test_drop_table [#58293](https://github.com/ClickHouse/ClickHouse/pull/58293) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix timeout in 01732_race_condition_storage_join_long [#58298](https://github.com/ClickHouse/ClickHouse/pull/58298) ([vdimir](https://github.com/vdimir)).
* Handle another case for preprocessing in Keeper [#58308](https://github.com/ClickHouse/ClickHouse/pull/58308) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable max_bytes_before_external* in 00172_hits_joins [#58309](https://github.com/ClickHouse/ClickHouse/pull/58309) ([vdimir](https://github.com/vdimir)).
* Analyzer: support functional arguments in USING clause [#58317](https://github.com/ClickHouse/ClickHouse/pull/58317) ([Dmitry Novik](https://github.com/novikd)).
* Fixed logical error in CheckSortedTransform [#58318](https://github.com/ClickHouse/ClickHouse/pull/58318) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Refreshable materialized views again [#58320](https://github.com/ClickHouse/ClickHouse/pull/58320) ([Michael Kolupaev](https://github.com/al13n321)).
* Organize symbols from src/* into DB namespace [#58336](https://github.com/ClickHouse/ClickHouse/pull/58336) ([Amos Bird](https://github.com/amosbird)).
* Add a style check against DOS and Windows [#58345](https://github.com/ClickHouse/ClickHouse/pull/58345) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Check what happens if array-joined columns are removed from KeyCondition [#58346](https://github.com/ClickHouse/ClickHouse/pull/58346) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Upload time of the perf tests into artifacts as test_duration_ms [#58348](https://github.com/ClickHouse/ClickHouse/pull/58348) ([Azat Khuzhin](https://github.com/azat)).
* Keep exception format string in retries ctl [#58351](https://github.com/ClickHouse/ClickHouse/pull/58351) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix replication.lib helper (system.mutations has database not current_database) [#58352](https://github.com/ClickHouse/ClickHouse/pull/58352) ([Azat Khuzhin](https://github.com/azat)).
* Refactor StorageHDFS and StorageFile virtual columns filtering [#58353](https://github.com/ClickHouse/ClickHouse/pull/58353) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix suspended workers for AsyncLoader [#58362](https://github.com/ClickHouse/ClickHouse/pull/58362) ([Sergei Trifonov](https://github.com/serxa)).
* Remove stale events from README [#58364](https://github.com/ClickHouse/ClickHouse/pull/58364) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not fail the CI on an expired token [#58384](https://github.com/ClickHouse/ClickHouse/pull/58384) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a test for [#38534](https://github.com/ClickHouse/ClickHouse/issues/38534) [#58391](https://github.com/ClickHouse/ClickHouse/pull/58391) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* fix database engine validation inside database factory [#58395](https://github.com/ClickHouse/ClickHouse/pull/58395) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix bad formatting of the `timeDiff` compatibility alias [#58398](https://github.com/ClickHouse/ClickHouse/pull/58398) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a comment; remove unused method; stop using pointers [#58399](https://github.com/ClickHouse/ClickHouse/pull/58399) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test_user_valid_until [#58409](https://github.com/ClickHouse/ClickHouse/pull/58409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Make a test not depend on the lack of floating point associativity [#58439](https://github.com/ClickHouse/ClickHouse/pull/58439) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `02944_dynamically_change_filesystem_cache_size` [#58445](https://github.com/ClickHouse/ClickHouse/pull/58445) ([Nikolay Degterinsky](https://github.com/evillique)).
* Analyzer: Fix LOGICAL_ERROR with LowCardinality [#58457](https://github.com/ClickHouse/ClickHouse/pull/58457) ([Dmitry Novik](https://github.com/novikd)).
* Replace `std::regex` by re2 [#58458](https://github.com/ClickHouse/ClickHouse/pull/58458) ([Robert Schulze](https://github.com/rschu1ze)).
* Improve perf tests [#58478](https://github.com/ClickHouse/ClickHouse/pull/58478) ([Raúl Marín](https://github.com/Algunenano)).
* Check if I can remove KeyCondition analysis on AST. [#58480](https://github.com/ClickHouse/ClickHouse/pull/58480) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix some thread pool settings not updating at runtime [#58485](https://github.com/ClickHouse/ClickHouse/pull/58485) ([Michael Kolupaev](https://github.com/al13n321)).
* Lower log levels for some Raft logs to new test level [#58487](https://github.com/ClickHouse/ClickHouse/pull/58487) ([Antonio Andelic](https://github.com/antonio2368)).
* PartsSplitter small refactoring [#58506](https://github.com/ClickHouse/ClickHouse/pull/58506) ([Maksim Kita](https://github.com/kitaisreal)).
* Sync content of the docker test images [#58507](https://github.com/ClickHouse/ClickHouse/pull/58507) ([Max K.](https://github.com/maxknv)).
* CI: move ci-specifics from job scripts to ci.py [#58516](https://github.com/ClickHouse/ClickHouse/pull/58516) ([Max K.](https://github.com/maxknv)).
* Minor fixups for `sqid()` [#58517](https://github.com/ClickHouse/ClickHouse/pull/58517) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v23.12.2.59-stable [#58545](https://github.com/ClickHouse/ClickHouse/pull/58545) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.11.4.24-stable [#58546](https://github.com/ClickHouse/ClickHouse/pull/58546) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.8.9.54-lts [#58547](https://github.com/ClickHouse/ClickHouse/pull/58547) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.10.6.60-stable [#58548](https://github.com/ClickHouse/ClickHouse/pull/58548) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.3.19.32-lts [#58549](https://github.com/ClickHouse/ClickHouse/pull/58549) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update CHANGELOG.md [#58559](https://github.com/ClickHouse/ClickHouse/pull/58559) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Fix test 02932_kill_query_sleep [#58560](https://github.com/ClickHouse/ClickHouse/pull/58560) ([Vitaly Baranov](https://github.com/vitlibar)).
* CI fix. Add packager script to build digest [#58571](https://github.com/ClickHouse/ClickHouse/pull/58571) ([Max K.](https://github.com/maxknv)).
* fix and test that S3Clients are reused [#58573](https://github.com/ClickHouse/ClickHouse/pull/58573) ([Sema Checherinda](https://github.com/CheSema)).
* Follow-up to [#58482](https://github.com/ClickHouse/ClickHouse/issues/58482) [#58574](https://github.com/ClickHouse/ClickHouse/pull/58574) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not load database engines in suggest [#58586](https://github.com/ClickHouse/ClickHouse/pull/58586) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong message in Keeper [#58588](https://github.com/ClickHouse/ClickHouse/pull/58588) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add some missing LLVM includes [#58594](https://github.com/ClickHouse/ClickHouse/pull/58594) ([Raúl Marín](https://github.com/Algunenano)).
* Small fix in Keeper [#58598](https://github.com/ClickHouse/ClickHouse/pull/58598) ([Antonio Andelic](https://github.com/antonio2368)).
* Update analyzer_tech_debt.txt [#58599](https://github.com/ClickHouse/ClickHouse/pull/58599) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Simplify release.py script [#58600](https://github.com/ClickHouse/ClickHouse/pull/58600) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update analyzer_tech_debt.txt [#58602](https://github.com/ClickHouse/ClickHouse/pull/58602) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Refactor stacktrace symbolizer to avoid copy-paste [#58610](https://github.com/ClickHouse/ClickHouse/pull/58610) ([Azat Khuzhin](https://github.com/azat)).
* Add intel AMX checking [#58617](https://github.com/ClickHouse/ClickHouse/pull/58617) ([Roman Glinskikh](https://github.com/omgronny)).
* Optional `client` argument for `S3Helper` [#58619](https://github.com/ClickHouse/ClickHouse/pull/58619) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add sorting to 02366_kql_summarize.sql [#58621](https://github.com/ClickHouse/ClickHouse/pull/58621) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible race in ManyAggregatedData dtor. [#58624](https://github.com/ClickHouse/ClickHouse/pull/58624) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove more projections code [#58628](https://github.com/ClickHouse/ClickHouse/pull/58628) ([Anton Popov](https://github.com/CurtizJ)).
* Remove finalize() from ~WriteBufferFromEncryptedFile [#58629](https://github.com/ClickHouse/ClickHouse/pull/58629) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update test_replicated_database/test.py [#58647](https://github.com/ClickHouse/ClickHouse/pull/58647) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try disabling `muzzy_decay_ms` in jemalloc [#58648](https://github.com/ClickHouse/ClickHouse/pull/58648) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix test_replicated_database::test_startup_without_zk flakiness [#58649](https://github.com/ClickHouse/ClickHouse/pull/58649) ([Azat Khuzhin](https://github.com/azat)).
* Fix 01600_remerge_sort_lowered_memory_bytes_ratio flakiness (due to settings randomization) [#58650](https://github.com/ClickHouse/ClickHouse/pull/58650) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer: Fix assertion in HashJoin with duplicate columns [#58652](https://github.com/ClickHouse/ClickHouse/pull/58652) ([vdimir](https://github.com/vdimir)).
* Document that `match()` can use `ngrambf_v1` and `tokenbf_v1` indexes [#58655](https://github.com/ClickHouse/ClickHouse/pull/58655) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix perf tests duration (checks.test_duration_ms) [#58656](https://github.com/ClickHouse/ClickHouse/pull/58656) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer: Correctly handle constant set in index [#58657](https://github.com/ClickHouse/ClickHouse/pull/58657) ([Dmitry Novik](https://github.com/novikd)).
* fix a typo in stress randomization setting [#58658](https://github.com/ClickHouse/ClickHouse/pull/58658) ([Sema Checherinda](https://github.com/CheSema)).
* Small follow-up to `std::regex` --> `re2` conversion ([#58458](https://github.com/ClickHouse/ClickHouse/issues/58458)) [#58678](https://github.com/ClickHouse/ClickHouse/pull/58678) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove `<regex>` from libcxx [#58681](https://github.com/ClickHouse/ClickHouse/pull/58681) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix bad log message [#58698](https://github.com/ClickHouse/ClickHouse/pull/58698) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Some small improvements to version_helper from [#57203](https://github.com/ClickHouse/ClickHouse/issues/57203) [#58712](https://github.com/ClickHouse/ClickHouse/pull/58712) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Small fixes in different helpers [#58717](https://github.com/ClickHouse/ClickHouse/pull/58717) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix bug in new (not released yet) parallel replicas coordinator [#58722](https://github.com/ClickHouse/ClickHouse/pull/58722) ([Nikita Taranov](https://github.com/nickitat)).
* Analyzer: Fix LOGICAL_ERROR in CountDistinctPass [#58723](https://github.com/ClickHouse/ClickHouse/pull/58723) ([Dmitry Novik](https://github.com/novikd)).
* Fix reading of offsets subcolumn (`size0`) from `Nested` [#58729](https://github.com/ClickHouse/ClickHouse/pull/58729) ([Anton Popov](https://github.com/CurtizJ)).
* Fix Mac OS X [#58733](https://github.com/ClickHouse/ClickHouse/pull/58733) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* fix stress with generate-template-key [#58740](https://github.com/ClickHouse/ClickHouse/pull/58740) ([Sema Checherinda](https://github.com/CheSema)).
* more relaxed check [#58751](https://github.com/ClickHouse/ClickHouse/pull/58751) ([Sema Checherinda](https://github.com/CheSema)).
* Fix usage of small buffers for remote reading [#58768](https://github.com/ClickHouse/ClickHouse/pull/58768) ([Nikita Taranov](https://github.com/nickitat)).
* Add missing includes when _LIBCPP_REMOVE_TRANSITIVE_INCLUDES enabled [#58770](https://github.com/ClickHouse/ClickHouse/pull/58770) ([Artem Alperin](https://github.com/hdnpth)).
* Remove some code [#58772](https://github.com/ClickHouse/ClickHouse/pull/58772) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove some code [#58790](https://github.com/ClickHouse/ClickHouse/pull/58790) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix trash in performance tests [#58794](https://github.com/ClickHouse/ClickHouse/pull/58794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in Keeper [#58806](https://github.com/ClickHouse/ClickHouse/pull/58806) ([Antonio Andelic](https://github.com/antonio2368)).
* Increase log level to trace to help debug `00993_system_parts_race_condition_drop_zookeeper` [#58809](https://github.com/ClickHouse/ClickHouse/pull/58809) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* DatabaseCatalog background tasks add log names [#58832](https://github.com/ClickHouse/ClickHouse/pull/58832) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer: Resolve GROUPING function on shards [#58833](https://github.com/ClickHouse/ClickHouse/pull/58833) ([Dmitry Novik](https://github.com/novikd)).
* Allow parallel replicas for JOIN with analyzer [part 1]. [#58838](https://github.com/ClickHouse/ClickHouse/pull/58838) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `isRetry` method [#58839](https://github.com/ClickHouse/ClickHouse/pull/58839) ([alesapin](https://github.com/alesapin)).
* fs cache: fix data race in slru [#58842](https://github.com/ClickHouse/ClickHouse/pull/58842) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix reading from an invisible part in new (not released yet) parallel replicas coordinator [#58844](https://github.com/ClickHouse/ClickHouse/pull/58844) ([Nikita Taranov](https://github.com/nickitat)).
* Fix bad log message [#58849](https://github.com/ClickHouse/ClickHouse/pull/58849) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Set max_bytes_before_external_group_by in 01961_roaring_memory_tracking [#58863](https://github.com/ClickHouse/ClickHouse/pull/58863) ([vdimir](https://github.com/vdimir)).
* Fix `00089_group_by_arrays_of_fixed` with external aggregation [#58873](https://github.com/ClickHouse/ClickHouse/pull/58873) ([Antonio Andelic](https://github.com/antonio2368)).
* DiskWeb minor improvement in loading [#58874](https://github.com/ClickHouse/ClickHouse/pull/58874) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix RPN construction for indexHint [#58875](https://github.com/ClickHouse/ClickHouse/pull/58875) ([Dmitry Novik](https://github.com/novikd)).
* Analyzer: add test with GROUP BY on shards [#58876](https://github.com/ClickHouse/ClickHouse/pull/58876) ([Dmitry Novik](https://github.com/novikd)).
* Jepsen job to reuse builds [#58881](https://github.com/ClickHouse/ClickHouse/pull/58881) ([Max K.](https://github.com/maxknv)).
* Fix ambiguity in the setting description [#58883](https://github.com/ClickHouse/ClickHouse/pull/58883) ([Denny Crane](https://github.com/den-crane)).
* Less error prone interface of read buffers [#58886](https://github.com/ClickHouse/ClickHouse/pull/58886) ([Anton Popov](https://github.com/CurtizJ)).
* Add metric for keeper memory soft limit [#58890](https://github.com/ClickHouse/ClickHouse/pull/58890) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Add a test for [#47988](https://github.com/ClickHouse/ClickHouse/issues/47988) [#58893](https://github.com/ClickHouse/ClickHouse/pull/58893) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Whitespaces [#58894](https://github.com/ClickHouse/ClickHouse/pull/58894) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in `AggregatingTransform` [#58896](https://github.com/ClickHouse/ClickHouse/pull/58896) ([Antonio Andelic](https://github.com/antonio2368)).
* Update SLRUFileCachePriority.cpp [#58898](https://github.com/ClickHouse/ClickHouse/pull/58898) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add tests for [#57193](https://github.com/ClickHouse/ClickHouse/issues/57193) [#58899](https://github.com/ClickHouse/ClickHouse/pull/58899) ([Raúl Marín](https://github.com/Algunenano)).
* Add log for an already downloaded binary in Jepsen [#58901](https://github.com/ClickHouse/ClickHouse/pull/58901) ([Antonio Andelic](https://github.com/antonio2368)).
* fs cache: minor refactoring [#58902](https://github.com/ClickHouse/ClickHouse/pull/58902) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Checking on flaky test_parallel_replicas_custom_key_failover [#58909](https://github.com/ClickHouse/ClickHouse/pull/58909) ([Igor Nikonov](https://github.com/devcrafter)).
* Style fix [#58913](https://github.com/ClickHouse/ClickHouse/pull/58913) ([Dmitry Novik](https://github.com/novikd)).
* Opentelemetry spans to analyze CPU and S3 bottlenecks on inserts [#58914](https://github.com/ClickHouse/ClickHouse/pull/58914) ([Alexander Gololobov](https://github.com/davenger)).
* Fix fault handler in case the thread (for the fault handler) cannot be spawned [#58917](https://github.com/ClickHouse/ClickHouse/pull/58917) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer: Support GROUP BY injective function elimination [#58919](https://github.com/ClickHouse/ClickHouse/pull/58919) ([Dmitry Novik](https://github.com/novikd)).
* Cancel MasterCI in PRs [#58920](https://github.com/ClickHouse/ClickHouse/pull/58920) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix and test for azure [#58697](https://github.com/ClickHouse/ClickHouse/issues/58697) [#58921](https://github.com/ClickHouse/ClickHouse/pull/58921) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Extend performance test norm_dist.xml [#58922](https://github.com/ClickHouse/ClickHouse/pull/58922) ([Robert Schulze](https://github.com/rschu1ze)).
* Add regression test for parallel replicas (follow up [#58722](https://github.com/ClickHouse/ClickHouse/issues/58722), [#58844](https://github.com/ClickHouse/ClickHouse/issues/58844)) [#58923](https://github.com/ClickHouse/ClickHouse/pull/58923) ([Nikita Taranov](https://github.com/nickitat)).
* Add a test for [#47892](https://github.com/ClickHouse/ClickHouse/issues/47892) [#58927](https://github.com/ClickHouse/ClickHouse/pull/58927) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `FunctionToSubcolumnsPass` in debug build [#58930](https://github.com/ClickHouse/ClickHouse/pull/58930) ([Anton Popov](https://github.com/CurtizJ)).
* Call `getMaxFileDescriptorCount` once in Keeper [#58938](https://github.com/ClickHouse/ClickHouse/pull/58938) ([Antonio Andelic](https://github.com/antonio2368)).
* Add missing files to digests [#58942](https://github.com/ClickHouse/ClickHouse/pull/58942) ([Raúl Marín](https://github.com/Algunenano)).
* Analyzer: fix join column not found with compound identifiers [#58943](https://github.com/ClickHouse/ClickHouse/pull/58943) ([vdimir](https://github.com/vdimir)).
* CI: pr_info to provide event_type for job scripts [#58947](https://github.com/ClickHouse/ClickHouse/pull/58947) ([Max K.](https://github.com/maxknv)).
* Using the destination object for paths generation in S3copy. [#58949](https://github.com/ClickHouse/ClickHouse/pull/58949) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix data race in slru (2) [#58950](https://github.com/ClickHouse/ClickHouse/pull/58950) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky test_postgresql_replica_database_engine_2/test.py::test_dependent_loading [#58951](https://github.com/ClickHouse/ClickHouse/pull/58951) ([Kseniia Sumarokova](https://github.com/kssenii)).
* More safe way to dump system logs in tests [#58955](https://github.com/ClickHouse/ClickHouse/pull/58955) ([alesapin](https://github.com/alesapin)).
* Add a comment about sparse checkout [#58960](https://github.com/ClickHouse/ClickHouse/pull/58960) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow up to [#58357](https://github.com/ClickHouse/ClickHouse/issues/58357) [#58963](https://github.com/ClickHouse/ClickHouse/pull/58963) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Better error message about tuples [#58971](https://github.com/ClickHouse/ClickHouse/pull/58971) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix timeout for prometheus exporter for HTTP/1.1 (due to keep-alive) [#58981](https://github.com/ClickHouse/ClickHouse/pull/58981) ([Azat Khuzhin](https://github.com/azat)).
* Fix 02891_array_shingles with analyzer [#58982](https://github.com/ClickHouse/ClickHouse/pull/58982) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix script name in SQL example in executable.md [#58984](https://github.com/ClickHouse/ClickHouse/pull/58984) ([Lino Uruñuela](https://github.com/Wachynaky)).
* Fix typo [#58986](https://github.com/ClickHouse/ClickHouse/pull/58986) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Revert flaky [#58992](https://github.com/ClickHouse/ClickHouse/pull/58992) ([Raúl Marín](https://github.com/Algunenano)).
* Revive: Parallel replicas custom key: skip unavailable replicas [#58993](https://github.com/ClickHouse/ClickHouse/pull/58993) ([Igor Nikonov](https://github.com/devcrafter)).
* Make performance test `test norm_dist.xml` more realistic [#58995](https://github.com/ClickHouse/ClickHouse/pull/58995) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix 02404_memory_bound_merging with analyzer (follow up [#56419](https://github.com/ClickHouse/ClickHouse/issues/56419)) [#58996](https://github.com/ClickHouse/ClickHouse/pull/58996) ([Nikita Taranov](https://github.com/nickitat)).
* Add test for [#58930](https://github.com/ClickHouse/ClickHouse/issues/58930) [#58999](https://github.com/ClickHouse/ClickHouse/pull/58999) ([Anton Popov](https://github.com/CurtizJ)).
* Initialization of ConnectionTimeouts [#59000](https://github.com/ClickHouse/ClickHouse/pull/59000) ([Sema Checherinda](https://github.com/CheSema)).
* DiskWeb fix loading [#59006](https://github.com/ClickHouse/ClickHouse/pull/59006) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update log level for http buffer [#59008](https://github.com/ClickHouse/ClickHouse/pull/59008) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Change log level for super important message in Keeper [#59010](https://github.com/ClickHouse/ClickHouse/pull/59010) ([alesapin](https://github.com/alesapin)).
* Fix async loader stress test [#59011](https://github.com/ClickHouse/ClickHouse/pull/59011) ([Sergei Trifonov](https://github.com/serxa)).
* Remove `StaticResourceManager` [#59013](https://github.com/ClickHouse/ClickHouse/pull/59013) ([Sergei Trifonov](https://github.com/serxa)).
* preserve 'amz-sdk-invocation-id' and 'amz-sdk-request' headers with gcp [#59015](https://github.com/ClickHouse/ClickHouse/pull/59015) ([Sema Checherinda](https://github.com/CheSema)).
* Update rename.md [#59017](https://github.com/ClickHouse/ClickHouse/pull/59017) ([filimonov](https://github.com/filimonov)).
* Fix a typo [#59024](https://github.com/ClickHouse/ClickHouse/pull/59024) ([edpyt](https://github.com/edpyt)).
* Split resource scheduler off `IO/` into `Common/Scheduler/` [#59025](https://github.com/ClickHouse/ClickHouse/pull/59025) ([Sergei Trifonov](https://github.com/serxa)).
* Add a parameter for testing purposes [#59027](https://github.com/ClickHouse/ClickHouse/pull/59027) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix test 02932_kill_query_sleep when running with query cache [#59041](https://github.com/ClickHouse/ClickHouse/pull/59041) ([Vitaly Baranov](https://github.com/vitlibar)).
* CI: Jepsen: fix sanity check in ci.py [#59043](https://github.com/ClickHouse/ClickHouse/pull/59043) ([Max K.](https://github.com/maxknv)).
* CI: add ci_config classes for job and build names [#59046](https://github.com/ClickHouse/ClickHouse/pull/59046) ([Max K.](https://github.com/maxknv)).
* remove flaky test [#59066](https://github.com/ClickHouse/ClickHouse/pull/59066) ([Sema Checherinda](https://github.com/CheSema)).
* Followup to 57853 [#59068](https://github.com/ClickHouse/ClickHouse/pull/59068) ([Dmitry Novik](https://github.com/novikd)).
* Follow-up to [#59027](https://github.com/ClickHouse/ClickHouse/issues/59027) [#59075](https://github.com/ClickHouse/ClickHouse/pull/59075) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `test_parallel_replicas_invisible_parts` [#59077](https://github.com/ClickHouse/ClickHouse/pull/59077) ([Nikita Taranov](https://github.com/nickitat)).
* Increase max_bytes_before_external_group_by for 00165_jit_aggregate_functions [#59078](https://github.com/ClickHouse/ClickHouse/pull/59078) ([Raúl Marín](https://github.com/Algunenano)).
* Fix stateless/run.sh [#59079](https://github.com/ClickHouse/ClickHouse/pull/59079) ([Kseniia Sumarokova](https://github.com/kssenii)).
* CI: hot fix for reuse [#59081](https://github.com/ClickHouse/ClickHouse/pull/59081) ([Max K.](https://github.com/maxknv)).
* Fix server shutdown due to exception while loading metadata [#59083](https://github.com/ClickHouse/ClickHouse/pull/59083) ([Sergei Trifonov](https://github.com/serxa)).
* Coordinator returns ranges for reading in sorted order [#59089](https://github.com/ClickHouse/ClickHouse/pull/59089) ([Nikita Taranov](https://github.com/nickitat)).
* Raise timeout in 02294_decimal_second_errors [#59090](https://github.com/ClickHouse/ClickHouse/pull/59090) ([Raúl Marín](https://github.com/Algunenano)).
* Add `[[nodiscard]]` to a couple of methods [#59093](https://github.com/ClickHouse/ClickHouse/pull/59093) ([Nikita Taranov](https://github.com/nickitat)).
* Docs: Update integer and float aliases [#59100](https://github.com/ClickHouse/ClickHouse/pull/59100) ([Robert Schulze](https://github.com/rschu1ze)).
* Avoid election timeouts during startup in Keeper [#59102](https://github.com/ClickHouse/ClickHouse/pull/59102) ([Antonio Andelic](https://github.com/antonio2368)).
* Add missing setting max_estimated_execution_time in SettingsChangesHistory [#59104](https://github.com/ClickHouse/ClickHouse/pull/59104) ([Kruglov Pavel](https://github.com/Avogar)).
* Rename some inverted index test files [#59106](https://github.com/ClickHouse/ClickHouse/pull/59106) ([Robert Schulze](https://github.com/rschu1ze)).
* Further reduce runtime of `norm_distance.xml` [#59108](https://github.com/ClickHouse/ClickHouse/pull/59108) ([Robert Schulze](https://github.com/rschu1ze)).
* Minor follow-up to [#53710](https://github.com/ClickHouse/ClickHouse/issues/53710) [#59109](https://github.com/ClickHouse/ClickHouse/pull/59109) ([Robert Schulze](https://github.com/rschu1ze)).
* Update stateless/run.sh [#59116](https://github.com/ClickHouse/ClickHouse/pull/59116) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Followup 57875 [#59117](https://github.com/ClickHouse/ClickHouse/pull/59117) ([Dmitry Novik](https://github.com/novikd)).
* Fixing build [#59130](https://github.com/ClickHouse/ClickHouse/pull/59130) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Capability check for `s3_plain` [#59145](https://github.com/ClickHouse/ClickHouse/pull/59145) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix `02015_async_inserts_stress_long` [#59146](https://github.com/ClickHouse/ClickHouse/pull/59146) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix AggregateFunctionNothing result type issues introducing it with different names [#59147](https://github.com/ClickHouse/ClickHouse/pull/59147) ([vdimir](https://github.com/vdimir)).
* Fix url encoding issue [#59162](https://github.com/ClickHouse/ClickHouse/pull/59162) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Upgrade simdjson to v3.6.3 [#59166](https://github.com/ClickHouse/ClickHouse/pull/59166) ([Robert Schulze](https://github.com/rschu1ze)).
* Decrease log level for one log message [#59168](https://github.com/ClickHouse/ClickHouse/pull/59168) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix broken cache for non-existing temp_path [#59172](https://github.com/ClickHouse/ClickHouse/pull/59172) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Move some headers [#59175](https://github.com/ClickHouse/ClickHouse/pull/59175) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Analyzer: Fix CTE name clash resolution [#59177](https://github.com/ClickHouse/ClickHouse/pull/59177) ([Dmitry Novik](https://github.com/novikd)).
* Fix another place with special symbols in the URL [#59184](https://github.com/ClickHouse/ClickHouse/pull/59184) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Actions dag build filter actions refactoring [#59228](https://github.com/ClickHouse/ClickHouse/pull/59228) ([Maksim Kita](https://github.com/kitaisreal)).
* Minor cleanup of msan usage [#59229](https://github.com/ClickHouse/ClickHouse/pull/59229) ([Robert Schulze](https://github.com/rschu1ze)).
* Load server configs in clickhouse local [#59231](https://github.com/ClickHouse/ClickHouse/pull/59231) ([pufit](https://github.com/pufit)).
* Make libssh build dependent on `-DENABLE_LIBRARIES` [#59242](https://github.com/ClickHouse/ClickHouse/pull/59242) ([Robert Schulze](https://github.com/rschu1ze)).
* Disable copy constructor for MultiVersion [#59244](https://github.com/ClickHouse/ClickHouse/pull/59244) ([Vitaly Baranov](https://github.com/vitlibar)).
* CI: fix ci configuration for nightly job [#59252](https://github.com/ClickHouse/ClickHouse/pull/59252) ([Max K.](https://github.com/maxknv)).
* Fix 02475_bson_each_row_format flakiness (due to small parsing block) [#59253](https://github.com/ClickHouse/ClickHouse/pull/59253) ([Azat Khuzhin](https://github.com/azat)).
* Improve pytest --pdb experience by preserving dockerd on SIGINT (v2) [#59255](https://github.com/ClickHouse/ClickHouse/pull/59255) ([Azat Khuzhin](https://github.com/azat)).
* Fix fasttest by pinning pip dependencies [#59256](https://github.com/ClickHouse/ClickHouse/pull/59256) ([Azat Khuzhin](https://github.com/azat)).
* Added AtomicLogger [#59273](https://github.com/ClickHouse/ClickHouse/pull/59273) ([Maksim Kita](https://github.com/kitaisreal)).
* Update test_reload_after_fail_in_cache_dictionary for analyzer [#59274](https://github.com/ClickHouse/ClickHouse/pull/59274) ([vdimir](https://github.com/vdimir)).
* Update run.sh [#59280](https://github.com/ClickHouse/ClickHouse/pull/59280) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add missing setting optimize_injective_functions_in_group_by to SettingsChangesHistory [#59283](https://github.com/ClickHouse/ClickHouse/pull/59283) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix perf tests (after sumMap starts to filter out -0.) [#59287](https://github.com/ClickHouse/ClickHouse/pull/59287) ([Azat Khuzhin](https://github.com/azat)).
* Use fresh ZooKeeper client on DROP (to have higher chances on success) [#59288](https://github.com/ClickHouse/ClickHouse/pull/59288) ([Azat Khuzhin](https://github.com/azat)).
* Additional check [#59292](https://github.com/ClickHouse/ClickHouse/pull/59292) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* No debug symbols in Rust [#59306](https://github.com/ClickHouse/ClickHouse/pull/59306) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix deadlock in `AsyncLoader::stop()` [#59308](https://github.com/ClickHouse/ClickHouse/pull/59308) ([Sergei Trifonov](https://github.com/serxa)).
* Speed up `00165_jit_aggregate_functions` [#59312](https://github.com/ClickHouse/ClickHouse/pull/59312) ([Nikita Taranov](https://github.com/nickitat)).
* CI: WA for issue with perf test with artifact reuse [#59325](https://github.com/ClickHouse/ClickHouse/pull/59325) ([Max K.](https://github.com/maxknv)).
* Fix typo [#59329](https://github.com/ClickHouse/ClickHouse/pull/59329) ([Raúl Marín](https://github.com/Algunenano)).
* Simplify query_run_metric_arrays in perf tests [#59333](https://github.com/ClickHouse/ClickHouse/pull/59333) ([Raúl Marín](https://github.com/Algunenano)).
* IVolume constructor improve exception message [#59335](https://github.com/ClickHouse/ClickHouse/pull/59335) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix upgrade check for new setting [#59343](https://github.com/ClickHouse/ClickHouse/pull/59343) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix sccache when building without coverage [#59345](https://github.com/ClickHouse/ClickHouse/pull/59345) ([Raúl Marín](https://github.com/Algunenano)).
* Loggers initialization fix [#59347](https://github.com/ClickHouse/ClickHouse/pull/59347) ([Maksim Kita](https://github.com/kitaisreal)).
* Add setting update_insert_deduplication_token_in_dependent_materialized_views to settings changes history [#59349](https://github.com/ClickHouse/ClickHouse/pull/59349) ([Maksim Kita](https://github.com/kitaisreal)).
* Slightly better memory usage in `AsynchronousBoundedReadBuffer` [#59354](https://github.com/ClickHouse/ClickHouse/pull/59354) ([Anton Popov](https://github.com/CurtizJ)).
* Try to make variant tests a bit faster [#59355](https://github.com/ClickHouse/ClickHouse/pull/59355) ([Kruglov Pavel](https://github.com/Avogar)).
* Minor typos in Settings.h [#59371](https://github.com/ClickHouse/ClickHouse/pull/59371) ([Jordi Villar](https://github.com/jrdi)).
* Rename `quantileDDSketch` to `quantileDD` [#59372](https://github.com/ClickHouse/ClickHouse/pull/59372) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
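For illustration, a minimal sketch of the renamed function from the last entry above. It assumes the signature `quantileDD(relative_accuracy[, level])(expr)` is unchanged from the former `quantileDDSketch`; the `metrics` table and `response_ms` column are hypothetical.

```sql
-- Hypothetical table and column; quantileDD is the renamed quantileDDSketch.
-- 0.01 is the relative accuracy of the sketch, 0.99 the requested quantile level.
SELECT quantileDD(0.01, 0.99)(response_ms) AS p99
FROM metrics;
```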
14
docs/changelogs/v24.1.2.5-stable.md
Normal file
@ -0,0 +1,14 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.1.2.5-stable (b2605dd4a5a) FIXME as compared to v24.1.1.2048-stable (5a024dfc093)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix translate() with FixedString input [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)).
* Fix stacktraces for binaries without debug symbols [#59444](https://github.com/ClickHouse/ClickHouse/pull/59444) ([Azat Khuzhin](https://github.com/azat)).
34
docs/changelogs/v24.1.3.31-stable.md
Normal file
@ -0,0 +1,34 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.1.3.31-stable (135b08cbd28) FIXME as compared to v24.1.2.5-stable (b2605dd4a5a)
#### Improvement
* Backported in [#59569](https://github.com/ClickHouse/ClickHouse/issues/59569): Now dashboard understands both compressed and uncompressed state of URL's #hash (backward compatibility). Continuation of [#59124](https://github.com/ClickHouse/ClickHouse/issues/59124) . [#59548](https://github.com/ClickHouse/ClickHouse/pull/59548) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#59776](https://github.com/ClickHouse/ClickHouse/issues/59776): Added settings `split_parts_ranges_into_intersecting_and_non_intersecting_final` and `split_intersecting_parts_ranges_into_layers_final`. These settings are needed to disable optimizations for queries with `FINAL` and are intended for debugging only. [#59705](https://github.com/ClickHouse/ClickHouse/pull/59705) ([Maksim Kita](https://github.com/kitaisreal)).
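For illustration, a minimal sketch of how the two settings from the entry above could be applied per query; the setting names come from that entry, while the `replacing_table` name, its columns, and the value `0` are assumptions.

```sql
-- Hypothetical ReplacingMergeTree table; turn the FINAL range-splitting
-- optimizations off for this one query, e.g. while debugging a FINAL result.
SELECT key, value
FROM replacing_table FINAL
SETTINGS
    split_parts_ranges_into_intersecting_and_non_intersecting_final = 0,
    split_intersecting_parts_ranges_into_layers_final = 0;
```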
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix `ASTAlterCommand::formatImpl` in case of column specific settings… [#59445](https://github.com/ClickHouse/ClickHouse/pull/59445) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Make MAX use the same rules as permutation for complex types [#59498](https://github.com/ClickHouse/ClickHouse/pull/59498) ([Raúl Marín](https://github.com/Algunenano)).
* Fix corner case when passing `update_insert_deduplication_token_in_dependent_materialized_views` [#59544](https://github.com/ClickHouse/ClickHouse/pull/59544) ([Jordi Villar](https://github.com/jrdi)).
* Fix incorrect result of arrayElement / map[] on empty value [#59594](https://github.com/ClickHouse/ClickHouse/pull/59594) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash in topK when merging empty states [#59603](https://github.com/ClickHouse/ClickHouse/pull/59603) ([Raúl Marín](https://github.com/Algunenano)).
* Maintain function alias in RewriteSumFunctionWithSumAndCountVisitor [#59658](https://github.com/ClickHouse/ClickHouse/pull/59658) ([Raúl Marín](https://github.com/Algunenano)).
* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#59650](https://github.com/ClickHouse/ClickHouse/issues/59650) to 24.1: MergeTree FINAL optimization diagnostics and settings"'. [#59701](https://github.com/ClickHouse/ClickHouse/pull/59701) ([Raúl Marín](https://github.com/Algunenano)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix 02720_row_policy_column_with_dots [#59453](https://github.com/ClickHouse/ClickHouse/pull/59453) ([Duc Canh Le](https://github.com/canhld94)).
* Refactoring of dashboard state encoding [#59554](https://github.com/ClickHouse/ClickHouse/pull/59554) ([Sergei Trifonov](https://github.com/serxa)).
* MergeTree FINAL optimization diagnostics and settings [#59650](https://github.com/ClickHouse/ClickHouse/pull/59650) ([Maksim Kita](https://github.com/kitaisreal)).
* Pin python dependencies in stateless tests [#59663](https://github.com/ClickHouse/ClickHouse/pull/59663) ([Raúl Marín](https://github.com/Algunenano)).