Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-10 01:25:21 +00:00

Merge branch 'master' into Fix_endpoint_for_azureblobstorage

commit a168a84624
.github/PULL_REQUEST_TEMPLATE.md (vendored, 1 change)

@@ -12,6 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
 - Build/Testing/Packaging Improvement
 - Documentation (changelog entry is not required)
 - Bug Fix (user-visible misbehavior in an official stable release)
+- CI Fix or Improvement (changelog entry is not required)
 - Not for changelog (changelog entry is not required)
.github/workflows/backport_branches.yml (vendored, 2 changes)

@@ -11,7 +11,7 @@ on:  # yamllint disable-line rule:truthy
     - 'backport/**'
 jobs:
   RunConfig:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
.github/workflows/master.yml (vendored, 43 changes)

@@ -11,7 +11,7 @@ on:  # yamllint disable-line rule:truthy
     - 'master'
 jobs:
   RunConfig:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
@@ -327,6 +327,7 @@ jobs:
       run_command: |
         python3 build_report_check.py "$CHECK_NAME"
   MarkReleaseReady:
+    if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
     needs:
       - BuilderBinDarwin
       - BuilderBinDarwinAarch64
@@ -374,36 +375,12 @@ jobs:
       test_name: Stateless tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestCoverage:
-    needs: [RunConfig, BuilderDebReleaseCoverage]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (coverage)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestReleaseDatabaseReplicated:
+  FunctionalStatelessTestReleaseAnalyzerS3Replicated:
     needs: [RunConfig, BuilderDebRelease]
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Stateless tests (release, DatabaseReplicated)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestReleaseAnalyzer:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (release, analyzer)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestReleaseS3:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (release, s3 storage)
+      test_name: Stateless tests (release, analyzer, s3, DatabaseReplicated)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatelessTestS3Debug:
@@ -482,14 +459,6 @@ jobs:
       test_name: Stateful tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestCoverage:
-    needs: [RunConfig, BuilderDebReleaseCoverage]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (coverage)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatefulTestAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
@@ -840,9 +809,7 @@ jobs:
       - MarkReleaseReady
       - FunctionalStatelessTestDebug
       - FunctionalStatelessTestRelease
-      - FunctionalStatelessTestReleaseDatabaseReplicated
-      - FunctionalStatelessTestReleaseAnalyzer
-      - FunctionalStatelessTestReleaseS3
+      - FunctionalStatelessTestReleaseAnalyzerS3Replicated
      - FunctionalStatelessTestAarch64
       - FunctionalStatelessTestAsan
       - FunctionalStatelessTestTsan
.github/workflows/nightly.yml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:
     # The task for having a preserved ENV and event.json for later investigation
     uses: ./.github/workflows/debug.yml
   RunConfig:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
.github/workflows/pull_request.yml (vendored, 75 changes)

@@ -18,7 +18,7 @@ on:  # yamllint disable-line rule:truthy
 ##########################################################################################
 jobs:
   RunConfig:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
@@ -391,36 +391,12 @@ jobs:
       test_name: Stateless tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestCoverage:
-    needs: [RunConfig, BuilderDebReleaseCoverage]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (coverage)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestReleaseDatabaseReplicated:
+  FunctionalStatelessTestReleaseAnalyzerS3Replicated:
     needs: [RunConfig, BuilderDebRelease]
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Stateless tests (release, DatabaseReplicated)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestReleaseAnalyzer:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (release, analyzer)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatelessTestReleaseS3:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateless tests (release, s3 storage)
+      test_name: Stateless tests (release, analyzer, s3, DatabaseReplicated)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatelessTestS3Debug:
@@ -500,21 +476,9 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: tests bugfix validate check
+      test_name: Bugfix validation
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
-      additional_envs: |
-        KILL_TIMEOUT=3600
-      run_command: |
-        TEMP_PATH="${TEMP_PATH}/integration" \
-        python3 integration_test_check.py "Integration $CHECK_NAME" \
-          --validate-bugfix --post-commit-status=file || echo "ignore exit code"
-
-        TEMP_PATH="${TEMP_PATH}/stateless" \
-        python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \
-          --validate-bugfix --post-commit-status=file || echo "ignore exit code"
-
-        python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv"
 ##############################################################################################
 ############################ FUNCTIONAl STATEFUL TESTS #######################################
 ##############################################################################################
@@ -526,14 +490,6 @@ jobs:
       test_name: Stateful tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
-  FunctionalStatefulTestCoverage:
-    needs: [RunConfig, BuilderDebReleaseCoverage]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Stateful tests (coverage)
-      runner_type: func-tester
-      data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatefulTestAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
@@ -778,14 +734,6 @@ jobs:
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
 #############################################################################################
-  IntegrationTestsAsan:
-    needs: [RunConfig, BuilderDebAsan]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Integration tests (asan)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsAnalyzerAsan:
     needs: [RunConfig, BuilderDebAsan]
     if: ${{ !failure() && !cancelled() }}
@@ -802,14 +750,6 @@ jobs:
       test_name: Integration tests (tsan)
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
-  IntegrationTestsRelease:
-    needs: [RunConfig, BuilderDebRelease]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Integration tests (release)
-      runner_type: stress-tester
-      data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
@@ -915,10 +855,9 @@ jobs:
       - BuilderSpecialReport
       - DocsCheck
       - FastTest
+      - TestsBugfixCheck
       - FunctionalStatelessTestDebug
       - FunctionalStatelessTestRelease
-      - FunctionalStatelessTestReleaseDatabaseReplicated
-      - FunctionalStatelessTestReleaseAnalyzer
       - FunctionalStatelessTestAarch64
       - FunctionalStatelessTestAsan
       - FunctionalStatelessTestTsan
@@ -931,9 +870,9 @@ jobs:
       - FunctionalStatefulTestTsan
       - FunctionalStatefulTestMsan
       - FunctionalStatefulTestUBsan
-      - FunctionalStatelessTestReleaseS3
       - FunctionalStatelessTestS3Debug
       - FunctionalStatelessTestS3Tsan
+      - FunctionalStatelessTestReleaseAnalyzerS3Replicated
       - FunctionalStatefulTestReleaseParallelReplicas
       - FunctionalStatefulTestAsanParallelReplicas
       - FunctionalStatefulTestTsanParallelReplicas
@@ -954,10 +893,8 @@ jobs:
       - ASTFuzzerTestTsan
       - ASTFuzzerTestMSan
      - ASTFuzzerTestUBSan
-      - IntegrationTestsAsan
       - IntegrationTestsAnalyzerAsan
       - IntegrationTestsTsan
-      - IntegrationTestsRelease
       - IntegrationTestsAarch64
       - IntegrationTestsFlakyCheck
       - PerformanceComparisonX86
.github/workflows/release_branches.yml (vendored, 3 changes)

@@ -14,7 +14,7 @@ on:  # yamllint disable-line rule:truthy

 jobs:
   RunConfig:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
@@ -228,6 +228,7 @@ jobs:
       run_command: |
         python3 build_report_check.py "$CHECK_NAME"
   MarkReleaseReady:
+    if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
     needs:
       - BuilderBinDarwin
       - BuilderBinDarwinAarch64
.gitmessage (10 changes)

@@ -1,6 +1,6 @@


-### CI modificators (add a leading space to apply):
+### CI modificators (add a leading space to apply) ###

 ## To avoid a merge commit in CI:
 #no_merge_commit
@@ -8,13 +8,21 @@
 ## To discard CI cache:
 #no_ci_cache

 ## To not test (only style check):
 #do_not_test

 ## To run specified set of tests in CI:
 #ci_set_<SET_NAME>
 #ci_set_reduced
+#ci_set_arm
+#ci_set_integration

 ## To run specified job in CI:
 #job_<JOB NAME>
+#job_stateless_tests_release
+#job_package_debug
+#job_integration_tests_asan

+## To run only specified batches for multi-batch job(s)
+#batch_2
+#btach_1_2_3
CHANGELOG.md (155 changes)

@@ -1,9 +1,164 @@
### Table of Contents
**[ClickHouse release v24.2, 2024-02-29](#242)**<br/>
**[ClickHouse release v24.1, 2024-01-30](#241)**<br/>
**[Changelog for 2023](https://clickhouse.com/docs/en/whats-new/changelog/2023/)**<br/>

# 2024 Changelog

### <a id="242"></a> ClickHouse release 24.2, 2024-02-29

#### Backward Incompatible Change
* Validate suspicious/experimental types in nested types. Previously we didn't validate such types (except JSON) in nested types like Array/Tuple/Map. [#59385](https://github.com/ClickHouse/ClickHouse/pull/59385) ([Kruglov Pavel](https://github.com/Avogar)).
* Add a sanity check for the number of threads and block sizes. [#60138](https://github.com/ClickHouse/ClickHouse/pull/60138) ([Raúl Marín](https://github.com/Algunenano)).
* Don't infer floats in exponential notation by default. Add a setting `input_format_try_infer_exponent_floats` that restores the previous behaviour (disabled by default); see the example after this list. Closes [#59476](https://github.com/ClickHouse/ClickHouse/issues/59476). [#59500](https://github.com/ClickHouse/ClickHouse/pull/59500) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow alter operations to be surrounded by parentheses. The emission of parentheses can be controlled by the `format_alter_operations_with_parentheses` config. By default, in formatted queries the parentheses are emitted, as we store the formatted alter operations in some places as metadata (e.g. mutations). The new syntax clarifies some of the queries where alter operations end in a list. E.g. `ALTER TABLE x MODIFY TTL date GROUP BY a, b, DROP COLUMN c` cannot be parsed properly with the old syntax. In the new syntax the query `ALTER TABLE x (MODIFY TTL date GROUP BY a, b), (DROP COLUMN c)` is obvious. Older versions are not able to read the new syntax, therefore using the new syntax might cause issues if newer and older versions of ClickHouse are mixed in a single cluster. [#59532](https://github.com/ClickHouse/ClickHouse/pull/59532) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
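A minimal sketch of the float-inference setting from the list above (the inline CSV data is illustrative):

```sql
-- In 24.2, '1.5e3' in input data is no longer inferred as a float by default.
-- Restore the previous behaviour for the session:
SET input_format_try_infer_exponent_floats = 1;
-- Schema inference over inline data; with the setting enabled, the column
-- should come out as Nullable(Float64) rather than Nullable(String).
DESC format(CSV, '1.5e3\n2e-2\n');
```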

#### New Feature
* Added new syntax which allows specifying a definer user in a View/Materialized View. This allows executing SELECTs/INSERTs from views without explicit grants for the underlying tables, so a view encapsulates the grants (see the sketch after this list). [#54901](https://github.com/ClickHouse/ClickHouse/pull/54901) [#60439](https://github.com/ClickHouse/ClickHouse/pull/60439) ([pufit](https://github.com/pufit)).
* Try to detect the file format automatically during schema inference if it's unknown in `file/s3/hdfs/url/azureBlobStorage` engines. Closes [#50576](https://github.com/ClickHouse/ClickHouse/issues/50576). [#59092](https://github.com/ClickHouse/ClickHouse/pull/59092) ([Kruglov Pavel](https://github.com/Avogar)).
* Implement auto-adjustment for asynchronous insert timeouts. The following settings are introduced: `async_insert_poll_timeout_ms`, `async_insert_use_adaptive_busy_timeout`, `async_insert_busy_timeout_min_ms`, `async_insert_busy_timeout_max_ms`, `async_insert_busy_timeout_increase_rate`, `async_insert_busy_timeout_decrease_rate`. [#58486](https://github.com/ClickHouse/ClickHouse/pull/58486) ([Julia Kartseva](https://github.com/jkartseva)).
* Allow setting up a quota for the maximum number of sequential login failures. [#54737](https://github.com/ClickHouse/ClickHouse/pull/54737) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* A new aggregate function `groupArrayIntersect` (example after this list). Follow-up of [#49862](https://github.com/ClickHouse/ClickHouse/issues/49862). [#59598](https://github.com/ClickHouse/ClickHouse/pull/59598) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Backup & Restore support for `AzureBlobStorage`. Resolves [#50747](https://github.com/ClickHouse/ClickHouse/issues/50747). [#56988](https://github.com/ClickHouse/ClickHouse/pull/56988) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* The user can now specify the template string directly in the query using `format_schema_rows_template` as an alternative to `format_template_row`. Closes [#31363](https://github.com/ClickHouse/ClickHouse/issues/31363). [#59088](https://github.com/ClickHouse/ClickHouse/pull/59088) ([Shaun Struwig](https://github.com/Blargian)).
* Implemented automatic conversion of MergeTree tables of different kinds to the replicated engine. Create an empty `convert_to_replicated` file in the table's data directory (`/clickhouse/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`) and that table will be converted automatically on the next server start. [#57798](https://github.com/ClickHouse/ClickHouse/pull/57798) ([Kirill](https://github.com/kirillgarbar)).
* Added function `seriesOutliersTukey` to detect outliers in series data using Tukey's fences algorithm. [#58632](https://github.com/ClickHouse/ClickHouse/pull/58632) ([Bhavna Jindal](https://github.com/bhavnajindal)).
* Added query `ALTER TABLE table FORGET PARTITION partition` that removes ZooKeeper nodes related to an empty partition. [#59507](https://github.com/ClickHouse/ClickHouse/pull/59507) ([Sergei Trifonov](https://github.com/serxa)). This is an expert-level feature (example after this list).
* Support a JWT credentials file for the NATS table engine. [#59543](https://github.com/ClickHouse/ClickHouse/pull/59543) ([Nickolaj Jepsen](https://github.com/nickolaj-jepsen)).
* Implemented the `system.dns_cache` table, which can be useful for debugging DNS issues. [#59856](https://github.com/ClickHouse/ClickHouse/pull/59856) ([Kirill Nikiforov](https://github.com/allmazz)).
* The codec `LZ4HC` will accept a new level 2, which is faster than the previous minimum level 3, at the expense of less compression. In previous versions, `LZ4HC(2)` and lower was the same as `LZ4HC(3)`. Author: [Cyan4973](https://github.com/Cyan4973). [#60090](https://github.com/ClickHouse/ClickHouse/pull/60090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Implemented the `system.dns_cache` table, which can be useful for debugging DNS issues. New server setting `dns_cache_max_size`. [#60257](https://github.com/ClickHouse/ClickHouse/pull/60257) ([Kirill Nikiforov](https://github.com/allmazz)).
* Support a single-argument version of the `merge` table function, as `merge(['db_name', ] 'tables_regexp')` (example after this list). [#60372](https://github.com/ClickHouse/ClickHouse/pull/60372) ([豪肥肥](https://github.com/HowePa)).
* Support negative positional arguments (example after this list). Closes [#57736](https://github.com/ClickHouse/ClickHouse/issues/57736). [#58292](https://github.com/ClickHouse/ClickHouse/pull/58292) ([flynn](https://github.com/ucasfl)).
* Support specifying a set of permitted users for specific S3 settings in config using the `user` key. [#60144](https://github.com/ClickHouse/ClickHouse/pull/60144) ([Antonio Andelic](https://github.com/antonio2368)).
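A sketch of the new definer syntax for views described above (user, table, and column names are hypothetical; the exact grants depend on your setup):

```sql
-- Queries through the view run with the privileges of 'report_admin',
-- so readers of the view need no grants on the underlying table.
CREATE VIEW sales_summary
DEFINER = report_admin SQL SECURITY DEFINER
AS SELECT region, sum(amount) AS total FROM sales GROUP BY region;
```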
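The new `groupArrayIntersect` aggregate function, sketched on inline data via the `values` table function:

```sql
-- Intersection of all array values in the group: here [1], since 1 is the
-- only element present in every row.
SELECT groupArrayIntersect(ids)
FROM values('ids Array(UInt32)', [1, 2, 3], [1, 2], [1, 4]);
```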
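A sketch of the expert-level `FORGET PARTITION` query mentioned above (table and partition names are hypothetical; the partition must already be empty):

```sql
-- Drops the ZooKeeper metadata kept for an empty partition of a replicated table.
ALTER TABLE db.events FORGET PARTITION '2023-08';
```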
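The single-argument form of the `merge` table function (the name regexp is illustrative):

```sql
-- Reads from all tables of the current database whose names match the regexp.
SELECT count() FROM merge('^access_log_');
```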
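Negative positional arguments count from the end of the SELECT list; a sketch with hypothetical table and column names:

```sql
SELECT event_date, user_id, count() AS c
FROM events
GROUP BY 1, 2
ORDER BY -1 DESC;  -- -1 refers to the last column of the SELECT list, i.e. c
```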

#### Experimental Feature
* Add function `variantType` that returns an Enum with the variant type name for each row (example after this list). [#59398](https://github.com/ClickHouse/ClickHouse/pull/59398) ([Kruglov Pavel](https://github.com/Avogar)).
* Support `LEFT JOIN`, `ALL INNER JOIN`, and simple subqueries for parallel replicas (only with analyzer). The new setting `parallel_replicas_prefer_local_join` chooses local `JOIN` execution (by default) vs `GLOBAL JOIN`. All tables should exist on every replica from `cluster_for_parallel_replicas`. New settings `min_external_table_block_size_rows` and `min_external_table_block_size_bytes` are used to squash small blocks that are sent for temporary tables (only with analyzer). [#58916](https://github.com/ClickHouse/ClickHouse/pull/58916) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Allow concurrent table creation in the `Replicated` database while adding or recovering a new replica. [#59277](https://github.com/ClickHouse/ClickHouse/pull/59277) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Implement a comparison operator for `Variant` values and proper Field insertion into a `Variant` column. Don't allow creating a `Variant` type with similar variant types by default (allowed under a setting `allow_suspicious_variant_types`). Closes [#59996](https://github.com/ClickHouse/ClickHouse/issues/59996). Closes [#59850](https://github.com/ClickHouse/ClickHouse/issues/59850). [#60198](https://github.com/ClickHouse/ClickHouse/pull/60198) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable parallel replicas JOIN with CTE (not analyzer). [#59239](https://github.com/ClickHouse/ClickHouse/pull/59239) ([Raúl Marín](https://github.com/Algunenano)).
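A sketch of `variantType`, assuming the experimental `Variant` type is enabled as described in the entries above (table name is hypothetical):

```sql
SET allow_experimental_variant_type = 1;
CREATE TABLE vt (v Variant(UInt64, String)) ENGINE = Memory;
INSERT INTO vt VALUES (42), ('hello');
-- Returns the name of the variant stored in each row: 'UInt64', then 'String'.
SELECT v, variantType(v) FROM vt;
```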

#### Performance Improvement
* The primary key will use less memory. [#60049](https://github.com/ClickHouse/ClickHouse/pull/60049) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve memory usage for primary keys and some other operations. [#60050](https://github.com/ClickHouse/ClickHouse/pull/60050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The tables' primary keys will be loaded in memory lazily on first access (example after this list). This is controlled by the new MergeTree setting `primary_key_lazy_load`, which is on by default. This provides several advantages: it will not be loaded for tables that are not used; if there is not enough memory, an exception will be thrown on first use instead of at server startup. This provides several disadvantages: the latency of loading the primary key will be paid on the first query rather than before accepting connections; this theoretically may introduce a thundering-herd problem. This closes [#11188](https://github.com/ClickHouse/ClickHouse/issues/11188). [#60093](https://github.com/ClickHouse/ClickHouse/pull/60093) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Vectorized distance functions used in vector search. [#58866](https://github.com/ClickHouse/ClickHouse/pull/58866) ([Robert Schulze](https://github.com/rschu1ze)).
* Vectorized the function `dotProduct`, which is useful for vector search. [#60202](https://github.com/ClickHouse/ClickHouse/pull/60202) ([Robert Schulze](https://github.com/rschu1ze)).
* Add short-circuit ability for the `dictGetOrDefault` function. Closes [#52098](https://github.com/ClickHouse/ClickHouse/issues/52098). [#57767](https://github.com/ClickHouse/ClickHouse/pull/57767) ([jsc0218](https://github.com/jsc0218)).
* Keeper improvement: cache only a certain amount of logs in memory, controlled by `latest_logs_cache_size_threshold` and `commit_logs_cache_size_threshold`. [#59460](https://github.com/ClickHouse/ClickHouse/pull/59460) ([Antonio Andelic](https://github.com/antonio2368)).
* Keeper improvement: reduce the size of data nodes even more. [#59592](https://github.com/ClickHouse/ClickHouse/pull/59592) ([Antonio Andelic](https://github.com/antonio2368)).
* Continue optimizing branch misses of the `if` function when the result type is `Float*/Decimal*/*Int*`, a follow-up of https://github.com/ClickHouse/ClickHouse/pull/57885. [#59148](https://github.com/ClickHouse/ClickHouse/pull/59148) ([李扬](https://github.com/taiyang-li)).
* Optimize the `if` function when the input type is `Map`; the speed-up is up to ~10x. [#59413](https://github.com/ClickHouse/ClickHouse/pull/59413) ([李扬](https://github.com/taiyang-li)).
* Improve performance of the `Int8` type by implementing strict aliasing (we already have it for `UInt8` and all other integer types). [#59485](https://github.com/ClickHouse/ClickHouse/pull/59485) ([Raúl Marín](https://github.com/Algunenano)).
* Optimize performance of conditional sum/avg for big integer and big decimal types by reducing branch misses. [#59504](https://github.com/ClickHouse/ClickHouse/pull/59504) ([李扬](https://github.com/taiyang-li)).
* Improve performance of SELECTs with active mutations. [#59531](https://github.com/ClickHouse/ClickHouse/pull/59531) ([Azat Khuzhin](https://github.com/azat)).
* Optimized the function `isNotNull` with AVX2. [#59621](https://github.com/ClickHouse/ClickHouse/pull/59621) ([李扬](https://github.com/taiyang-li)).
* Improve ASOF JOIN performance for sorted or almost sorted data. [#59731](https://github.com/ClickHouse/ClickHouse/pull/59731) ([Maksim Kita](https://github.com/kitaisreal)).
* The previous default value of 1 MB for `async_insert_max_data_size` appeared to be too small. The new one is 10 MiB. [#59536](https://github.com/ClickHouse/ClickHouse/pull/59536) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Use multiple threads while reading the metadata of tables from a backup while executing the RESTORE command. [#60040](https://github.com/ClickHouse/ClickHouse/pull/60040) ([Vitaly Baranov](https://github.com/vitlibar)).
* Now if `StorageBuffer` has more than one shard (`num_layers` > 1), the background flush will happen simultaneously for all shards in multiple threads. [#60111](https://github.com/ClickHouse/ClickHouse/pull/60111) ([alesapin](https://github.com/alesapin)).
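The new MergeTree setting from the primary-key entry above can be set per table; a sketch with a hypothetical table definition:

```sql
-- Lazy loading is the new default; set the table setting to 0 to load the
-- primary key eagerly at table load time, as before 24.2.
CREATE TABLE hits (dt DateTime, url String)
ENGINE = MergeTree
ORDER BY dt
SETTINGS primary_key_lazy_load = 0;
```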

#### Improvement
* When the output format is Pretty and a block consists of a single numeric value which exceeds one million, a readable number will be printed on the right of the table. [#60379](https://github.com/ClickHouse/ClickHouse/pull/60379) ([rogeryk](https://github.com/rogeryk)).
* Added settings `split_parts_ranges_into_intersecting_and_non_intersecting_final` and `split_intersecting_parts_ranges_into_layers_final`. These settings are needed to disable optimizations for queries with `FINAL` and are needed for debugging only. [#59705](https://github.com/ClickHouse/ClickHouse/pull/59705) ([Maksim Kita](https://github.com/kitaisreal)). Actually not only for that: they can also lower memory usage at the expense of performance.
* Rename the setting `extract_kvp_max_pairs_per_row` to `extract_key_value_pairs_max_pairs_per_row`. The issue (an unnecessary abbreviation in the setting name) was introduced in https://github.com/ClickHouse/ClickHouse/pull/43606. Fix the documentation of this setting. [#59683](https://github.com/ClickHouse/ClickHouse/pull/59683) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#59960](https://github.com/ClickHouse/ClickHouse/pull/59960) ([jsc0218](https://github.com/jsc0218)).
* Running `ALTER COLUMN MATERIALIZE` on a column with a `DEFAULT` or `MATERIALIZED` expression now precisely follows the semantics. [#58023](https://github.com/ClickHouse/ClickHouse/pull/58023) ([Duc Canh Le](https://github.com/canhld94)).
* Enabled exponential backoff logic for errors during mutations. It reduces CPU usage, memory usage, and log file sizes. [#58036](https://github.com/ClickHouse/ClickHouse/pull/58036) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Improve counting of the `InitialQuery` profile event. [#58195](https://github.com/ClickHouse/ClickHouse/pull/58195) ([Unalian](https://github.com/Unalian)).
* Allow defining `volume_priority` in `storage_configuration`. [#58533](https://github.com/ClickHouse/ClickHouse/pull/58533) ([Andrey Zvonov](https://github.com/zvonand)).
* Add support for the `Date32` type in the `T64` codec. [#58738](https://github.com/ClickHouse/ClickHouse/pull/58738) ([Hongbin Ma](https://github.com/binmahone)).
* Allow trailing commas in types with several items. [#59119](https://github.com/ClickHouse/ClickHouse/pull/59119) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Settings for the Distributed table engine can now be specified in the server configuration file (similar to MergeTree settings), e.g. `<distributed> <flush_on_detach>false</flush_on_detach> </distributed>`. [#59291](https://github.com/ClickHouse/ClickHouse/pull/59291) ([Azat Khuzhin](https://github.com/azat)).
* Retry disconnects and expired sessions when reading `system.zookeeper`. This is helpful when reading many rows from the `system.zookeeper` table, especially in the presence of fault-injected disconnects. [#59388](https://github.com/ClickHouse/ClickHouse/pull/59388) ([Alexander Gololobov](https://github.com/davenger)).
* Do not interpret numbers with leading zeroes as octals when `input_format_values_interpret_expressions=0` (example after this list). [#59403](https://github.com/ClickHouse/ClickHouse/pull/59403) ([Joanna Hulboj](https://github.com/jh0x)).
* At startup and whenever config files are changed, ClickHouse updates the hard memory limits of its total memory tracker. These limits are computed based on various server settings and cgroups limits (on Linux). Previously, setting `/sys/fs/cgroup/memory.max` (for cgroups v2) was hard-coded. As a result, cgroup v2 memory limits configured for nested groups (hierarchies), e.g. `/sys/fs/cgroup/my/nested/group/memory.max`, were ignored. This is now fixed. The behavior of v1 memory limits remains unchanged. [#59435](https://github.com/ClickHouse/ClickHouse/pull/59435) ([Robert Schulze](https://github.com/rschu1ze)).
* New profile events added to observe the time spent on calculating PK/projections/secondary indices during `INSERT`-s. [#59436](https://github.com/ClickHouse/ClickHouse/pull/59436) ([Nikita Taranov](https://github.com/nickitat)).
* Allow defining a starting point for S3Queue with Ordered mode at creation using the setting `s3queue_last_processed_path`. [#59446](https://github.com/ClickHouse/ClickHouse/pull/59446) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Made comments for system tables also available in `system.tables` in `clickhouse-local`. [#59493](https://github.com/ClickHouse/ClickHouse/pull/59493) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* `system.zookeeper` table: previously the whole result was accumulated in memory and returned as one big chunk. This change should help to reduce memory consumption when reading many rows from `system.zookeeper`, allow showing intermediate progress (how many rows have been read so far), and avoid hitting the connection timeout when the result set is big. [#59545](https://github.com/ClickHouse/ClickHouse/pull/59545) ([Alexander Gololobov](https://github.com/davenger)).
* The dashboard now understands both the compressed and uncompressed state of the URL's #hash (backward compatibility). Continuation of [#59124](https://github.com/ClickHouse/ClickHouse/issues/59124). [#59548](https://github.com/ClickHouse/ClickHouse/pull/59548) ([Amos Bird](https://github.com/amosbird)).
* Bumped Intel QPL (used by the codec `DEFLATE_QPL`) from v1.3.1 to v1.4.0. Also fixed a bug in the polling timeout mechanism: we observed that in some cases the timeout did not work properly, and if a timeout happens, the IAA and the CPU may process the buffer concurrently. For now, we make sure the IAA codec status is not QPL_STS_BEING_PROCESSED and fall back to the software codec otherwise. [#59551](https://github.com/ClickHouse/ClickHouse/pull/59551) ([jasperzhu](https://github.com/jinjunzh)).
* Do not show a warning about the server version in ClickHouse Cloud because ClickHouse Cloud handles seamless upgrades automatically. [#59657](https://github.com/ClickHouse/ClickHouse/pull/59657) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* After self-extraction, the temporary binary is moved instead of copied. [#59661](https://github.com/ClickHouse/ClickHouse/pull/59661) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix stack unwinding on Apple macOS. This closes [#53653](https://github.com/ClickHouse/ClickHouse/issues/53653). [#59690](https://github.com/ClickHouse/ClickHouse/pull/59690) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Check for stack overflow in parsers even if the user misconfigured the `max_parser_depth` setting to a very high value. This closes [#59622](https://github.com/ClickHouse/ClickHouse/issues/59622). [#59697](https://github.com/ClickHouse/ClickHouse/pull/59697) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#60434](https://github.com/ClickHouse/ClickHouse/pull/60434).
* Unify the behaviour of XML- and SQL-created named collections in Kafka storage. [#59710](https://github.com/ClickHouse/ClickHouse/pull/59710) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
* In the case when `merge_max_block_size_bytes` is small enough and tables contain wide rows (strings or tuples), background merges could get stuck in an endless loop. This behaviour is fixed. Follow-up for https://github.com/ClickHouse/ClickHouse/pull/59340. [#59812](https://github.com/ClickHouse/ClickHouse/pull/59812) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow a UUID in `replica_path` if `CREATE TABLE` explicitly has it. [#59908](https://github.com/ClickHouse/ClickHouse/pull/59908) ([Azat Khuzhin](https://github.com/azat)).
* Add the `metadata_version` column of ReplicatedMergeTree tables to the `system.tables` system table. [#59942](https://github.com/ClickHouse/ClickHouse/pull/59942) ([Maksim Kita](https://github.com/kitaisreal)).
* Keeper improvement: send only Keeper-related metrics/events for Prometheus. [#59945](https://github.com/ClickHouse/ClickHouse/pull/59945) ([Antonio Andelic](https://github.com/antonio2368)).
* The dashboard will display metrics across different ClickHouse versions even if the structure of system tables has changed after the upgrade. [#59967](https://github.com/ClickHouse/ClickHouse/pull/59967) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow loading availability-zone (AZ) info from a file. [#59976](https://github.com/ClickHouse/ClickHouse/pull/59976) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Keeper improvement: add retries on failures for Disk-related operations. [#59980](https://github.com/ClickHouse/ClickHouse/pull/59980) ([Antonio Andelic](https://github.com/antonio2368)).
* Add new config setting `backups.remove_backup_files_after_failure`: `<clickhouse> <backups> <remove_backup_files_after_failure>true</remove_backup_files_after_failure> </backups> </clickhouse>`. [#60002](https://github.com/ClickHouse/ClickHouse/pull/60002) ([Vitaly Baranov](https://github.com/vitlibar)).
* Copying an S3 file to GCP now falls back to buffer copy in case GCP returned `Internal Error` with the `GATEWAY_TIMEOUT` HTTP error code. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)).
* Short-circuit execution for `ULIDStringToDateTime`. [#60211](https://github.com/ClickHouse/ClickHouse/pull/60211) ([Juan Madurga](https://github.com/jlmadurga)).
* Added a `query_id` column for tables `system.backups` and `system.backup_log`. Added an error stacktrace to the `error` column. [#60220](https://github.com/ClickHouse/ClickHouse/pull/60220) ([Maksim Kita](https://github.com/kitaisreal)).
* Connections through the MySQL port now automatically run with setting `prefer_column_name_to_alias = 1` to support QuickSight out-of-the-box. Also, settings `mysql_map_string_to_text_in_show_columns` and `mysql_map_fixed_string_to_text_in_show_columns` are now enabled by default, also affecting only MySQL connections. This increases compatibility with more BI tools. [#60365](https://github.com/ClickHouse/ClickHouse/pull/60365) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix a race condition in JavaScript code leading to duplicate charts on top of each other. [#60392](https://github.com/ClickHouse/ClickHouse/pull/60392) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
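A sketch of the leading-zero change mentioned above (hypothetical table):

```sql
CREATE TABLE t (x UInt32) ENGINE = Memory;
SET input_format_values_interpret_expressions = 0;
INSERT INTO t VALUES (012);  -- now stored as 12; previously parsed as octal 10
SELECT * FROM t;
```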

#### Build/Testing/Packaging Improvement
* Added builds and tests with coverage collection with introspection. Continuation of [#56102](https://github.com/ClickHouse/ClickHouse/issues/56102). [#58792](https://github.com/ClickHouse/ClickHouse/pull/58792) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update the Rust toolchain in `corrosion-cmake` when the CMake cross-compilation toolchain variable is set. [#59309](https://github.com/ClickHouse/ClickHouse/pull/59309) ([Aris Tritas](https://github.com/aris-aiven)).
* Add some fuzzing to ASTLiterals. [#59383](https://github.com/ClickHouse/ClickHouse/pull/59383) ([Raúl Marín](https://github.com/Algunenano)).
* If you want to run initdb scripts every time the ClickHouse container starts, you should set the environment variable CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS. [#59808](https://github.com/ClickHouse/ClickHouse/pull/59808) ([Alexander Nikolaev](https://github.com/AlexNik)).
* Remove the ability to disable generic ClickHouse components (like server/client/...), but keep it for some that require extra libraries (like ODBC or Keeper). [#59857](https://github.com/ClickHouse/ClickHouse/pull/59857) ([Azat Khuzhin](https://github.com/azat)).
* The query fuzzer will fuzz SETTINGS inside queries. [#60087](https://github.com/ClickHouse/ClickHouse/pull/60087) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add support for building ClickHouse with clang-19 (master). [#60448](https://github.com/ClickHouse/ClickHouse/pull/60448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix a "Non-ready set" error in TTL WHERE. [#57430](https://github.com/ClickHouse/ClickHouse/pull/57430) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a bug in the `quantilesGK` function. [#58216](https://github.com/ClickHouse/ClickHouse/pull/58216) ([李扬](https://github.com/taiyang-li)).
* Fix wrong behavior of `intDiv` for Decimal arguments. [#59243](https://github.com/ClickHouse/ClickHouse/pull/59243) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix `translate` with FixedString input. [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)).
* Fix digest calculation in Keeper. [#59439](https://github.com/ClickHouse/ClickHouse/pull/59439) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix stacktraces for binaries without debug symbols. [#59444](https://github.com/ClickHouse/ClickHouse/pull/59444) ([Azat Khuzhin](https://github.com/azat)).
* Fix `ASTAlterCommand::formatImpl` in case of column-specific settings… [#59445](https://github.com/ClickHouse/ClickHouse/pull/59445) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix `SELECT * FROM [...] ORDER BY ALL` with Analyzer. [#59462](https://github.com/ClickHouse/ClickHouse/pull/59462) ([zhongyuankai](https://github.com/zhongyuankai)).
* Fix possible uncaught exception during distributed query cancellation. [#59487](https://github.com/ClickHouse/ClickHouse/pull/59487) ([Azat Khuzhin](https://github.com/azat)).
* Make MAX use the same rules as permutation for complex types. [#59498](https://github.com/ClickHouse/ClickHouse/pull/59498) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a corner case when passing `update_insert_deduplication_token_in_dependent_materialized_views`. [#59544](https://github.com/ClickHouse/ClickHouse/pull/59544) ([Jordi Villar](https://github.com/jrdi)).
* Fix incorrect result of arrayElement / map on empty value. [#59594](https://github.com/ClickHouse/ClickHouse/pull/59594) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash in topK when merging empty states. [#59603](https://github.com/ClickHouse/ClickHouse/pull/59603) ([Raúl Marín](https://github.com/Algunenano)).
* Fix distributed table with a constant sharding key. [#59606](https://github.com/ClickHouse/ClickHouse/pull/59606) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a KQL issue found by WingFuzz. [#59626](https://github.com/ClickHouse/ClickHouse/pull/59626) ([Yong Wang](https://github.com/kashwy)).
* Fix error "Read beyond last offset" for AsynchronousBoundedReadBuffer. [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)).
* Maintain function alias in RewriteSumFunctionWithSumAndCountVisitor. [#59658](https://github.com/ClickHouse/ClickHouse/pull/59658) ([Raúl Marín](https://github.com/Algunenano)).
* Fix query start time on non-initial queries. [#59662](https://github.com/ClickHouse/ClickHouse/pull/59662) ([Raúl Marín](https://github.com/Algunenano)).
* Validate types of arguments for the `minmax` skipping index. [#59733](https://github.com/ClickHouse/ClickHouse/pull/59733) ([Anton Popov](https://github.com/CurtizJ)).
* Fix leftPad / rightPad function with FixedString input. [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).
* Fix an AST fuzzer issue in function `countMatches`. [#59752](https://github.com/ClickHouse/ClickHouse/pull/59752) ([Robert Schulze](https://github.com/rschu1ze)).
* RabbitMQ: fix having neither acked nor nacked messages. [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix StorageURL doing some of the query execution in a single thread. [#59833](https://github.com/ClickHouse/ClickHouse/pull/59833) ([Michael Kolupaev](https://github.com/al13n321)).
* S3Queue: fix an uninitialized value. [#59897](https://github.com/ClickHouse/ClickHouse/pull/59897) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix parsing of partition expressions surrounded by parentheses. [#59901](https://github.com/ClickHouse/ClickHouse/pull/59901) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix crash in JSONColumnsWithMetadata format over HTTP. [#59925](https://github.com/ClickHouse/ClickHouse/pull/59925) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not rewrite sum to count if the return value differs in Analyzer. [#59926](https://github.com/ClickHouse/ClickHouse/pull/59926) ([Azat Khuzhin](https://github.com/azat)).
* Fix a crash when reading UniqExactSet. [#59928](https://github.com/ClickHouse/ClickHouse/pull/59928) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix invalid `metadata_version` in ReplicatedMergeTree. [#59946](https://github.com/ClickHouse/ClickHouse/pull/59946) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race in `StorageDistributed`. [#59987](https://github.com/ClickHouse/ClickHouse/pull/59987) ([Nikita Taranov](https://github.com/nickitat)).
* Docker: run init scripts when the option is enabled rather than disabled. [#59991](https://github.com/ClickHouse/ClickHouse/pull/59991) ([jktng](https://github.com/jktng)).
* Fix INSERT into `SQLite` with single quotes (by escaping single quotes with a quote instead of a backslash). [#60015](https://github.com/ClickHouse/ClickHouse/pull/60015) ([Azat Khuzhin](https://github.com/azat)).
* Fix several logical errors in `arrayFold`. [#60022](https://github.com/ClickHouse/ClickHouse/pull/60022) ([Raúl Marín](https://github.com/Algunenano)).
* Fix `optimize_uniq_to_count` removing the column alias. [#60026](https://github.com/ClickHouse/ClickHouse/pull/60026) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a possible exception from the S3Queue table on drop. [#60036](https://github.com/ClickHouse/ClickHouse/pull/60036) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix formatting of NOT with single literals. [#60042](https://github.com/ClickHouse/ClickHouse/pull/60042) ([Raúl Marín](https://github.com/Algunenano)).
* Use `max_query_size` from context in DDLLogEntry instead of the hardcoded 4096. [#60083](https://github.com/ClickHouse/ClickHouse/pull/60083) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix inconsistent formatting of queries containing tables named `table`. Fix wrong formatting of queries with `UNION ALL`, `INTERSECT`, and `EXCEPT` when their structure wasn't linear. This closes #52349. Fix wrong formatting of `SYSTEM` queries, including `SYSTEM ... DROP FILESYSTEM CACHE`, `SYSTEM ... REFRESH/START/STOP/CANCEL/TEST VIEW`, `SYSTEM ENABLE/DISABLE FAILPOINT`. Fix formatting of parameterized DDL queries. Fix the formatting of the `DESCRIBE FILESYSTEM CACHE` query. Fix incorrect formatting of the `SET param_...` (a query setting a parameter). Fix incorrect formatting of `CREATE INDEX` queries. Fix inconsistent formatting of `CREATE USER` and similar queries. Fix inconsistent formatting of `CREATE SETTINGS PROFILE`. Fix incorrect formatting of `ALTER ... MODIFY REFRESH`. Fix inconsistent formatting of window functions if frame offsets were expressions. Fix inconsistent formatting of `RESPECT NULLS` and `IGNORE NULLS` if they were used after a function that implements an operator (such as `plus`). Fix idiotic formatting of `SYSTEM SYNC REPLICA ... LIGHTWEIGHT FROM ...`. Fix inconsistent formatting of invalid queries with `GROUP BY GROUPING SETS ... WITH ROLLUP/CUBE/TOTALS`. Fix inconsistent formatting of `GRANT CURRENT GRANTS`. Fix inconsistent formatting of `CREATE TABLE (... COLLATE)`. Additionally, I fixed the incorrect formatting of `EXPLAIN` in subqueries (#60102). Fixed incorrect formatting of lambda functions (#60012). Added a check so there is no way to miss these abominations in the future. [#60095](https://github.com/ClickHouse/ClickHouse/pull/60095) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix inconsistent formatting of explain in subqueries. [#60102](https://github.com/ClickHouse/ClickHouse/pull/60102) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix cosineDistance crash with Nullable. [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)).
* Allow casting of bools in string representation to true bools (example after this list). [#60160](https://github.com/ClickHouse/ClickHouse/pull/60160) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix `system.s3queue_log`. [#60166](https://github.com/ClickHouse/ClickHouse/pull/60166) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix arrayReduce with a nullable aggregate function name. [#60188](https://github.com/ClickHouse/ClickHouse/pull/60188) ([Raúl Marín](https://github.com/Algunenano)).
* Hide sensitive info for `S3Queue`. [#60233](https://github.com/ClickHouse/ClickHouse/pull/60233) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix HTTP exception codes. [#60252](https://github.com/ClickHouse/ClickHouse/pull/60252) ([Austin Kothig](https://github.com/kothiga)).
* S3Queue: fix a bug (also fixes the flaky test test_storage_s3_queue/test.py::test_shards_distributed). [#60282](https://github.com/ClickHouse/ClickHouse/pull/60282) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix use-of-uninitialized-value and invalid results in hashing functions with IPv6. [#60359](https://github.com/ClickHouse/ClickHouse/pull/60359) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix OptimizeDateOrDateTimeConverterWithPreimageVisitor with null arguments. [#60453](https://github.com/ClickHouse/ClickHouse/pull/60453) ([Raúl Marín](https://github.com/Algunenano)).
* Fixed a minor bug that prevented distributed table queries sent from either KQL or PRQL dialect clients from being executed on replicas. [#59674](https://github.com/ClickHouse/ClickHouse/issues/59674). [#60470](https://github.com/ClickHouse/ClickHouse/pull/60470) ([Alexey Milovidov](https://github.com/alexey-milovidov)) [#59674](https://github.com/ClickHouse/ClickHouse/pull/59674) ([Austin Kothig](https://github.com/kothiga)).
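A sketch of the string-to-bool casting fix listed above:

```sql
-- String representations of booleans now cast to proper Bool values.
SELECT CAST('true' AS Bool), CAST('false' AS Bool);  -- true, false
```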

### <a id="241"></a> ClickHouse release 24.1, 2024-01-30

#### Backward Incompatible Change
@@ -10,6 +10,7 @@ set (CMAKE_CXX_STANDARD 20)

 set (SRCS
     argsToConfig.cpp
+    cgroupsv2.cpp
     coverage.cpp
     demangle.cpp
     getAvailableMemoryAmount.cpp
@@ -1,6 +1,7 @@
 #pragma once

 #include <base/types.h>
+#include <base/extended_types.h>

 namespace wide
 {
@@ -44,3 +45,8 @@ concept is_over_big_int =
     || std::is_same_v<T, Decimal128>
     || std::is_same_v<T, Decimal256>;
 }
+
+template <> struct is_signed<DB::Decimal32> { static constexpr bool value = true; };
+template <> struct is_signed<DB::Decimal64> { static constexpr bool value = true; };
+template <> struct is_signed<DB::Decimal128> { static constexpr bool value = true; };
+template <> struct is_signed<DB::Decimal256> { static constexpr bool value = true; };
@@ -185,7 +185,8 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
     {
         case 3: if (!compare8(p1 + 32, p2 + 32)) return false; [[fallthrough]];
         case 2: if (!compare8(p1 + 16, p2 + 16)) return false; [[fallthrough]];
-        case 1: if (!compare8(p1, p2)) return false;
+        case 1: if (!compare8(p1, p2)) return false; [[fallthrough]];
+        default: ;
     }

     return compare8(p1 + size - 16, p2 + size - 16);
base/base/cgroupsv2.cpp (new file, 64 lines)

@@ -0,0 +1,64 @@
#include <base/cgroupsv2.h>

#include <base/defines.h>

#include <fstream>
#include <sstream>


bool cgroupsV2Enabled()
{
#if defined(OS_LINUX)
    /// This file exists iff the host has cgroups v2 enabled.
    auto controllers_file = default_cgroups_mount / "cgroup.controllers";
    if (!std::filesystem::exists(controllers_file))
        return false;
    return true;
#else
    return false;
#endif
}

bool cgroupsV2MemoryControllerEnabled()
{
#if defined(OS_LINUX)
    chassert(cgroupsV2Enabled());
    /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html:
    /// - file 'cgroup.controllers' defines which controllers *can* be enabled
    /// - file 'cgroup.subtree_control' defines which controllers *are* enabled
    /// Caveat: nested groups may disable controllers. For simplicity, check only the top-level group.
    std::ifstream subtree_control_file(default_cgroups_mount / "cgroup.subtree_control");
    if (!subtree_control_file.is_open())
        return false;
    std::string controllers;
    std::getline(subtree_control_file, controllers);
    if (controllers.find("memory") == std::string::npos)
        return false;
    return true;
#else
    return false;
#endif
}

std::string cgroupV2OfProcess()
{
#if defined(OS_LINUX)
    chassert(cgroupsV2Enabled());
    /// All PIDs assigned to a cgroup are in /sys/fs/cgroups/{cgroup_name}/cgroup.procs
    /// A simpler way to get the membership is:
    std::ifstream cgroup_name_file("/proc/self/cgroup");
    if (!cgroup_name_file.is_open())
        return "";
    /// With cgroups v2, there will be a *single* line with prefix "0::/"
    /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
    std::string cgroup;
    std::getline(cgroup_name_file, cgroup);
    static const std::string v2_prefix = "0::/";
    if (!cgroup.starts_with(v2_prefix))
        return "";
    cgroup = cgroup.substr(v2_prefix.length());
    return cgroup;
#else
    return "";
#endif
}
base/base/cgroupsv2.h (new file, 22 lines)

@@ -0,0 +1,22 @@
#pragma once

#include <filesystem>
#include <string>

#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
/// /sys/fs/cgroup was still symlinked to the actual mount in the cases that I have seen.
static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgroup";
#endif

/// Is cgroups v2 enabled on the system?
bool cgroupsV2Enabled();

/// Is the memory controller of cgroups v2 enabled on the system?
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();

/// Which cgroup does the process belong to?
/// Returns an empty string if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
std::string cgroupV2OfProcess();
@@ -1,17 +1,14 @@
 #include <base/getMemoryAmount.h>

+#include <base/cgroupsv2.h>
 #include <base/getPageSize.h>

 #include <fstream>
 #include <sstream>
-#include <stdexcept>

 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/param.h>
 #if defined(BSD)
 #include <sys/sysctl.h>
 #endif
@@ -20,49 +17,14 @@ namespace
 std::optional<uint64_t> getCgroupsV2MemoryLimit()
 {
 #if defined(OS_LINUX)
-    const std::filesystem::path default_cgroups_mount = "/sys/fs/cgroup";
-
-    /// This file exists iff the host has cgroups v2 enabled.
-    std::ifstream controllers_file(default_cgroups_mount / "cgroup.controllers");
-    if (!controllers_file.is_open())
+    if (!cgroupsV2Enabled())
         return {};

-    /// Make sure that the memory controller is enabled.
-    /// - cgroup.controllers defines which controllers *can* be enabled.
-    /// - cgroup.subtree_control defines which controllers *are* enabled.
-    /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
-    /// Caveat: nested groups may disable controllers. For simplicity, check only the top-level group.
-    /// ReadBufferFromFile subtree_control_file(default_cgroups_mount / "cgroup.subtree_control");
-    /// std::string subtree_control;
-    /// readString(subtree_control, subtree_control_file);
-    /// if (subtree_control.find("memory") == std::string::npos)
-    ///     return {};
-    std::ifstream subtree_control_file(default_cgroups_mount / "cgroup.subtree_control");
-    std::stringstream subtree_control_buf;
-    subtree_control_buf << subtree_control_file.rdbuf();
-    std::string subtree_control = subtree_control_buf.str();
-    if (subtree_control.find("memory") == std::string::npos)
+    if (!cgroupsV2MemoryControllerEnabled())
         return {};

-    /// Identify the cgroup the process belongs to
-    /// All PIDs assigned to a cgroup are in /sys/fs/cgroups/{cgroup_name}/cgroup.procs
-    /// A simpler way to get the membership is:
-    std::ifstream cgroup_name_file("/proc/self/cgroup");
-    if (!cgroup_name_file.is_open())
-        return {};
-
-    std::stringstream cgroup_name_buf;
-    cgroup_name_buf << cgroup_name_file.rdbuf();
-    std::string cgroup_name = cgroup_name_buf.str();
-    if (!cgroup_name.empty() && cgroup_name.back() == '\n')
-        cgroup_name.pop_back(); /// remove trailing newline, if any
-    /// With cgroups v2, there will be a *single* line with prefix "0::/"
-    const std::string v2_prefix = "0::/";
-    if (!cgroup_name.starts_with(v2_prefix))
-        return {};
-    cgroup_name = cgroup_name.substr(v2_prefix.length());
-
-    std::filesystem::path current_cgroup = cgroup_name.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup_name);
+    std::string cgroup = cgroupV2OfProcess();
+    auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);

     /// Open the bottom-most nested memory limit setting file. If there is no such file at the current
     /// level, try again at the parent level as memory settings are inherited.
@ -68,7 +68,7 @@ public:
    typedef typename Bucket::iterator BucketIterator;
    typedef typename BucketVec::iterator BucketVecIterator;

    class ConstIterator : public std::iterator<std::forward_iterator_tag, Value>
    class ConstIterator
    {
    public:
        ConstIterator() : _initialized(false) { }
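The removed base class dates from before C++17, where `std::iterator` was deprecated. As an assumption on my part (not shown in the diff), the five member typedefs the base used to inject would now be declared explicitly if the iterator must keep satisfying the standard iterator traits:

```cpp
/// Sketch: the typedefs std::iterator<std::forward_iterator_tag, Value>
/// previously provided, spelled out as members. Value is the container's
/// element type from the surrounding class template.
class ConstIterator
{
public:
    using iterator_category = std::forward_iterator_tag;
    using value_type        = Value;
    using difference_type   = std::ptrdiff_t;
    using pointer           = Value *;  /// what the old base defined
    using reference         = Value &;
    /// ... the rest of the iterator is unchanged ...
};
```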
@ -46,5 +46,6 @@ if (COMPILER_CLANG)
    no_warning(thread-safety-negative) # experimental flag, too many false positives
    no_warning(enum-constexpr-conversion) # breaks magic-enum library in clang-16
    no_warning(unsafe-buffer-usage) # too aggressive
    no_warning(switch-default) # conflicts with "defaults in a switch covering all enum values"
    # TODO Enable conversion, sign-conversion, double-promotion warnings.
endif ()
2
contrib/NuRaft
vendored
@ -1 +1 @@
Subproject commit 1278e32bb0d5dc489f947e002bdf8c71b0ddaa63
Subproject commit 4a12f99dfc9d47c687ff7700b927cc76856225d1
2
contrib/aws
vendored
@ -1 +1 @@
Subproject commit 9eb5097a0abfa837722cca7a5114a25837817bf2
Subproject commit 5f0542b3ad7eef25b0540d37d778207e0345ea8f
2
contrib/curl
vendored
@ -1 +1 @@
Subproject commit 7161cb17c01dcff1dc5bf89a18437d9d729f1ecd
Subproject commit 5ce164e0e9290c96eb7d502173426c0a135ec008
2
contrib/liburing
vendored
@ -1 +1 @@
Subproject commit f5a48392c4ea33f222cbebeb2e2fc31620162949
Subproject commit f4e42a515cd78c8c9cac2be14222834be5f8df2b
2
contrib/lz4
vendored
@ -1 +1 @@
Subproject commit 92ebf1870b9acbefc0e7970409a181954a10ff40
Subproject commit ce45a9dbdb059511a3e9576b19db3e7f1a4f172e
2
contrib/qpl
vendored
@ -1 +1 @@
Subproject commit a61bdd845fd7ca363b2bcc55454aa520dfcd8298
Subproject commit d4715e0e79896b85612158e135ee1a85f3b3e04d
2
contrib/rapidjson
vendored
@ -1 +1 @@
Subproject commit c4ef90ccdbc21d5d5a628d08316bfd301e32d6fa
Subproject commit 800ca2f38fc3b387271d9e1926fcfc9070222104
@ -190,7 +190,7 @@ function setup_logs_replication
    echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2

    echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
        --distributed_ddl_task_timeout=30 \
        --distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \
        "${CONNECTION_ARGS[@]}" || continue

    echo "Creating table system.${table}_sender" >&2
@ -86,7 +86,7 @@ function download

chmod +x clickhouse
# clickhouse may be compressed - run once to decompress
./clickhouse ||:
./clickhouse --query "SELECT 1" ||:
ln -s ./clickhouse ./clickhouse-server
ln -s ./clickhouse ./clickhouse-client
ln -s ./clickhouse ./clickhouse-local
@ -387,6 +387,11 @@ if [ -f core.zst ]; then
fi

rg --text -F '<Fatal>' server.log > fatal.log ||:
FATAL_LINK=''
if [ -s fatal.log ]; then
    FATAL_LINK='<a href="fatal.log">fatal.log</a>'
fi

dmesg -T > dmesg.log ||:

zstd --threads=0 --rm server.log
@ -419,6 +424,7 @@ p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-s
<a href="main.log">main.log</a>
<a href="dmesg.log">dmesg.log</a>
${CORE_LINK}
${FATAL_LINK}
</p>
<table>
<tr>
@ -1,7 +1,7 @@
version: '2.3'
services:
    mysql2:
        image: mysql:5.7
        image: mysql:8.0
        restart: always
        environment:
            MYSQL_ROOT_PASSWORD: clickhouse
@ -23,7 +23,7 @@ services:
                source: ${MYSQL_CLUSTER_LOGS:-}
                target: /mysql/
    mysql3:
        image: mysql:5.7
        image: mysql:8.0
        restart: always
        environment:
            MYSQL_ROOT_PASSWORD: clickhouse
@ -45,7 +45,7 @@ services:
                source: ${MYSQL_CLUSTER_LOGS:-}
                target: /mysql/
    mysql4:
        image: mysql:5.7
        image: mysql:8.0
        restart: always
        environment:
            MYSQL_ROOT_PASSWORD: clickhouse
@ -214,8 +214,7 @@ function check_server_start()
function check_logs_for_critical_errors()
{
    # Sanitizer asserts
    rg -Fa "==================" /var/log/clickhouse-server/stderr.log | rg -v "in query:" >> /test_output/tmp
    rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
    sed -n '/WARNING:.*anitizer/,/^$/p' >> /test_output/tmp
    rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
        && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
        || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
@ -233,8 +232,8 @@ function check_logs_for_critical_errors()
    # Remove file logical_errors.txt if it's empty
    [ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt

    # No such key errors
    rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
    # No such key errors (ignore a.myext which is used in 02724_database_s3.sh and does not exist)
    rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log | grep -v "a.myext" > /test_output/no_such_key_errors.txt \
        && echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
        || echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv
@ -77,11 +77,18 @@ remove_keeper_config "async_replication" "1"
# create_if_not_exists feature flag doesn't exist on some older versions
remove_keeper_config "create_if_not_exists" "[01]"

# latest_logs_cache_size_threshold setting doesn't exist on some older versions
remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"

# commit_logs_cache_size_threshold setting doesn't exist on some older versions
remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+"

# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
rm /etc/clickhouse-server/config.d/backoff_failed_mutation.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
@ -109,6 +116,12 @@ remove_keeper_config "async_replication" "1"
# create_if_not_exists feature flag doesn't exist on some older versions
remove_keeper_config "create_if_not_exists" "[01]"

# latest_logs_cache_size_threshold setting doesn't exist on some older versions
remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"

# commit_logs_cache_size_threshold setting doesn't exist on some older versions
remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+"

# But we still need default disk because some tables loaded only into it
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
    | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
@ -122,6 +135,7 @@ rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
rm /etc/clickhouse-server/config.d/backoff_failed_mutation.xml
rm /etc/clickhouse-server/config.d/block_number.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
@ -403,4 +403,3 @@ sidebar_label: 2023
* Do not remove part if `Too many open files` is thrown [#56238](https://github.com/ClickHouse/ClickHouse/pull/56238) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix ORC commit [#56261](https://github.com/ClickHouse/ClickHouse/pull/56261) ([Raúl Marín](https://github.com/Algunenano)).
* Fix typo in largestTriangleThreeBuckets.md [#56263](https://github.com/ClickHouse/ClickHouse/pull/56263) ([Nikita Taranov](https://github.com/nickitat)).

@ -596,4 +596,3 @@ sidebar_label: 2023
* Fix assertion from stress test [#50718](https://github.com/ClickHouse/ClickHouse/pull/50718) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix flaky unit test [#50719](https://github.com/ClickHouse/ClickHouse/pull/50719) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Show correct sharing state in system.query_cache [#50728](https://github.com/ClickHouse/ClickHouse/pull/50728) ([Robert Schulze](https://github.com/rschu1ze)).

@ -298,4 +298,3 @@ sidebar_label: 2023
* Update version_date.tsv and changelogs after v23.4.5.22-stable [#51638](https://github.com/ClickHouse/ClickHouse/pull/51638) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.3.7.5-lts [#51639](https://github.com/ClickHouse/ClickHouse/pull/51639) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update parts.md [#51643](https://github.com/ClickHouse/ClickHouse/pull/51643) ([Ramazan Polat](https://github.com/ramazanpolat)).

@ -588,4 +588,3 @@ sidebar_label: 2023
* tests: mark 02152_http_external_tables_memory_tracking as no-parallel [#54155](https://github.com/ClickHouse/ClickHouse/pull/54155) ([Azat Khuzhin](https://github.com/azat)).
* The external logs have had colliding arguments [#54165](https://github.com/ClickHouse/ClickHouse/pull/54165) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Rename macro [#54169](https://github.com/ClickHouse/ClickHouse/pull/54169) ([Kseniia Sumarokova](https://github.com/kssenii)).

@ -379,4 +379,3 @@ sidebar_label: 2023
* Fix typo in packager when ccache is used [#55104](https://github.com/ClickHouse/ClickHouse/pull/55104) ([Ilya Yatsishin](https://github.com/qoega)).
* Reduce flakiness of 01455_opentelemetry_distributed [#55111](https://github.com/ClickHouse/ClickHouse/pull/55111) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix build [#55113](https://github.com/ClickHouse/ClickHouse/pull/55113) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -166,11 +166,11 @@ For most external applications, we recommend using the HTTP interface because it

## Configuration {#configuration}

ClickHouse Server is based on POCO C++ Libraries and uses `Poco::Util::AbstractConfiguration` to represent it's configuration. Configuration is held by `Poco::Util::ServerApplication` class inherited by `DaemonBase` class, which in turn is inherited by `DB::Server` class, implementing clickhouse-server itself. So config can be accessed by `ServerApplication::config()` method.
ClickHouse Server is based on POCO C++ Libraries and uses `Poco::Util::AbstractConfiguration` to represent its configuration. Configuration is held by `Poco::Util::ServerApplication` class inherited by `DaemonBase` class, which in turn is inherited by `DB::Server` class, implementing clickhouse-server itself. So config can be accessed by `ServerApplication::config()` method.

Config is read from multiple files (in XML or YAML format) and merged into single `AbstractConfiguration` by `ConfigProcessor` class. Configuration is loaded at server startup and can be reloaded later if one of config files is updated, removed or added. `ConfigReloader` class is responsible for periodic monitoring of these changes and reload procedure as well. `SYSTEM RELOAD CONFIG` query also triggers config to be reloaded.

For queries and subsystems other than `Server` config is accessible using `Context::getConfigRef()` method. Every subsystem that is capable of reloading it's config without server restart should register itself in reload callback in `Server::main()` method. Note that if newer config has an error, most subsystems will ignore new config, log warning messages and keep working with previously loaded config. Due to the nature of `AbstractConfiguration` it is not possible to pass reference to specific section, so `String config_prefix` is usually used instead.
For queries and subsystems other than `Server` config is accessible using `Context::getConfigRef()` method. Every subsystem that is capable of reloading its config without server restart should register itself in reload callback in `Server::main()` method. Note that if newer config has an error, most subsystems will ignore new config, log warning messages and keep working with previously loaded config. Due to the nature of `AbstractConfiguration` it is not possible to pass reference to specific section, so `String config_prefix` is usually used instead.

## Threads and jobs {#threads-and-jobs}
@ -255,7 +255,7 @@ When we are going to read something from a part in `MergeTree`, we look at `prim

When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.

`MergeTree` is not an LSM tree because it does not contain MEMTABLE and LOG: inserted data is written directly to the filesystem. This behavior makes MergeTree much more suitable to insert data in batches. Therefore frequently inserting small amounts of rows is not ideal for MergeTree. For example, a couple of rows per second is OK, but doing it a thousand times a second is not optimal for MergeTree. However, there is an async insert mode for small inserts to overcome this limitation. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications
`MergeTree` is not an LSM tree because it does not contain MEMTABLE and LOG: inserted data is written directly to the filesystem. This behavior makes MergeTree much more suitable to insert data in batches. Therefore, frequently inserting small amounts of rows is not ideal for MergeTree. For example, a couple of rows per second is OK, but doing it a thousand times a second is not optimal for MergeTree. However, there is an async insert mode for small inserts to overcome this limitation. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications

There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.
@ -38,7 +38,7 @@ ninja

## Running

Once built, the binary can be run with, eg.:
Once built, the binary can be run with, e.g.:

```bash
qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse
@ -37,7 +37,7 @@ sudo xcode-select --install

``` bash
brew update
brew install ccache cmake ninja libtool gettext llvm gcc binutils grep findutils
brew install ccache cmake ninja libtool gettext llvm gcc binutils grep findutils nasm
```

## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
@ -95,7 +95,7 @@ Complete below three steps mentioned in [Star Schema Benchmark](https://clickhou
- Inserting data. Here you should use `./benchmark_sample/rawdata_dir/ssb-dbgen/*.tbl` as input data.
- Converting “star schema” to de-normalized “flat schema”

Set up database with with IAA Deflate codec
Set up database with IAA Deflate codec

``` bash
$ cd ./database_dir/deflate
@ -104,7 +104,7 @@ $ [CLICKHOUSE_EXE] client
```
Complete the same three steps as for lz4 above

Set up database with with ZSTD codec
Set up database with ZSTD codec

``` bash
$ cd ./database_dir/zstd
@ -13,7 +13,7 @@ ClickHouse utilizes third-party libraries for different purposes, e.g., to conne
SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en';
```

(Note that the listed libraries are the ones located in the `contrib/` directory of the ClickHouse repository. Depending on the build options, some of of the libraries may have not been compiled, and as a result, their functionality may not be available at runtime.
Note that the listed libraries are the ones located in the `contrib/` directory of the ClickHouse repository. Depending on the build options, some of the libraries may have not been compiled, and as a result, their functionality may not be available at runtime.

[Example](https://play.clickhouse.com/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)
@ -7,13 +7,13 @@ description: Prerequisites and an overview of how to build ClickHouse

# Getting Started Guide for Building ClickHouse

ClickHouse can be build on Linux, FreeBSD and macOS. If you use Windows, you can still build ClickHouse in a virtual machine running Linux, e.g. [VirtualBox](https://www.virtualbox.org/) with Ubuntu.
ClickHouse can be built on Linux, FreeBSD and macOS. If you use Windows, you can still build ClickHouse in a virtual machine running Linux, e.g. [VirtualBox](https://www.virtualbox.org/) with Ubuntu.

ClickHouse requires a 64-bit system to compile and run, 32-bit systems do not work.

## Creating a Repository on GitHub {#creating-a-repository-on-github}

To start developing for ClickHouse you will need a [GitHub](https://www.virtualbox.org/) account. Please also generate a SSH key locally (if you don't have one already) and upload the public key to GitHub as this is a prerequisite for contributing patches.
To start developing for ClickHouse you will need a [GitHub](https://www.virtualbox.org/) account. Please also generate an SSH key locally (if you don't have one already) and upload the public key to GitHub as this is a prerequisite for contributing patches.

Next, create a fork of the [ClickHouse repository](https://github.com/ClickHouse/ClickHouse/) in your personal account by clicking the "fork" button in the upper right corner.

@ -37,7 +37,7 @@ git clone git@github.com:your_github_username/ClickHouse.git # replace placehol
cd ClickHouse
```

This command creates a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory after the URL but it is important that this path does not contain whitespaces as it may lead to problems with the build later on.
This command creates a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory after the URL, but it is important that this path does not contain whitespaces as it may lead to problems with the build later on.

The ClickHouse repository uses Git submodules, i.e. references to external repositories (usually 3rd party libraries used by ClickHouse). These are not checked out by default. To do so, you can either

@ -45,7 +45,7 @@ The ClickHouse repository uses Git submodules, i.e. references to external repos

- if `git clone` did not check out submodules, run `git submodule update --init --jobs <N>` (e.g. `<N> = 12` to parallelize the checkout) to achieve the same as the previous alternative, or

- if `git clone` did not check out submodules and you like to use [sparse](https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/) and [shallow](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/) submodule checkout to omit unneeded files and history in submodules to save space (ca. 5 GB instead of ca. 15 GB), run `./contrib/update-submodules.sh`. Not really recommended as it generally makes working with submodules less convenient and slower.
- if `git clone` did not check out submodules, and you like to use [sparse](https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/) and [shallow](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/) submodule checkout to omit unneeded files and history in submodules to save space (ca. 5 GB instead of ca. 15 GB), run `./contrib/update-submodules.sh`. Not really recommended as it generally makes working with submodules less convenient and slower.

You can check the Git status with the command: `git submodule status`.

@ -143,7 +143,7 @@ When a large amount of RAM is available on build machine you should limit the nu

On machines with 4GB of RAM, it is recommended to specify 1, for 8GB of RAM `-j 2` is recommended.

If you get the message: `ninja: error: loading 'build.ninja': No such file or directory`, it means that generating a build configuration has failed and you need to inspect the message above.
If you get the message: `ninja: error: loading 'build.ninja': No such file or directory`, it means that generating a build configuration has failed, and you need to inspect the message above.

Upon the successful start of the building process, you’ll see the build progress - the number of processed tasks and the total number of tasks.

@ -184,7 +184,7 @@ You can also run your custom-built ClickHouse binary with the config file from t

**CLion (recommended)**

If you do not know which IDE to use, we recommend that you use [CLion](https://www.jetbrains.com/clion/). CLion is commercial software but it offers a 30 day free trial. It is also free of charge for students. CLion can be used on both Linux and macOS.
If you do not know which IDE to use, we recommend that you use [CLion](https://www.jetbrains.com/clion/). CLion is commercial software, but it offers a 30 day free trial. It is also free of charge for students. CLion can be used on both Linux and macOS.

A few things to know when using CLion to develop ClickHouse:
@ -10,7 +10,7 @@ Allows to connect to databases on a remote [PostgreSQL](https://www.postgresql.o

Gives the real-time access to table list and table structure from remote PostgreSQL with the help of `SHOW TABLES` and `DESCRIBE TABLE` queries.

Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If `use_table_cache` parameter (see the Engine Parameters below) it set to `1`, the table structure is cached and not checked for being modified, but can be updated with `DETACH` and `ATTACH` queries.
Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If `use_table_cache` parameter (see the Engine Parameters below) is set to `1`, the table structure is cached and not checked for being modified, but can be updated with `DETACH` and `ATTACH` queries.

## Creating a Database {#creating-a-database}
@ -2,7 +2,7 @@

Nearest neighborhood search is the problem of finding the M closest points for a given point in an N-dimensional vector space. The most
straightforward approach to solve this problem is a brute force search where the distance between all points in the vector space and the
reference point is computed. This method guarantees perfect accuracy but it is usually too slow for practical applications. Thus, nearest
reference point is computed. This method guarantees perfect accuracy, but it is usually too slow for practical applications. Thus, nearest
neighborhood search problems are often solved with [approximative algorithms](https://github.com/erikbern/ann-benchmarks). Approximative
nearest neighborhood search techniques, in conjunction with [embedding
methods](https://cloud.google.com/architecture/overview-extracting-and-serving-feature-embeddings-for-machine-learning) allow to search huge
@ -24,7 +24,7 @@ LIMIT N

`vectors` contains N-dimensional values of type [Array](../../../sql-reference/data-types/array.md) or
[Tuple](../../../sql-reference/data-types/tuple.md), for example embeddings. Function `Distance` computes the distance between two vectors.
Often, the the Euclidean (L2) distance is chosen as distance function but [other
Often, the Euclidean (L2) distance is chosen as distance function but [other
distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17,
0.33, ...)`, and `N` limits the number of search results.

@ -109,7 +109,7 @@ clickhouse-client --param_vec='hello' --query="SELECT * FROM table_with_ann_inde

**Restrictions**: Queries that contain both a `WHERE Distance(vectors, Point) < MaxDistance` and an `ORDER BY Distance(vectors, Point)`
clause cannot use ANN indexes. Also, the approximate algorithms used to determine the nearest neighbors require a limit, hence queries
without `LIMIT` clause cannot utilize ANN indexes. Also ANN indexes are only used if the query has a `LIMIT` value smaller than setting
without `LIMIT` clause cannot utilize ANN indexes. Also, ANN indexes are only used if the query has a `LIMIT` value smaller than setting
`max_limit_for_ann_queries` (default: 1 million rows). This is a safeguard to prevent large memory allocations by external libraries for
approximate neighbor search.
@ -120,9 +120,9 @@ then each indexed block will contain 16384 rows. However, data structures and al
provided by external libraries) are inherently row-oriented. They store a compact representation of a set of rows and also return rows for
ANN queries. This causes some rather unintuitive differences in the way ANN indexes behave compared to normal skip indexes.

When a user defines a ANN index on a column, ClickHouse internally creates a ANN "sub-index" for each index block. The sub-index is "local"
When a user defines an ANN index on a column, ClickHouse internally creates an ANN "sub-index" for each index block. The sub-index is "local"
in the sense that it only knows about the rows of its containing index block. In the previous example and assuming that a column has 65536
rows, we obtain four index blocks (spanning eight granules) and a ANN sub-index for each index block. A sub-index is theoretically able to
rows, we obtain four index blocks (spanning eight granules) and an ANN sub-index for each index block. A sub-index is theoretically able to
return the rows with the N closest points within its index block directly. However, since ClickHouse loads data from disk to memory at the
granularity of granules, sub-indexes extrapolate matching rows to granule granularity. This is different from regular skip indexes which
skip data at the granularity of index blocks.
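The arithmetic in this paragraph, restated as a small self-checking sketch (the names are mine, not ClickHouse's):

```cpp
#include <cstddef>

constexpr std::size_t index_granularity = 8192;    /// rows per granule
constexpr std::size_t granularity_factor = 2;      /// GRANULARITY 2 in the index definition
constexpr std::size_t rows_per_index_block = index_granularity * granularity_factor; /// 16384
constexpr std::size_t column_rows = 65536;

constexpr std::size_t sub_indexes = column_rows / rows_per_index_block; /// one ANN sub-index per block
constexpr std::size_t granules = column_rows / index_granularity;

static_assert(sub_indexes == 4 && granules == 8, "matches the numbers in the text");
```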
@ -231,7 +231,7 @@ The Annoy index currently does not work with per-table, non-default `index_granu

## USearch {#usearch}

This type of ANN index is based on the [the USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW
This type of ANN index is based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW
algorithm](https://arxiv.org/abs/1603.09320), i.e., builds a hierarchical graph where each point represents a vector and the edges represent
similarity. Such hierarchical structures can be very efficient on large collections. They may often fetch 0.05% or less data from the
overall dataset, while still providing 99% recall. This is especially useful when working with high-dimensional vectors,
@ -125,7 +125,7 @@ For each resulting data part ClickHouse saves:
3. The first “cancel” row, if there are more “cancel” rows than “state” rows.
4. None of the rows, in all other cases.

Also when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows than “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log. This error can occur if the same data were inserted more than once.
Also, when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows than “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log. This error can occur if the same data were inserted more than once.

Thus, collapsing should not change the results of calculating statistics.
Changes are gradually collapsed so that in the end only the last state of almost every object is left.
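A minimal sketch of that consistency rule (my own illustration, not ClickHouse's merge code):

```cpp
#include <cstddef>
#include <iostream>

/// The merge proceeds regardless; a divergence of 2 or more between "state"
/// and "cancel" rows is only recorded as a logical error, typically caused
/// by inserting the same data more than once.
void checkCollapsingCounts(std::size_t state_rows, std::size_t cancel_rows)
{
    std::size_t diff = state_rows > cancel_rows ? state_rows - cancel_rows
                                                : cancel_rows - state_rows;
    if (diff >= 2)
        std::cerr << "Logical error: state/cancel counts diverge by " << diff << '\n';
}
```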
@ -196,7 +196,7 @@ What do we see and where is collapsing?

With two `INSERT` queries, we created 2 data parts. The `SELECT` query was performed in 2 threads, and we got a random order of rows. Collapsing did not occur because there was no merge of the data parts yet. ClickHouse merges data parts at an unknown moment which we cannot predict.

Thus we need aggregation:
Thus, we need aggregation:

``` sql
SELECT
@ -870,6 +870,11 @@ Tags:
- `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`.
- `least_used_ttl_ms` - Configure timeout (in milliseconds) for the updating available space on all disks (`0` - update always, `-1` - never update, default is `60000`). Note, if the disk can be used by ClickHouse only and is not subject to an online filesystem resize/shrink you can use `-1`, in all other cases it is not recommended, since eventually it will lead to incorrect space distribution.
- `prefer_not_to_merge` — You should not use this setting. Disables merging of data parts on this volume (this is harmful and leads to performance degradation). When this setting is enabled (don't do it), merging data on this volume is not allowed (which is bad). This allows (but you don't need it) controlling (if you want to control something, you're making a mistake) how ClickHouse works with slow disks (but ClickHouse knows better, so please don't use this setting).
- `volume_priority` — Defines the priority (order) in which volumes are filled. Lower value means higher priority. The parameter values should be natural numbers and collectively cover the range from 1 to N (lowest priority given) without skipping any numbers. The resulting order is sketched after this list.
    * If _all_ volumes are tagged, they are prioritized in given order.
    * If only _some_ volumes are tagged, those without the tag have the lowest priority, and they are prioritized in the order they are defined in config.
    * If _no_ volumes are tagged, their priority is set correspondingly to their order they are declared in configuration.
    * Two volumes cannot have the same priority value.
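One way to read those four rules, as a sketch under assumed names (not the server's implementation): volumes with an explicit `volume_priority` come first, ordered by value; untagged volumes follow in configuration order.

```cpp
#include <algorithm>
#include <cstddef>
#include <optional>
#include <string>
#include <vector>

struct VolumeInfo
{
    std::string name;
    std::optional<std::size_t> priority; /// volume_priority, if set
    std::size_t config_order;            /// position within the storage policy
};

void sortVolumes(std::vector<VolumeInfo> & volumes)
{
    std::stable_sort(volumes.begin(), volumes.end(),
        [](const VolumeInfo & a, const VolumeInfo & b)
        {
            if (a.priority && b.priority)
                return *a.priority < *b.priority; /// both tagged: lower value first
            if (a.priority != b.priority)
                return a.priority.has_value();    /// tagged before untagged
            return a.config_order < b.config_order; /// neither tagged: config order
        });
}
```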
Configuration examples:

@ -919,7 +924,8 @@ In given example, the `hdd_in_order` policy implements the [round-robin](https:/
If there are different kinds of disks available in the system, `moving_from_ssd_to_hdd` policy can be used instead. The volume `hot` consists of an SSD disk (`fast_ssd`), and the maximum size of a part that can be stored on this volume is 1GB. All the parts with the size larger than 1GB will be stored directly on the `cold` volume, which contains an HDD disk `disk1`.
Also, once the disk `fast_ssd` gets filled by more than 80%, data will be transferred to the `disk1` by a background process.

The order of volume enumeration within a storage policy is important. Once a volume is overfilled, data are moved to the next one. The order of disk enumeration is important as well because data are stored on them in turns.
The order of volume enumeration within a storage policy is important in case at least one of the volumes listed has no explicit `volume_priority` parameter.
Once a volume is overfilled, data are moved to the next one. The order of disk enumeration is important as well because data are stored on them in turns.

When creating a table, one can apply one of the configured storage policies to it:
@ -304,6 +304,24 @@ We use the term `MergeTree` to refer to all table engines in the `MergeTree fami

If you had a `MergeTree` table that was manually replicated, you can convert it to a replicated table. You might need to do this if you have already collected a large amount of data in a `MergeTree` table and now you want to enable replication.

A `MergeTree` table can be automatically converted on server restart if the `convert_to_replicated` flag is set at the table's data directory (`/var/lib/clickhouse/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/` for `Atomic` database).
Create an empty `convert_to_replicated` file and the table will be loaded as replicated on the next server restart.

This query can be used to get the table's data path.

```sql
SELECT data_paths FROM system.tables WHERE table = 'table_name' AND database = 'database_name';
```

Note that the ReplicatedMergeTree table will be created with the values of the `default_replica_path` and `default_replica_name` settings.
To create a converted table on other replicas, you will need to explicitly specify its path in the first argument of the `ReplicatedMergeTree` engine. The following query can be used to get its path.

```sql
SELECT zookeeper_path FROM system.replicas WHERE table = 'table_name';
```

There is also a manual way to do this without server restart.

If the data differs on various replicas, first sync it, or delete this data on all the replicas except one.

Rename the existing MergeTree table, then create a `ReplicatedMergeTree` table with the old name.
@ -72,7 +72,11 @@ Specifying the `sharding_key` is necessary for the following:

#### fsync_directories

`fsync_directories` - do the `fsync` for directories. Guarantees that the OS refreshed directory metadata after operations related to background inserts on Distributed table (after insert, after sending the data to shard, etc).
`fsync_directories` - do the `fsync` for directories. Guarantees that the OS refreshed directory metadata after operations related to background inserts on Distributed table (after insert, after sending the data to shard, etc.).

#### skip_unavailable_shards

`skip_unavailable_shards` - If true, ClickHouse silently skips unavailable shards. Shard is marked as unavailable when: 1) The shard cannot be reached due to a connection failure. 2) Shard is unresolvable through DNS. 3) Table does not exist on the shard. Default false.

#### bytes_to_throw_insert

@ -102,6 +106,10 @@ Specifying the `sharding_key` is necessary for the following:

`background_insert_max_sleep_time_ms` - same as [distributed_background_insert_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_background_insert_max_sleep_time_ms)

#### flush_on_detach

`flush_on_detach` - Flush data to remote nodes on DETACH/DROP/server shutdown. Default true.

:::note
**Durability settings** (`fsync_...`):

@ -220,7 +228,7 @@ Second, you can perform `INSERT` statements on a `Distributed` table. In this ca

Each shard can have a `<weight>` defined in the config file. By default, the weight is `1`. Data is distributed across shards in the amount proportional to the shard weight. All shard weights are summed up, then each shard's weight is divided by the total to determine each shard's proportion. For example, if there are two shards and the first has a weight of 1 while the second has a weight of 2, the first will be sent one third (1 / 3) of inserted rows and the second will be sent two thirds (2 / 3).
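The proportional routing described above, as a rough sketch (the function name and signature are hypothetical, not ClickHouse's actual code):

```cpp
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>

/// Map a row's sharding-key hash onto cumulative weight ranges: with weights
/// {1, 2}, shard 0 owns 1 of 3 slots and shard 1 owns 2 of 3, i.e. the
/// 1/3 vs 2/3 split from the example above.
std::size_t pickShard(const std::vector<std::size_t> & weights, uint64_t row_hash)
{
    std::size_t total = std::accumulate(weights.begin(), weights.end(), std::size_t{0});
    uint64_t point = row_hash % total;
    std::size_t cumulative = 0;
    for (std::size_t i = 0; i < weights.size(); ++i)
    {
        cumulative += weights[i];
        if (point < cumulative)
            return i;
    }
    return weights.size() - 1; /// unreachable when total > 0
}
```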
Each shard can have the `internal_replication` parameter defined in the config file. If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this if the tables underlying the `Distributed` table are replicated tables (e.g. any of the `Replicated*MergeTree` table engines). One of the table replicas will receive the write and it will be replicated to the other replicas automatically.
Each shard can have the `internal_replication` parameter defined in the config file. If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this if the tables underlying the `Distributed` table are replicated tables (e.g. any of the `Replicated*MergeTree` table engines). One of the table replicas will receive the write, and it will be replicated to the other replicas automatically.

If `internal_replication` is set to `false` (the default), data is written to all replicas. In this case, the `Distributed` table replicates data itself. This is worse than using replicated tables because the consistency of replicas is not checked and, over time, they will contain slightly different data.
@ -12,7 +12,7 @@ The queries below were executed on a **Production** instance of [ClickHouse Clou
:::

1. Without inserting the data into ClickHouse, we can query it in place. Let's grab some rows so we can see what they look like:
1. Without inserting the data into ClickHouse, we can query it in place. Let's grab some rows, so we can see what they look like:

```sql
SELECT *

@ -29,7 +29,7 @@ Here is a preview of the dashboard created in this guide:

This dataset is from [OpenCelliD](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers.

As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc).
As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc.).

OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License, and we redistribute a snapshot of this dataset under the terms of the same license. The up-to-date version of the dataset is available to download after sign in.

@ -355,7 +355,7 @@ Click on **UPDATE CHART** to render the visualization.

### Add the charts to a **dashboard**

This screenshot shows cell tower locations with LTE, UMTS, and GSM radios. The charts are all created in the same way and they are added to a dashboard.
This screenshot shows cell tower locations with LTE, UMTS, and GSM radios. The charts are all created in the same way, and they are added to a dashboard.

![Dashboard of cell towers by radio type in mcc 204](@site/docs/en/getting-started/example-datasets/images/superset-cell-tower-dashboard.png)
@ -132,7 +132,7 @@ FROM covid19;
└────────────────────────────────────────────┘
```

7. You will notice the data has a lot of 0's for dates - either weekends or days where numbers were not reported each day. We can use a window function to smooth out the daily averages of new cases:
7. You will notice the data has a lot of 0's for dates - either weekends or days when numbers were not reported each day. We can use a window function to smooth out the daily averages of new cases:

```sql
SELECT

@ -262,4 +262,4 @@ The results look like

:::note
As mentioned in the [GitHub repo](https://github.com/GoogleCloudPlatform/covid-19-open-data), the dataset is no longer updated as of September 15, 2022.
:::
:::
@ -79,10 +79,7 @@ It is recommended to use official pre-compiled `deb` packages for Debian or Ubun
#### Setup the Debian repository
``` bash
sudo apt-get install -y apt-transport-https ca-certificates dirmngr
GNUPGHOME=$(mktemp -d)
sudo GNUPGHOME="$GNUPGHOME" gpg --no-default-keyring --keyring /usr/share/keyrings/clickhouse-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754
sudo rm -rf "$GNUPGHOME"
sudo chmod +r /usr/share/keyrings/clickhouse-keyring.gpg
sudo gpg --no-default-keyring --keyring /usr/share/keyrings/clickhouse-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754

echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main" | sudo tee \
    /etc/apt/sources.list.d/clickhouse.list
@ -243,7 +243,7 @@ If no database is specified, the `default` database will be used.

If the user name, password or database was specified in the connection string, it cannot be specified using `--user`, `--password` or `--database` (and vice versa).

The host component can either be an a host name or IP address. Put an IPv6 address in square brackets to specify it:
The host component can either be a host name or IP address. Put an IPv6 address in square brackets to specify it:

```text
clickhouse://[2001:db8::1234]
@ -33,7 +33,7 @@ The supported formats are:
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock)) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock) | ✔ | ✔ |
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
@ -13,7 +13,7 @@ can control it.

Schema inference is used when ClickHouse needs to read the data in a specific data format and the structure is unknown.

## Table functions [file](../sql-reference/table-functions/file.md), [s3](../sql-reference/table-functions/s3.md), [url](../sql-reference/table-functions/url.md), [hdfs](../sql-reference/table-functions/hdfs.md).
## Table functions [file](../sql-reference/table-functions/file.md), [s3](../sql-reference/table-functions/s3.md), [url](../sql-reference/table-functions/url.md), [hdfs](../sql-reference/table-functions/hdfs.md), [azureBlobStorage](../sql-reference/table-functions/azureBlobStorage.md).

These table functions have the optional argument `structure` with the structure of input data. If this argument is not specified or set to `auto`, the structure will be inferred from the data.

@ -55,7 +55,7 @@ DESCRIBE file('hobbies.jsonl')
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

## Table engines [File](../engines/table-engines/special/file.md), [S3](../engines/table-engines/integrations/s3.md), [URL](../engines/table-engines/special/url.md), [HDFS](../engines/table-engines/integrations/hdfs.md)
## Table engines [File](../engines/table-engines/special/file.md), [S3](../engines/table-engines/integrations/s3.md), [URL](../engines/table-engines/special/url.md), [HDFS](../engines/table-engines/integrations/hdfs.md), [azureBlobStorage](../engines/table-engines/integrations/azureBlobStorage.md)

If the list of columns is not specified in `CREATE TABLE` query, the structure of the table will be inferred automatically from the data.
@ -1061,7 +1061,7 @@ $$)
└──────────────┴───────────────┘
```

## Values {#values}
### Values {#values}

In Values format ClickHouse extracts column value from the row and then parses it using
the recursive parser similar to how literals are parsed.
@ -1986,3 +1986,46 @@ Note:
- As some of the files may not contain some columns from the resulting schema, union mode is supported only for formats that support reading subset of columns (like JSONEachRow, Parquet, TSVWithNames, etc) and won't work for other formats (like CSV, TSV, JSONCompactEachRow, etc).
- If ClickHouse cannot infer the schema from one of the files, the exception will be thrown.
- If you have a lot of files, reading schema from all of them can take a lot of time.

## Automatic format detection {#automatic-format-detection}

If data format is not specified and cannot be determined by the file extension, ClickHouse will try to detect the file format by its content.

**Examples:**

Let's say we have `data` with the following content:
```
"a","b"
1,"Data1"
2,"Data2"
3,"Data3"
```

We can inspect and query this file without specifying format or structure:
```sql
:) desc file(data);
```

```text
┌─name─┬─type─────────────┐
│ a    │ Nullable(Int64)  │
│ b    │ Nullable(String) │
└──────┴──────────────────┘
```

```sql
:) select * from file(data);
```

```text
┌─a─┬─b─────┐
│ 1 │ Data1 │
│ 2 │ Data2 │
│ 3 │ Data3 │
└───┴───────┘
```

:::note
ClickHouse can detect only a subset of formats, and this detection takes some time, so it is always better to specify the format explicitly.
:::
15
docs/en/interfaces/third-party/gui.md
vendored
@ -306,3 +306,18 @@ License: [commercial](https://tablum.io/pricing) product with 3-month free perio

Try it out for free [in the cloud](https://tablum.io/try).
Learn more about the product at [TABLUM.IO](https://tablum.io/)

### CKMAN {#ckman}

[CKMAN](https://www.github.com/housepower/ckman) is a tool for managing and monitoring ClickHouse clusters!

Features:

- Rapid and convenient automated deployment of clusters through a browser interface
- Clusters can be scaled up or down
- Load balance the data of the cluster
- Upgrade the cluster online
- Modify the cluster configuration on the page
- Provides cluster node monitoring and zookeeper monitoring
- Monitor the status of tables and partitions, and monitor slow SQL statements
- Provides an easy-to-use SQL execution page
@ -80,6 +80,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- ASYNC: backup or restore asynchronously
- PARTITIONS: a list of partitions to restore
- SETTINGS:
    - `id`: id of the backup or restore operation; a randomly generated UUID is used if not specified manually. If there is an already running operation with the same `id`, an exception is thrown.
    - [`compression_method`](/docs/en/sql-reference/statements/create/table.md/#column-compression-codecs) and compression_level
    - `password` for the file on disk
    - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
@ -206,7 +207,7 @@ end_time: 2022-08-30 09:21:46
1 row in set. Elapsed: 0.002 sec.
```

Along with `system.backups` table, all backup and restore operations are also tracked in the system log table [backup_log](../operations/system-tables/backup_log.md):
Along with `system.backups` table, all backup and restore operations are also tracked in the system log table [backup_log](../operations/system-tables/backup_log.md):
```
SELECT *
FROM system.backup_log
@ -222,7 +223,7 @@ event_time_microseconds: 2023-08-18 11:13:43.097414
id: 7678b0b3-f519-4e6e-811f-5a0781a4eb52
name: Disk('backups', '1.zip')
status: CREATING_BACKUP
error:
error:
start_time: 2023-08-18 11:13:43
end_time: 1970-01-01 03:00:00
num_files: 0
@ -252,7 +253,7 @@ compressed_size: 0
files_read: 0
bytes_read: 0

2 rows in set. Elapsed: 0.075 sec.
2 rows in set. Elapsed: 0.075 sec.
```

## Configuring BACKUP/RESTORE to use an S3 Endpoint

@ -271,7 +272,7 @@ Creating an S3 bucket is covered in [Use S3 Object Storage as a ClickHouse disk]

The destination for a backup will be specified like this:
```
S3('<S3 endpoint>/<directory>', '<Access key ID>', '<Secret access key>)
S3('<S3 endpoint>/<directory>', '<Access key ID>', '<Secret access key>')
```

```sql
@ -6,15 +6,66 @@ sidebar_label: Configuration Files

# Configuration Files

The ClickHouse server can be configured with configuration files in XML or YAML syntax. In most installation types, the ClickHouse server runs with `/etc/clickhouse-server/config.xml` as default configuration file but it is also possible to specify the location of the configuration file manually at server startup using command line option `--config-file=` or `-C`. Additional configuration files may be placed into directory `config.d/` relative to the main configuration file, for example into directory `/etc/clickhouse-server/config.d/`. Files in this directory and the main configuration are merged in a preprocessing step before the configuration is applied in ClickHouse server. Configuration files are merged in alphabetical order. To simplify updates and improve modularization, it is best practice to keep the default `config.xml` file unmodified and place additional customization into `config.d/`.
The ClickHouse server can be configured with configuration files in XML or YAML syntax. In most installation types, the ClickHouse server runs with `/etc/clickhouse-server/config.xml` as default configuration file, but it is also possible to specify the location of the configuration file manually at server startup using command line option `--config-file=` or `-C`. Additional configuration files may be placed into directory `config.d/` relative to the main configuration file, for example into directory `/etc/clickhouse-server/config.d/`. Files in this directory and the main configuration are merged in a preprocessing step before the configuration is applied in ClickHouse server. Configuration files are merged in alphabetical order. To simplify updates and improve modularization, it is best practice to keep the default `config.xml` file unmodified and place additional customization into `config.d/`.

It is possible to mix XML and YAML configuration files, for example you could have a main configuration file `config.xml` and additional configuration files `config.d/network.xml`, `config.d/timezone.yaml` and `config.d/keeper.yaml`. Mixing XML and YAML within a single configuration file is not supported. XML configuration files should use `<clickhouse>...</clickhouse>` as top-level tag. In YAML configuration files, `clickhouse:` is optional, the parser inserts it implicitly if absent.

## Overriding Configuration {#override}
## Merging Configuration {#merging}

The merge of configuration files behaves as one intuitively expects: The contents of both files are combined recursively, children with the same name are replaced by the element of the more specific configuration file. The merge can be customized using attributes `replace` and `remove`.
- Attribute `replace` means that the element is replaced by the specified one.
- Attribute `remove` means that the element is deleted.
Two configuration files (usually the main configuration file and other configuration files from `config.d/`) are merged as follows:

- If a node (i.e. a path leading to an element) appears in both files and does not have attributes `replace` or `remove`, it is included in the merged configuration file and children from both nodes are included and merged recursively.
- If one of the two nodes contains attribute `replace`, it is included in the merged configuration file but only children from the node with attribute `replace` are included.
- If one of the two nodes contains attribute `remove`, the node is not included in the merged configuration file (if it exists already, it is deleted).

Example:

```xml
<!-- config.xml -->
<clickhouse>
    <config_a>
        <setting_1>1</setting_1>
    </config_a>
    <config_b>
        <setting_2>2</setting_2>
    </config_b>
    <config_c>
        <setting_3>3</setting_3>
    </config_c>
</clickhouse>
```

and

```xml
<!-- config.d/other_config.xml -->
<clickhouse>
    <config_a>
        <setting_4>4</setting_4>
    </config_a>
    <config_b replace="replace">
        <setting_5>5</setting_5>
    </config_b>
    <config_c remove="remove">
        <setting_6>6</setting_6>
    </config_c>
</clickhouse>
```

generates the merged configuration file:

```xml
<clickhouse>
    <config_a>
        <setting_1>1</setting_1>
        <setting_4>4</setting_4>
    </config_a>
    <config_b>
        <setting_5>5</setting_5>
    </config_b>
</clickhouse>
```

To specify that a value of an element should be replaced by the value of an environment variable, you can use attribute `from_env`.

@ -36,7 +87,7 @@ which is equal to
<clickhouse>
    <profiles>
        <default>
            <max_query_size/>150000</max_query_size>
            <max_query_size>150000</max_query_size>
        </default>
    </profiles>
</clickhouse>

@ -63,7 +114,7 @@ XML substitution example:
</clickhouse>
```

Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node, and it will be fully inserted into the source element.

## Encrypting and Hiding Configuration {#encryption}

@ -125,7 +176,7 @@ Users configuration can be split into separate files similar to `config.xml` and
Directory name is defined as `users_config` setting without `.xml` postfix concatenated with `.d`.
Directory `users.d` is used by default, as `users_config` defaults to `users.xml`.

Note that configuration files are first merged taking into account [Override](#override) settings and includes are processed after that.
Note that configuration files are first [merged](#merging) taking into account settings, and includes are processed after that.

## XML example {#example}
@ -199,6 +199,20 @@ Type: Bool
|
||||
|
||||
Default: 0
|
||||
|
||||
|
||||
## dns_cache_max_size
|
||||
|
||||
Internal DNS cache max size in bytes.
|
||||
|
||||
:::note
|
||||
ClickHouse also has a reverse cache, so the actual memory usage could be twice as much.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
|
||||
Default: 1024
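Following the XML example style used for other settings on this page, a configuration sketch (the value is illustrative only):

``` xml
<dns_cache_max_size>10485760</dns_cache_max_size>
```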
## dns_cache_update_period

Internal DNS cache update period in seconds.

@ -458,6 +472,38 @@ Type: Double

Default: 0.9

## cgroups_memory_usage_observer_wait_time

Interval in seconds during which the server's maximum allowed memory consumption is adjusted by the corresponding threshold in cgroups (see settings `cgroup_memory_watcher_hard_limit_ratio` and `cgroup_memory_watcher_soft_limit_ratio`).

Type: UInt64

Default: 15

## cgroup_memory_watcher_hard_limit_ratio

Specifies the "hard" threshold with regard to the memory consumption of the server process according to cgroups, after which the server's maximum memory consumption is adjusted to the threshold value.

See settings `cgroups_memory_usage_observer_wait_time` and `cgroup_memory_watcher_soft_limit_ratio`.

Type: Double

Default: 0.95

## cgroup_memory_watcher_soft_limit_ratio

Specifies the "soft" threshold with regard to the memory consumption of the server process according to cgroups, after which arenas in jemalloc are purged.

See settings `cgroups_memory_usage_observer_wait_time` and `cgroup_memory_watcher_hard_limit_ratio`.

Type: Double

Default: 0.95
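A hedged configuration sketch combining the three watcher settings (the values are illustrative, not recommendations):

``` xml
<clickhouse>
    <cgroups_memory_usage_observer_wait_time>15</cgroups_memory_usage_observer_wait_time>
    <cgroup_memory_watcher_hard_limit_ratio>0.95</cgroup_memory_watcher_hard_limit_ratio>
    <cgroup_memory_watcher_soft_limit_ratio>0.9</cgroup_memory_watcher_soft_limit_ratio>
</clickhouse>
```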
## max_table_size_to_drop

Restriction on deleting tables.

@ -472,10 +518,10 @@ The value 0 means that you can delete all tables without any restrictions.
``` xml
<max_table_size_to_drop>0</max_table_size_to_drop>
```

## max\_database\_num\_to\_warn {#max-database-num-to-warn}
If the number of attached databases exceeds the specified value, clickhouse server will add warning messages to `system.warnings` table.

## max\_database\_num\_to\_warn {#max-database-num-to-warn}
If the number of attached databases exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 1000

**Example**

@ -483,10 +529,10 @@ Default value: 1000
``` xml
<max_database_num_to_warn>50</max_database_num_to_warn>
```

## max\_table\_num\_to\_warn {#max-table-num-to-warn}
If the number of attached tables exceeds the specified value, clickhouse server will add warning messages to `system.warnings` table.
Default value: 5000

## max\_table\_num\_to\_warn {#max-table-num-to-warn}
If the number of attached tables exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 5000

**Example**

@ -495,9 +541,9 @@ Default value: 5000
```
## max\_part\_num\_to\_warn {#max-part-num-to-warn}
If the number of active parts exceeds the specified value, clickhouse server will add warning messages to `system.warnings` table.
Default value: 100000

## max\_part\_num\_to\_warn {#max-part-num-to-warn}
If the number of active parts exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 100000

**Example**

@ -2873,3 +2919,23 @@ A limit on the number of materialized views attached to a table.
Note that only directly dependent views are considered here, and the creation of one view on top of another view is not considered.

Default value: `0`.

## format_alter_operations_with_parentheses {#format_alter_operations_with_parentheses}

If set to true, then alter operations will be surrounded by parentheses in formatted queries. This makes the parsing of formatted alter queries less ambiguous.

Type: Bool

Default: 0
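A hedged illustration of the effect (the table and column names are invented; the exact formatted output is an assumption):

```sql
-- With format_alter_operations_with_parentheses = 1, a formatted ALTER query
-- with several operations wraps each operation in parentheses, e.g.:
ALTER TABLE t (ADD COLUMN c UInt32), (DROP COLUMN d)
```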
## ignore_empty_sql_security_in_create_view_query {#ignore_empty_sql_security_in_create_view_query}

If true, ClickHouse doesn't write defaults for an empty SQL security statement in CREATE VIEW queries.

:::note
This setting is only necessary for the migration period and will become obsolete in 24.4.
:::

Type: Bool

Default: 1

@ -1656,6 +1656,33 @@ Result:
└─────────────────────────┴─────────┘
```
### output_format_pretty_single_large_number_tip_threshold {#output_format_pretty_single_large_number_tip_threshold}

Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0).

Possible values:

- 0 — The readable number tip will not be printed.
- Positive integer — The readable number tip will be printed if the single number exceeds this value.

Default value: `1000000`.

**Example**

Query:

```sql
SELECT 1000000000 as a;
```

Result:
```text
┌──────────a─┐
│ 1000000000 │ -- 1.00 billion
└────────────┘
```

## Template format settings {#template-format-settings}

### format_template_resultset {#format_template_resultset}

@ -755,7 +755,7 @@ By default: 1,000,000. It only works when reading from MergeTree engines.
## max_concurrent_queries_for_user {#max-concurrent-queries-for-user}

The maximum number of simultaneously processed queries related to MergeTree table per user.
The maximum number of simultaneously processed queries per user.

Possible values:

@ -1775,6 +1775,10 @@ Default value: 0 (no restriction).
## insert_quorum {#insert_quorum}

:::note
This setting is not applicable to SharedMergeTree, see [SharedMergeTree consistency](/docs/en/cloud/reference/shared-merge-tree/#consistency) for more information.
:::

Enables the quorum writes.

- If `insert_quorum < 2`, the quorum writes are disabled.

@ -1814,6 +1818,10 @@ See also:

## insert_quorum_parallel {#insert_quorum_parallel}

:::note
This setting is not applicable to SharedMergeTree, see [SharedMergeTree consistency](/docs/en/cloud/reference/shared-merge-tree/#consistency) for more information.
:::

Enables or disables parallelism for quorum `INSERT` queries. If enabled, additional `INSERT` queries can be sent while previous queries have not yet finished. If disabled, additional writes to the same table will be rejected.

Possible values:

@ -1831,6 +1839,10 @@ See also:

## select_sequential_consistency {#select_sequential_consistency}

:::note
This setting differs in behavior between SharedMergeTree and ReplicatedMergeTree, see [SharedMergeTree consistency](/docs/en/cloud/reference/shared-merge-tree/#consistency) for more information about the behavior of `select_sequential_consistency` in SharedMergeTree.
:::

Enables or disables sequential consistency for `SELECT` queries. Requires `insert_quorum_parallel` to be disabled (enabled by default).

Possible values:

@ -2029,7 +2041,7 @@ Possible values:
- 0 — Disabled.
- 1 — Enabled.

Default value: 1.
Default value: 0.

By default, async inserts into replicated tables via `INSERT` statements with [async_insert](#async-insert) enabled are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)).
For the replicated tables, by default, only 10000 of the most recent inserts for each partition are deduplicated (see [replicated_deduplication_window_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-async-inserts), [replicated_deduplication_window_seconds_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-seconds-async-inserts)).
@ -3437,7 +3449,7 @@ Has an effect only when the connection is made through the MySQL wire protocol.
- 0 - Use `BLOB`.
- 1 - Use `TEXT`.

Default value: `0`.
Default value: `1`.

## mysql_map_fixed_string_to_text_in_show_columns {#mysql_map_fixed_string_to_text_in_show_columns}

@ -3448,7 +3460,7 @@ Has an effect only when the connection is made through the MySQL wire protocol.
- 0 - Use `BLOB`.
- 1 - Use `TEXT`.

Default value: `0`.
Default value: `1`.

## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold}
|
||||
|
||||
## allow_experimental_live_view {#allow-experimental-live-view}
|
||||
|
||||
Allows creation of experimental [live views](../../sql-reference/statements/create/view.md/#live-view).
|
||||
Allows creation of a deprecated LIVE VIEW.
|
||||
|
||||
Possible values:
|
||||
|
||||
@ -3709,21 +3721,15 @@ Default value: `0`.
|
||||
|
||||
## live_view_heartbeat_interval {#live-view-heartbeat-interval}
|
||||
|
||||
Sets the heartbeat interval in seconds to indicate [live view](../../sql-reference/statements/create/view.md/#live-view) is alive .
|
||||
|
||||
Default value: `15`.
|
||||
Deprecated.
|
||||
|
||||
## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh}
|
||||
|
||||
Sets the maximum number of inserted blocks after which mergeable blocks are dropped and query for [live view](../../sql-reference/statements/create/view.md/#live-view) is re-executed.
|
||||
|
||||
Default value: `64`.
|
||||
Deprecated.
|
||||
|
||||
## periodic_live_view_refresh {#periodic-live-view-refresh}
|
||||
|
||||
Sets the interval in seconds after which periodically refreshed [live view](../../sql-reference/statements/create/view.md/#live-view) is forced to refresh.
|
||||
|
||||
Default value: `60`.
|
||||
Deprecated.
|
||||
|
||||
## http_connection_timeout {#http_connection_timeout}
|
||||
|
||||
@ -4273,7 +4279,7 @@ Result:

## enable_order_by_all {#enable-order-by-all}

Enables or disables sorting by `ALL` columns, i.e. [ORDER BY](../../sql-reference/statements/select/order-by.md)
Enables or disables sorting with `ORDER BY ALL` syntax, see [ORDER BY](../../sql-reference/statements/select/order-by.md).

Possible values:

@ -4293,7 +4299,7 @@ INSERT INTO TAB VALUES (10, 20, 30), (20, 20, 10), (30, 10, 20);

SELECT * FROM TAB ORDER BY ALL; -- returns an error that ALL is ambiguous

SELECT * FROM TAB ORDER BY ALL SETTINGS enable_order_by_all;
SELECT * FROM TAB ORDER BY ALL SETTINGS enable_order_by_all = 0;
```

Result:

@ -5372,6 +5378,24 @@ SELECT map('a', range(number), 'b', number, 'c', 'str_' || toString(number)) as

Default value: `false`.
## default_normal_view_sql_security {#default_normal_view_sql_security}

Allows setting the default `SQL SECURITY` option when creating a normal view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).

The default value is `INVOKER`.

## default_materialized_view_sql_security {#default_materialized_view_sql_security}

Allows setting the default value for the `SQL SECURITY` option when creating a materialized view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).

The default value is `DEFINER`.

## default_view_definer {#default_view_definer}

Allows setting the default `DEFINER` option when creating a view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).

The default value is `CURRENT_USER`.
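A hedged sketch of overriding these defaults at the session level (the user name `alice` is an assumption for the example):

```sql
SET default_normal_view_sql_security = 'DEFINER';
SET default_materialized_view_sql_security = 'DEFINER';
SET default_view_definer = 'alice';
```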
## max_partition_size_to_drop

Restriction on dropping partitions at query time. The value 0 means that you can drop partitions without any restrictions.
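By analogy with the `max_table_size_to_drop` example above, a configuration sketch:

``` xml
<max_partition_size_to_drop>0</max_partition_size_to_drop>
```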
@ -49,6 +49,6 @@ Every job has a pool associated with it and is started in this pool. Each pool h

Time instants during job lifetime:
- `schedule_time` (`DateTime64`) - Time when job was created and scheduled to be executed (usually with all its dependencies).
- `enqueue_time` (`Nullable(DateTime64)`) - Time when job became ready and was enqueued into a ready queue of it's pool. Null if the job is not ready yet.
- `enqueue_time` (`Nullable(DateTime64)`) - Time when job became ready and was enqueued into a ready queue of its pool. Null if the job is not ready yet.
- `start_time` (`Nullable(DateTime64)`) - Time when worker dequeues the job from ready queue and starts its execution. Null if the job is not started yet.
- `finish_time` (`Nullable(DateTime64)`) - Time when job execution is finished. Null if the job is not finished yet.
@ -297,11 +297,11 @@ Total number of databases on the server.

### NumberOfDetachedByUserParts

The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts and they can be removed.
The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts, and they can be removed.

### NumberOfDetachedParts

The total number of parts detached from MergeTree tables. A part can be detached by a user with the `ALTER TABLE DETACH` query or by the server itself if the part is broken, unexpected or unneeded. The server does not care about detached parts, and they can be removed.

### NumberOfTables

@ -393,7 +393,7 @@ The amount of free memory plus OS page cache memory on the host system, in bytes

### OSMemoryFreeWithoutCached

The amount of free memory on the host system, in bytes. This does not include the memory used by the OS page cache memory, in bytes. The page cache memory is also available for usage by programs, so the value of this metric can be confusing. See the `OSMemoryAvailable` metric instead. For convenience we also provide the `OSMemoryFreePlusCached` metric, that should be somewhat similar to OSMemoryAvailable. See also https://www.linuxatemyram.com/. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
The amount of free memory on the host system, in bytes. This does not include the memory used by the OS page cache memory, in bytes. The page cache memory is also available for usage by programs, so the value of this metric can be confusing. See the `OSMemoryAvailable` metric instead. For convenience, we also provide the `OSMemoryFreePlusCached` metric, which should be somewhat similar to OSMemoryAvailable. See also https://www.linuxatemyram.com/. This is a system-wide metric; it includes all the processes on the host machine, not just clickhouse-server.

### OSMemoryTotal
@ -493,7 +493,7 @@ Number of threads in the server of the PostgreSQL compatibility protocol.

### QueryCacheBytes

Total size of the query cache cache in bytes.
Total size of the query cache in bytes.

### QueryCacheEntries

@ -549,7 +549,7 @@ Total amount of bytes (compressed, including data and indices) stored in all tab

### TotalPartsOfMergeTreeTables

Total amount of data parts in all tables of MergeTree family. Numbers larger than 10 000 will negatively affect the server startup time and it may indicate unreasonable choice of the partition key.
Total amount of data parts in all tables of MergeTree family. Numbers larger than 10 000 will negatively affect the server startup time, and it may indicate unreasonable choice of the partition key.

### TotalPrimaryKeyBytesInMemory
@ -19,7 +19,7 @@ Columns:
- `default_database` ([String](../../sql-reference/data-types/string.md)) — The default database name.
- `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of times this host failed to reach a replica.
- `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of slowdowns that led to changing replica when establishing a connection with hedged requests.
- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed and it is considered to be back to normal.
- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed, and it is considered to be back to normal.
- `database_shard_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database).
- `database_replica_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database).
- `is_active` ([Nullable(UInt8)](../../sql-reference/data-types/int-uint.md)) — The status of the `Replicated` database replica (for clusters that belong to a `Replicated` database): 1 means "replica is online", 0 means "replica is offline", `NULL` means "unknown".

@ -18,7 +18,7 @@ Columns:
- `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, timeout, dictionary config has changed).
- `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error, and it is being reloaded now.
- `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/index.md#storig-dictionaries-in-memory).
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/index.md#storig-dictionaries-in-memory).
- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [key names](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-key) provided by the dictionary.
- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [key types](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-key) provided by the dictionary.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-attributes) provided by the dictionary.
38
docs/en/operations/system-tables/dns_cache.md
Normal file
@ -0,0 +1,38 @@
---
slug: /en/operations/system-tables/dns_cache
---
# dns_cache

Contains information about cached DNS records.

Columns:

- `hostname` ([String](../../sql-reference/data-types/string.md)) — cached hostname
- `ip_address` ([String](../../sql-reference/data-types/string.md)) — IP address for the hostname
- `ip_family` ([Enum](../../sql-reference/data-types/enum.md)) — family of the IP address, possible values:
  - 'IPv4'
  - 'IPv6'
  - 'UNIX_LOCAL'
- `cached_at` ([DateTime](../../sql-reference/data-types/datetime.md)) — when the record was cached

**Example**

Query:

```sql
SELECT * FROM system.dns_cache;
```

Result:

| hostname | ip\_address | ip\_family | cached\_at |
| :--- | :--- | :--- | :--- |
| localhost | ::1 | IPv6 | 2024-02-11 17:04:40 |
| localhost | 127.0.0.1 | IPv4 | 2024-02-11 17:04:40 |

**See also**

- [disable_internal_dns_cache setting](../../operations/server-configuration-parameters/settings.md#disable_internal_dns_cache)
- [dns_cache_max_size setting](../../operations/server-configuration-parameters/settings.md#dns_cache_max_size)
- [dns_cache_update_period setting](../../operations/server-configuration-parameters/settings.md#dns_cache_update_period)
- [dns_max_consecutive_failures setting](../../operations/server-configuration-parameters/settings.md#dns_max_consecutive_failures)
32
docs/en/operations/system-tables/settings_changes.md
Normal file
@ -0,0 +1,32 @@
---
slug: /en/operations/system-tables/settings_changes
---
# settings_changes

Contains information about setting changes in previous ClickHouse versions.

Columns:

- `version` ([String](../../sql-reference/data-types/string.md)) — The ClickHouse version in which settings were changed
- `changes` ([Array](../../sql-reference/data-types/array.md) of [Tuple](../../sql-reference/data-types/tuple.md)) — A description of the setting changes: (setting name, previous value, new value, reason for the change)

**Example**

``` sql
SELECT *
FROM system.settings_changes
WHERE version = '23.5'
FORMAT Vertical
```

``` text
Row 1:
──────
version: 23.5
changes: [('input_format_parquet_preserve_order','1','0','Allow Parquet reader to reorder rows for better parallelism.'),('parallelize_output_from_storages','0','1','Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows.'),('use_with_fill_by_sorting_prefix','0','1','Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. Rows with different values in sorting prefix are filled independently'),('output_format_parquet_compliant_nested_types','0','1','Change an internal field name in output Parquet file schema.')]
```

**See also**

- [Settings](../../operations/settings/index.md#session-settings-intro)
- [system.settings](settings.md)

@ -26,6 +26,6 @@ Columns:

- `max` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — The maximum value of the setting. NULL if not set.

- `readonly` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges))) — Profile that allows only read queries.
- `writability` ([Nullable](../../sql-reference/data-types/nullable.md)([Enum8](../../sql-reference/data-types/enum.md)('WRITABLE' = 0, 'CONST' = 1, 'CHANGEABLE_IN_READONLY' = 2))) — Sets the settings constraint writability kind.

- `inherit_profile` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — A parent profile for this setting profile. `NULL` if not set. Setting profile will inherit all the settings' values and constraints (`min`, `max`, `readonly`) from its parent profiles.
@ -27,6 +27,8 @@ Columns:

- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) - Time of latest modification of the table metadata.

- `metadata_version` ([Int32](../../sql-reference/data-types/int-uint.md)) - Metadata version for ReplicatedMergeTree tables, 0 for non-ReplicatedMergeTree tables.

- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database dependencies.

- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([materialized views](../../sql-reference/statements/create/view.md#materialized-view) based on the current table).

@ -111,6 +111,14 @@ On newer Linux kernels transparent huge pages are alright.
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```

If you want to modify the transparent huge pages setting permanently, edit `/etc/default/grub` and add `transparent_hugepage=madvise` to the `GRUB_CMDLINE_LINUX_DEFAULT` option:

```bash
$ GRUB_CMDLINE_LINUX_DEFAULT="transparent_hugepage=madvise ..."
```

After that, run the `sudo update-grub` command and reboot for the change to take effect.

## Hypervisor configuration

If you are using OpenStack, set
@ -34,7 +34,7 @@ The binary you just downloaded can run all sorts of ClickHouse tools and utiliti

A common use of `clickhouse-local` is to run ad-hoc queries on files where you don't have to insert the data into a table. `clickhouse-local` can stream the data from a file into a temporary table and execute your SQL.

If the file is sitting on the same machine as `clickhouse-local`, you can simple specify the file to load. The following `reviews.tsv` file contains a sampling of Amazon product reviews:
If the file is sitting on the same machine as `clickhouse-local`, you can simply specify the file to load. The following `reviews.tsv` file contains a sampling of Amazon product reviews:

```bash
./clickhouse local -q "SELECT * FROM 'reviews.tsv'"

@ -220,7 +220,7 @@ Arguments:
- `--help` — argument reference for `clickhouse-local`.
- `-V`, `--version` — print version information and exit.

Also there are arguments for each ClickHouse configuration variable which are more commonly used instead of `--config-file`.
Also, there are arguments for each ClickHouse configuration variable which are more commonly used instead of `--config-file`.

## Examples {#examples}
@ -38,7 +38,7 @@ For example, you have a column `IsMobile` in your table with values 0 and 1. In

So, the user will be able to count the exact ratio of mobile traffic.

Let's give another example. When you have some private data in your table, like user email and you don't want to publish any single email address.
Let's give another example. When you have some private data in your table, like user email, and you don't want to publish any single email address.
If your table is large enough and contains multiple different emails and no email has a very high frequency compared to all others, it will anonymize all data. But if you have a small number of different values in a column, it can reproduce some of them.
You should look at how the algorithm of this tool works, and fine-tune its command line parameters.

@ -9,7 +9,7 @@ Selects the first encountered value of a column.

By default, it ignores NULL values and returns the first NOT NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md), it supports `RESPECT NULLS`, in which case it will select the first value passed, regardless of whether it's NULL or not.

The return type of the function is the same as the input, except for LowCardinality which is discarded). This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) ) to modify this behaviour.
The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) to modify this behaviour.

The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate.
To get a determinate result, you can use the ‘min’ or ‘max’ function instead of ‘any’.
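A quick illustration of the empty-input behaviour described above (`anyOrNull` is `any` with the `-OrNull` combinator applied):

```sql
-- With zero input rows, `any` returns the type's default value,
-- while the -OrNull combinator turns that into NULL:
SELECT any(number), anyOrNull(number) FROM numbers(0);
-- Returns: 0 and ᴺᵁᴸᴸ
```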
@ -20,7 +20,7 @@ contingency(column1, column2)

**Returned value**

- a value between 0 to 1. The larger the result, the closer the association of the two columns.
- a value between 0 and 1. The larger the result, the closer the association of the two columns.

**Return type** is always [Float64](../../../sql-reference/data-types/float.md).

@ -48,4 +48,4 @@ Result:
┌──────cramersV(a, b)─┬───contingency(a, b)─┐
│ 0.41171788506213564 │ 0.05812725261759165 │
└─────────────────────┴─────────────────────┘
```
@ -0,0 +1,50 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparrayintersect
sidebar_position: 115
---

# groupArrayIntersect

Returns an intersection of the given arrays (all items that are present in all of the given arrays).

**Syntax**

``` sql
groupArrayIntersect(x)
```

**Arguments**

- `x` — Argument (column name or expression).

**Returned values**

- Array that contains the elements that are present in all arrays.

Type: [Array](../../data-types/array.md).

**Examples**

Consider table `numbers`:

``` text
┌─a──────────────┐
│ [1,2,4]        │
│ [1,5,2,8,-1,0] │
│ [1,5,7,5,8,2]  │
└────────────────┘
```

Query with column name as argument:

``` sql
SELECT groupArrayIntersect(a) as intersection FROM numbers;
```

Result:

```text
┌─intersection─┐
│ [1, 2]       │
└──────────────┘
```
|
||||
- [groupArrayMovingSum](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md)
|
||||
- [groupArraySample](./grouparraysample.md)
|
||||
- [groupArraySorted](/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md)
|
||||
- [groupArrayIntersect](./grouparrayintersect.md)
|
||||
- [groupBitAnd](/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md)
|
||||
- [groupBitOr](/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md)
|
||||
- [groupBitXor](/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md)
|
||||
|
@ -13,8 +13,8 @@ simpleLinearRegression(x, y)
|
||||
|
||||
Parameters:
|
||||
|
||||
- `x` — Column with dependent variable values.
|
||||
- `y` — Column with explanatory variable values.
|
||||
- `x` — Column with explanatory variable values.
|
||||
- `y` — Column with dependent variable values.
|
||||
|
||||
Returned values:
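The list of returned values is elided by the diff; for context (hedged), the function returns the constants `(k, b)` of the fitted line `y = k*x + b`. A minimal sketch:

```sql
-- Fit y = x over four points; the expected result is slope k = 1, intercept b = 0
SELECT simpleLinearRegression(x, y)
FROM (SELECT arrayJoin([0, 1, 2, 3]) AS x, x AS y);
-- Returns (1, 0)
```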
@ -5,25 +5,25 @@ sidebar_position: 221

# stochasticLinearRegression

This function implements stochastic linear regression. It supports custom parameters for learning rate, L2 regularization coefficient, mini-batch size and has few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
This function implements stochastic linear regression. It supports custom parameters for learning rate, L2 regularization coefficient, mini-batch size, and has a few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), and [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).

### Parameters

There are 4 customizable parameters. They are passed to the function sequentially, but there is no need to pass all four; default values will be used. However, a good model requires some parameter tuning.

``` text
stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
stochasticLinearRegression(0.00001, 0.1, 15, 'Adam')
```

1. `learning rate` is the coefficient on step length, when gradient descent step is performed. Too big learning rate may cause infinite weights of the model. Default is `0.00001`.
1. `learning rate` is the coefficient on step length, when the gradient descent step is performed. A learning rate that is too big may cause infinite weights of the model. Default is `0.00001`.
2. `l2 regularization coefficient` which may help to prevent overfitting. Default is `0.1`.
3. `mini-batch size` sets the number of elements, which gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element, however having small batches(about 10 elements) make gradient steps more stable. Default is `15`.
4. `method for updating weights`, they are: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require little bit more computations and memory, however they happen to be useful in terms of speed of convergence and stability of stochastic gradient methods.
3. `mini-batch size` sets the number of elements, which gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element, however, having small batches (about 10 elements) makes gradient steps more stable. Default is `15`.
4. `method for updating weights`, they are: `Adam` (by default), `SGD`, `Momentum`, and `Nesterov`. `Momentum` and `Nesterov` require a little bit more computations and memory, however, they happen to be useful in terms of speed of convergence and stability of stochastic gradient methods.

### Usage

`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage we use `-State` combinator, which basically saves the state (model weights, etc).
To predict we use function [evalMLMethod](../../../sql-reference/functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on.
`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage, we use the `-State` combinator, which saves the state (e.g. model weights).
To predict, we use the function [evalMLMethod](../../../sql-reference/functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on.

<a name="stochasticlinearregression-usage-fitting"></a>

@ -44,12 +44,12 @@ stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
AS state FROM train_data;
```

Here we also need to insert data into `train_data` table. The number of parameters is not fixed, it depends only on number of arguments, passed into `linearRegressionState`. They all must be numeric values.
Note that the column with target value(which we would like to learn to predict) is inserted as the first argument.
Here, we also need to insert data into the `train_data` table. The number of parameters is not fixed, it depends only on the number of arguments passed into `linearRegressionState`. They all must be numeric values.
Note that the column with target value (which we would like to learn to predict) is inserted as the first argument.

**2.** Predicting

After saving a state into the table, we may use it multiple times for prediction, or even merge with other states and create new even better models.
After saving a state into the table, we may use it multiple times for prediction or even merge with other states and create new, even better models.

``` sql
WITH (SELECT state FROM your_model) AS model SELECT
@ -9,7 +9,7 @@ sidebar_label: DateTime64
Allows storing an instant in time that can be expressed as a calendar date and a time of day, with defined sub-second precision.

Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
Typically are used - 3 (milliseconds), 6 (microseconds), 9 (nanoseconds).
Typically used values are 3 (milliseconds), 6 (microseconds), and 9 (nanoseconds).

**Syntax:**
@ -10,7 +10,7 @@ Signed fixed-point numbers that keep precision during add, subtract and multiply

## Parameters

- P - precision. Valid range: \[ 1 : 76 \]. Determines how many decimal digits number can have (including fraction). By default the precision is 10.
- P - precision. Valid range: \[ 1 : 76 \]. Determines how many decimal digits the number can have (including the fraction). By default, the precision is 10.
- S - scale. Valid range: \[ 0 : P \]. Determines how many decimal digits the fraction can have.

Decimal(P) is equivalent to Decimal(P, 0). Similarly, the syntax Decimal is equivalent to Decimal(10, 0).
@ -12,6 +12,11 @@ has a value of either type `T1` or `T2` or ... or `TN` or none of them (`NULL` v
The order of nested types doesn't matter: Variant(T1, T2) = Variant(T2, T1).
Nested types can be arbitrary types except Nullable(...), LowCardinality(Nullable(...)) and Variant(...) types.

:::note
It's not recommended to use similar types as variants (for example different numeric types like `Variant(UInt32, Int64)` or different date types like `Variant(Date, DateTime)`),
because working with values of such types can lead to ambiguity. By default, creating such a `Variant` type will lead to an exception, but this can be enabled using the setting `allow_suspicious_variant_types`.
:::

:::note
The Variant data type is an experimental feature. To use it, set `allow_experimental_variant_type = 1`.
:::
@ -272,3 +277,121 @@ $$)
│ [1,2,3] │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ [1,2,3] │
└─────────────────────┴───────────────┴──────┴───────┴─────────────────────┴─────────┘
```

## Comparing values of Variant type

Values of a `Variant` type can be compared only with values of the same `Variant` type.

The result of operator `<` for values `v1` with underlying type `T1` and `v2` with underlying type `T2` of a type `Variant(..., T1, ... T2, ...)` is defined as follows:
- If `T1 = T2 = T`, the result will be `v1.T < v2.T` (underlying values will be compared).
- If `T1 != T2`, the result will be `T1 < T2` (type names will be compared).
Examples:
```sql
CREATE TABLE test (v1 Variant(String, UInt64, Array(UInt32)), v2 Variant(String, UInt64, Array(UInt32))) ENGINE=Memory;
INSERT INTO test VALUES (42, 42), (42, 43), (42, 'abc'), (42, [1, 2, 3]), (42, []), (42, NULL);
```

```sql
SELECT v2, variantType(v2) as v2_type from test order by v2;
```

```text
┌─v2──────┬─v2_type───────┐
│ []      │ Array(UInt32) │
│ [1,2,3] │ Array(UInt32) │
│ abc     │ String        │
│ 42      │ UInt64        │
│ 43      │ UInt64        │
│ ᴺᵁᴸᴸ    │ None          │
└─────────┴───────────────┘
```

```sql
SELECT v1, variantType(v1) as v1_type, v2, variantType(v2) as v2_type, v1 = v2, v1 < v2, v1 > v2 from test;
```

```text
┌─v1─┬─v1_type─┬─v2──────┬─v2_type───────┬─equals(v1, v2)─┬─less(v1, v2)─┬─greater(v1, v2)─┐
│ 42 │ UInt64  │ 42      │ UInt64        │              1 │            0 │               0 │
│ 42 │ UInt64  │ 43      │ UInt64        │              0 │            1 │               0 │
│ 42 │ UInt64  │ abc     │ String        │              0 │            0 │               1 │
│ 42 │ UInt64  │ [1,2,3] │ Array(UInt32) │              0 │            0 │               1 │
│ 42 │ UInt64  │ []      │ Array(UInt32) │              0 │            0 │               1 │
│ 42 │ UInt64  │ ᴺᵁᴸᴸ    │ None          │              0 │            1 │               0 │
└────┴─────────┴─────────┴───────────────┴────────────────┴──────────────┴─────────────────┘
```
If you need to find the row with a specific `Variant` value, you can do one of the following:

- Cast the value to the corresponding `Variant` type:

```sql
SELECT * FROM test WHERE v2 == [1,2,3]::Array(UInt32)::Variant(String, UInt64, Array(UInt32));
```

```text
┌─v1─┬─v2──────┐
│ 42 │ [1,2,3] │
└────┴─────────┘
```

- Compare the `Variant` subcolumn with the required type:

```sql
SELECT * FROM test WHERE v2.`Array(UInt32)` == [1,2,3] -- or using variantElement(v2, 'Array(UInt32)')
```

```text
┌─v1─┬─v2──────┐
│ 42 │ [1,2,3] │
└────┴─────────┘
```

Sometimes it can be useful to make an additional check on the variant type, as subcolumns with complex types like `Array/Map/Tuple` cannot be inside `Nullable` and will have default values instead of `NULL` in rows with different types:

```sql
SELECT v2, v2.`Array(UInt32)`, variantType(v2) FROM test WHERE v2.`Array(UInt32)` == [];
```

```text
┌─v2───┬─v2.Array(UInt32)─┬─variantType(v2)─┐
│ 42   │ []               │ UInt64          │
│ 43   │ []               │ UInt64          │
│ abc  │ []               │ String          │
│ []   │ []               │ Array(UInt32)   │
│ ᴺᵁᴸᴸ │ []               │ None            │
└──────┴──────────────────┴─────────────────┘
```

```sql
SELECT v2, v2.`Array(UInt32)`, variantType(v2) FROM test WHERE variantType(v2) == 'Array(UInt32)' AND v2.`Array(UInt32)` == [];
```

```text
┌─v2─┬─v2.Array(UInt32)─┬─variantType(v2)─┐
│ [] │ []               │ Array(UInt32)   │
└────┴──────────────────┴─────────────────┘
```

**Note:** values of variants with different numeric types are considered different variants and are not compared with each other; their type names are compared instead.

Example:
```sql
SET allow_suspicious_variant_types = 1;
CREATE TABLE test (v Variant(UInt32, Int64)) ENGINE=Memory;
INSERT INTO test VALUES (1::UInt32), (1::Int64), (100::UInt32), (100::Int64);
SELECT v, variantType(v) FROM test ORDER by v;
```

```text
┌─v───┬─variantType(v)─┐
│ 1   │ Int64          │
│ 100 │ Int64          │
│ 1   │ UInt32         │
│ 100 │ UInt32         │
└─────┴────────────────┘
```
@ -6,7 +6,7 @@ sidebar_label: Distributed DDL

# Distributed DDL Queries (ON CLUSTER Clause)

By default the `CREATE`, `DROP`, `ALTER`, and `RENAME` queries affect only the current server where they are executed. In a cluster setup, it is possible to run such queries in a distributed manner with the `ON CLUSTER` clause.
By default, the `CREATE`, `DROP`, `ALTER`, and `RENAME` queries affect only the current server where they are executed. In a cluster setup, it is possible to run such queries in a distributed manner with the `ON CLUSTER` clause.

For example, the following query creates the `all_hits` `Distributed` table on each host in `cluster`:
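The query itself is elided by the diff; a hedged sketch consistent with the description (the column list and the underlying `default.hits` table are assumptions):

```sql
CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32)
ENGINE = Distributed(cluster, default, hits)
```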
@ -372,7 +372,7 @@ Result:

## bitmapAnd

Computes the logical conjunction of two two bitmaps.
Computes the logical conjunction of two bitmaps.

**Syntax**
@ -1564,7 +1564,7 @@ Alias: `TO_DAYS`
**Arguments**

- `date` — The date to calculate the number of days passed since year zero from. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `time_zone` — A String type const value or a expression represent the time zone. [String types](../../sql-reference/data-types/string.md)
- `time_zone` — A String type const value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)

**Returned value**
@ -2218,7 +2218,7 @@ now64([scale], [timezone])

**Arguments**

- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ]. Typically are used - 3 (default) (milliseconds), 6 (microseconds), 9 (nanoseconds).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ]. Typically used values are 3 (default, milliseconds), 6 (microseconds), and 9 (nanoseconds).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). [String](../../sql-reference/data-types/string.md).

**Returned value**
@ -2305,7 +2305,7 @@ Rounds the time to the half hour.

Converts a date or date with time to a UInt32 number containing the year and month number (YYYY \* 100 + MM). Accepts a second optional timezone argument. If provided, the timezone must be a string constant.

This functions is the opposite of function `YYYYMMDDToDate()`.
This function is the opposite of function `YYYYMMDDToDate()`.

**Example**

@ -2362,7 +2362,7 @@ Result:

Converts a number containing the year, month and day number to a [Date](../../sql-reference/data-types/date.md).

This functions is the opposite of function `toYYYYMMDD()`.
This function is the opposite of function `toYYYYMMDD()`.

The output is undefined if the input does not encode a valid Date value.

@ -2406,7 +2406,7 @@ Converts a number containing the year, month, day, hours, minute and second numb

The output is undefined if the input does not encode a valid DateTime value.

This functions is the opposite of function `toYYYYMMDDhhmmss()`.
This function is the opposite of function `toYYYYMMDDhhmmss()`.

**Syntax**

@ -2981,8 +2981,8 @@ toUTCTimestamp(time_val, time_zone)

**Arguments**

- `time_val` — A DateTime/DateTime64 type const value or a expression . [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
- `time_zone` — A String type const value or a expression represent the time zone. [String types](../../sql-reference/data-types/string.md)
- `time_val` — A DateTime/DateTime64 type const value or an expression. [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
- `time_zone` — A String type const value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)

**Returned value**

@ -3014,8 +3014,8 @@ fromUTCTimestamp(time_val, time_zone)

**Arguments**

- `time_val` — A DateTime/DateTime64 type const value or a expression . [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
- `time_zone` — A String type const value or a expression represent the time zone. [String types](../../sql-reference/data-types/string.md)
- `time_val` — A DateTime/DateTime64 type const value or an expression. [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
- `time_zone` — A String type const value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)

**Returned value**
@ -509,7 +509,7 @@ Result:

## cosineDistance

Calculates the cosine distance between two vectors (the values of the tuples are the coordinates). The less the returned value is, the more similar are the vectors.
Calculates the cosine distance between two vectors (the values of the tuples are the coordinates). The smaller the returned value is, the more similar are the vectors.

**Syntax**
@ -780,8 +780,52 @@ If executed in the context of a distributed table, this function generates a nor

## version()

Returns the server version as a string.
If executed in the context of a distributed table, this function generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
Returns the current version of ClickHouse as a string in the form of:

- Major version
- Minor version
- Patch version
- Number of commits since the previous stable release.

```plaintext
major_version.minor_version.patch_version.number_of_commits_since_the_previous_stable_release
```

If executed in the context of a distributed table, this function generates a normal column with values relevant to each shard. Otherwise, it produces a constant value.

**Syntax**

```sql
version()
```

**Arguments**

None.

**Returned value**

Type: [String](../data-types/string)

**Implementation details**

None.

**Example**

Query:

```sql
SELECT version()
```

**Result**:

```response
┌─version()─┐
│ 24.2.1.1  │
└───────────┘
```

## buildId()
@ -4,6 +4,8 @@ sidebar_position: 170
sidebar_label: Strings
---

import VersionBadge from '@theme/badges/VersionBadge';

# Functions for Working with Strings

Functions for [searching](string-search-functions.md) in strings and for [replacing](string-replace-functions.md) in strings are described separately.

@ -783,6 +785,8 @@ SELECT startsWith('Spider-Man', 'Spi');

## startsWithUTF8

<VersionBadge minVersion='23.8' />

Returns whether string `str` starts with `prefix`. The difference between `startsWithUTF8` and `startsWith` is that `startsWithUTF8` matches `str` and `prefix` by UTF-8 characters.
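A hedged illustration (both calls match here, since '中' is a complete UTF-8 character; the two functions differ when a prefix boundary would split a multi-byte character):

```sql
-- '中' is a 3-byte UTF-8 character and a valid prefix of '中国' for both functions
SELECT startsWithUTF8('中国', '中'), startsWith('中国', '中');
-- Returns: 1, 1
```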
@ -542,7 +542,7 @@ Alias: `scalarProduct`.

- Scalar product.

Type: [Int/UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
Type: [Int/UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).

**Example**
@ -10,7 +10,7 @@ sidebar_label: APPLY DELETED MASK
ALTER TABLE [db].name [ON CLUSTER cluster] APPLY DELETED MASK [IN PARTITION partition_id]
```

The command applies the mask created by [lightweight delete](/docs/en/sql-reference/statements/delete) and forcefully removes rows marked as deleted from disk. This command is a heavyweight mutation, and it is semantically equal to the query ```ALTER TABLE [db].name DELETE WHERE _row_exists = 0```.

:::note
It only works for tables in the [`MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
@ -15,7 +15,7 @@ ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT constraint_name;

See more on [constraints](../../../sql-reference/statements/create/table.md#constraints).

Queries will add or remove metadata about constraints from table so they are processed immediately.
Queries will add or remove metadata about constraints from the table, so they are processed immediately.

:::tip
Constraint check **will not be executed** on existing data if it was added.
@ -9,6 +9,7 @@ The following operations with [partitions](/docs/en/engines/table-engines/merget

- [DETACH PARTITION\|PART](#detach-partitionpart) — Moves a partition or part to the `detached` directory and forgets it.
- [DROP PARTITION\|PART](#drop-partitionpart) — Deletes a partition or part.
- [FORGET PARTITION](#forget-partition) — Deletes a partition's metadata from ZooKeeper if it's empty.
- [ATTACH PARTITION\|PART](#attach-partitionpart) — Adds a partition or part from the `detached` directory to the table.
- [ATTACH PARTITION FROM](#attach-partition-from) — Copies the data partition from one table to another and adds it.
- [REPLACE PARTITION](#replace-partition) — Copies the data partition from one table to another and replaces it.

@ -73,6 +74,22 @@ ALTER TABLE table_name [ON CLUSTER cluster] DROP DETACHED PARTITION|PART partiti
Removes the specified part or all parts of the specified partition from `detached`.
Read more about setting the partition expression in the section [How to set the partition expression](#how-to-set-partition-expression).

## FORGET PARTITION

``` sql
ALTER TABLE table_name FORGET PARTITION partition_expr
```

Removes all metadata about an empty partition from ZooKeeper. The query fails if the partition is not empty or is unknown. Execute this only for partitions that will never be used again.

Read about setting the partition expression in the section [How to set the partition expression](#how-to-set-partition-expression).

Example:

``` sql
ALTER TABLE mt FORGET PARTITION '20201121';
```

## ATTACH PARTITION\|PART

``` sql
@ -13,7 +13,9 @@ Creates a new view. Views can be [normal](#normal-view), [materialized](#materia
|
||||
Syntax:
|
||||
|
||||
``` sql
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
```

Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
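
For example, a minimal sketch (the table and view names are hypothetical):

```sql
CREATE VIEW active_users AS SELECT * FROM users WHERE active = 1;

-- equivalent to: SELECT count() FROM (SELECT * FROM users WHERE active = 1)
SELECT count() FROM active_users;
```
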
@ -52,7 +54,9 @@ SELECT * FROM view(column1=value1, column2=value2 ...)

## Materialized View

``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO [db.]name] [ENGINE = engine] [POPULATE]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
```

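For example, a minimal sketch of a materialized view that writes into an explicit target table (all names are hypothetical):

```sql
-- each INSERT into `events` is aggregated and appended to `events_daily`
CREATE MATERIALIZED VIEW events_daily_mv TO events_daily
AS SELECT toDate(ts) AS day, count() AS c
FROM events
GROUP BY day;
```
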
:::tip
@ -91,6 +95,49 @@ Views look the same as normal tables. For example, they are listed in the result

To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view), although `DROP TABLE` also works for views.

## SQL security {#sql_security}

`DEFINER` and `SQL SECURITY` allow you to specify which ClickHouse user to use when executing the view's underlying query.
`SQL SECURITY` has three valid values: `DEFINER`, `INVOKER`, or `NONE`. You can specify any existing user or `CURRENT_USER` in the `DEFINER` clause.

The following table explains which rights each user needs in order to select from a view.
Note that, regardless of the SQL security option, reading from a view always requires `GRANT SELECT ON <view>`.

| SQL security option | View | Materialized View |
|---------------------|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
| `DEFINER alice` | `alice` must have a `SELECT` grant for the view's source table. | `alice` must have a `SELECT` grant for the view's source table and an `INSERT` grant for the view's target table. |
| `INVOKER` | User must have a `SELECT` grant for the view's source table. | `SQL SECURITY INVOKER` can't be specified for materialized views. |
| `NONE` | - | - |

:::note
`SQL SECURITY NONE` is a deprecated option. Any user with the rights to create views with `SQL SECURITY NONE` can execute arbitrary queries.
Thus, it is required to have `GRANT ALLOW SQL SECURITY NONE TO <user>` in order to create a view with this option.
:::

If `DEFINER`/`SQL SECURITY` aren't specified, the default values are used:
- `SQL SECURITY`: `INVOKER` for normal views and `DEFINER` for materialized views ([configurable by settings](../../../operations/settings/settings.md#default_normal_view_sql_security))
- `DEFINER`: `CURRENT_USER` ([configurable by settings](../../../operations/settings/settings.md#default_view_definer))

If a view is attached without `DEFINER`/`SQL SECURITY` specified, the default value is `SQL SECURITY NONE` for the materialized view and `SQL SECURITY INVOKER` for the normal view.

To change the SQL security for an existing view, use:

```sql
ALTER TABLE table_name MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```

### SQL security examples

```sql
CREATE VIEW test_view
DEFINER = alice SQL SECURITY DEFINER
AS SELECT ...
```

```sql
CREATE VIEW test_view
SQL SECURITY INVOKER
AS SELECT ...
```

## Live View [Deprecated]

This feature is deprecated and will be removed in the future.

@ -16,13 +16,13 @@ DETACH TABLE|VIEW|DICTIONARY|DATABASE [IF EXISTS] [db.]name [ON CLUSTER cluster]

Detaching does not delete the data or metadata of a table, a materialized view, a dictionary, or a database. If an entity was not detached `PERMANENTLY`, the server will read the metadata on the next launch and recall the table/view/dictionary/database again. If an entity was detached `PERMANENTLY`, there will be no automatic recall.

Whether or not a table, a dictionary, or a database was detached permanently, you can reattach it using the [ATTACH](../../sql-reference/statements/attach.md) query.
System log tables can also be attached back (e.g. `query_log`, `text_log`, etc.). Other system tables can't be reattached. On the next server launch the server will recall those tables again.

`ATTACH MATERIALIZED VIEW` does not work with short syntax (without `SELECT`), but you can attach it using the `ATTACH TABLE` query.

Note that you cannot permanently detach a table that is already detached (temporarily). You can, however, attach it back and then detach it permanently again.

Also, you cannot [DROP](../../sql-reference/statements/drop.md#drop-table) a detached table, [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as a permanently detached table, or replace it with another table using the [RENAME TABLE](../../sql-reference/statements/rename.md) query.

The `SYNC` modifier executes the action without delay.
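
For example, a minimal sketch (the table name `events` is hypothetical):

```sql
DETACH TABLE events PERMANENTLY;  -- not recalled automatically on server restart
ATTACH TABLE events;              -- reattach it explicitly
DETACH TABLE events SYNC;         -- detach without delay
```
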
@ -114,6 +114,7 @@ Hierarchy of privileges:

- `ALTER VIEW`
    - `ALTER VIEW REFRESH`
    - `ALTER VIEW MODIFY QUERY`
    - `ALTER VIEW MODIFY SQL SECURITY`
- [CREATE](#grant-create)
    - `CREATE DATABASE`
    - `CREATE TABLE`
@ -307,6 +308,7 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries

- `ALTER VIEW`. Level: `GROUP`
    - `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
    - `ALTER VIEW MODIFY QUERY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY QUERY`
    - `ALTER VIEW MODIFY SQL SECURITY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY SQL SECURITY`

Examples of how this hierarchy is treated:

@ -409,6 +411,7 @@ Allows a user to execute queries that manage users, roles and row policies.

- `SHOW_ROW_POLICIES`. Level: `GLOBAL`. Aliases: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY`
- `SHOW_QUOTAS`. Level: `GLOBAL`. Aliases: `SHOW CREATE QUOTA`
- `SHOW_SETTINGS_PROFILES`. Level: `GLOBAL`. Aliases: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE`
- `ALLOW SQL SECURITY NONE`. Level: `GLOBAL`. Aliases: `CREATE SQL SECURITY NONE`, `SQL SECURITY NONE`, `SECURITY NONE`

The `ROLE ADMIN` privilege allows a user to assign and revoke any roles, including those that are not assigned to the user with the admin option.
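
For example, a minimal sketch (the user name `bob` is hypothetical):

```sql
-- required before bob can create views with SQL SECURITY NONE
GRANT ALLOW SQL SECURITY NONE TO bob;
```
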
@ -176,7 +176,7 @@ INSERT INTO infile_globs FROM INFILE 'input_?.csv' FORMAT CSV;
```
:::

## Inserting using a Table Function

Data can be inserted into tables referenced by [table functions](../../sql-reference/table-functions/index.md).

@ -204,6 +204,26 @@ Result:

└─────┴───────────────────────┘
```

## Inserting into ClickHouse Cloud

By default, services on ClickHouse Cloud provide multiple replicas for high availability. When you connect to a service, a connection is established to one of these replicas.

After an `INSERT` succeeds, data is written to the underlying storage. However, it may take some time for replicas to receive these updates. Therefore, if you use a different connection that executes a `SELECT` query on one of these other replicas, the updated data may not yet be reflected.

It is possible to use the `select_sequential_consistency` setting to force the replica to receive the latest updates. Here is an example of a `SELECT` query using this setting:

```sql
SELECT .... SETTINGS select_sequential_consistency = 1;
```

Note that using `select_sequential_consistency` will increase the load on ClickHouse Keeper (used internally by ClickHouse Cloud) and may result in slower performance depending on the load on the service. We recommend against enabling this setting unless necessary. The recommended approach is to execute reads and writes in the same session, or to use a client driver that uses the native protocol (and thus supports sticky connections).

## Inserting into a replicated setup

In a replicated setup, data will be visible on other replicas after it has been replicated. Data begins being replicated (downloaded on other replicas) immediately after an `INSERT`. This differs from ClickHouse Cloud, where data is immediately written to shared storage and replicas subscribe to metadata changes.

Note that for replicated setups, `INSERT`s can sometimes take a considerable amount of time (on the order of one second) because they require committing to ClickHouse Keeper for distributed consensus. Using S3 for storage also adds additional latency.

## Performance Considerations

`INSERT` sorts the input data by primary key and splits it into partitions by the partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this:

@ -216,7 +236,15 @@ Performance will not decrease if:

- Data is added in real time.
- You upload data that is usually sorted by time.

### Asynchronous inserts

It is possible to asynchronously insert data in small but frequent inserts. The data from such insertions is combined into batches and then safely inserted into a table. To use asynchronous inserts, enable the [`async_insert`](../../operations/settings/settings.md#async-insert) setting.

Using `async_insert` or the [`Buffer` table engine](/en/engines/table-engines/special/buffer) results in additional buffering.
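
For example, a minimal sketch (the table name and the `wait_for_async_insert` choice are assumptions):

```sql
-- buffer small inserts on the server and flush them in batches
INSERT INTO events SETTINGS async_insert = 1, wait_for_async_insert = 1
VALUES (1, 'click'), (2, 'view');
```
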
### Large or long-running inserts

When you are inserting large amounts of data, ClickHouse will optimize write performance through a process called "squashing". Small blocks of inserted data in memory are merged and squashed into larger blocks before being written to disk. Squashing reduces the overhead associated with each write operation. In this process, inserted data will be available to query after ClickHouse completes writing each [`max_insert_block_size`](/en/operations/settings/settings#max_insert_block_size) rows.

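For example, a minimal sketch (the table names and the block size are hypothetical; whether this setting governs a given insert path depends on the interface used):

```sql
-- inserted data becomes queryable after each block of this many rows is written
INSERT INTO events_copy SELECT * FROM events
SETTINGS max_insert_block_size = 100000;
```
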
**See Also**
@ -5,7 +5,7 @@ sidebar_label: DISTINCT

# DISTINCT Clause

If `SELECT DISTINCT` is specified, only unique rows will remain in a query result. Thus, only a single row will remain out of all the sets of fully matching rows in the result.

You can specify the list of columns that must have unique values: `SELECT DISTINCT ON (column1, column2,...)`. If the columns are not specified, all of them are taken into consideration.

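For example, a minimal sketch (the table and column names are hypothetical):

```sql
-- keeps one row per (user_id, event_date) pair
SELECT DISTINCT ON (user_id, event_date) *
FROM events
ORDER BY user_id, event_date;
```
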
@ -68,7 +68,7 @@ RELOAD FUNCTION [ON CLUSTER cluster_name] function_name

Clears ClickHouse’s internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries).

For more convenient (automatic) cache management, see the `disable_internal_dns_cache`, `dns_cache_max_size`, and `dns_cache_update_period` parameters.

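For example, to clear the cache manually:

```sql
SYSTEM DROP DNS CACHE;
```
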
## DROP MARK CACHE
@ -59,9 +59,7 @@ INSERT INTO TABLE FUNCTION file('file2.csv', 'CSV', 'i UInt32, s String') VALUES

Now, read the data contents of `file1.csv` and `file2.csv` via the `fileCluster` table function:

```sql
SELECT * FROM fileCluster('my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s String') ORDER BY i, s
```
@ -11,11 +11,11 @@ Creates a temporary [Merge](../../engines/table-engines/special/merge.md) table.

**Syntax**

```sql
merge(['db_name',] 'tables_regexp')
```

**Arguments**

- `db_name` — Possible values (optional, default is `currentDatabase()`):
    - database name,
    - constant expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.
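
For example, a minimal sketch (the table name pattern is hypothetical):

```sql
-- reads from every table in the current database whose name starts with 'events_'
SELECT count() FROM merge('^events_');
```
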
83
docs/en/sql-reference/table-functions/mergeTreeIndex.md
Normal file
@ -0,0 +1,83 @@
---
slug: /en/sql-reference/table-functions/mergeTreeIndex
sidebar_position: 77
sidebar_label: mergeTreeIndex
---

# mergeTreeIndex

Represents the contents of index and marks files of MergeTree tables. It can be used for introspection.

``` sql
mergeTreeIndex(database, table, [with_marks = true])
```

**Arguments**

- `database` — The database name to read index and marks from.
- `table` — The table name to read index and marks from.
- `with_marks` — Whether to include columns with marks in the result.

**Returned Value**

A table object with columns containing the values of the primary index of the source table, columns containing the values of marks (if enabled) for all possible files in the data parts of the source table, and the following virtual columns:

- `part_name` — The name of the data part.
- `mark_number` — The number of the current mark in the data part.
- `rows_in_granule` — The number of rows in the current granule.

A marks column may contain a `(NULL, NULL)` value if the column is absent in the data part or if marks for one of its substreams are not written (e.g., in compact parts).

## Usage Example

```sql
CREATE TABLE test_table
(
    `id` UInt64,
    `n` UInt64,
    `arr` Array(UInt64)
)
ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 3, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 8;

INSERT INTO test_table SELECT number, number, range(number % 5) FROM numbers(5);

INSERT INTO test_table SELECT number, number, range(number % 5) FROM numbers(10, 10);
```

```sql
SELECT * FROM mergeTreeIndex(currentDatabase(), test_table, with_marks = true);
```

```text
┌─part_name─┬─mark_number─┬─rows_in_granule─┬─id─┬─id.mark─┬─n.mark──┬─arr.size0.mark─┬─arr.mark─┐
│ all_1_1_0 │           0 │               3 │  0 │ (0,0)   │ (42,0)  │ (NULL,NULL)    │ (84,0)   │
│ all_1_1_0 │           1 │               2 │  3 │ (133,0) │ (172,0) │ (NULL,NULL)    │ (211,0)  │
│ all_1_1_0 │           2 │               0 │  4 │ (271,0) │ (271,0) │ (NULL,NULL)    │ (271,0)  │
└───────────┴─────────────┴─────────────────┴────┴─────────┴─────────┴────────────────┴──────────┘
┌─part_name─┬─mark_number─┬─rows_in_granule─┬─id─┬─id.mark─┬─n.mark─┬─arr.size0.mark─┬─arr.mark─┐
│ all_2_2_0 │           0 │               3 │ 10 │ (0,0)   │ (0,0)  │ (0,0)          │ (0,0)    │
│ all_2_2_0 │           1 │               3 │ 13 │ (0,24)  │ (0,24) │ (0,24)         │ (0,24)   │
│ all_2_2_0 │           2 │               3 │ 16 │ (0,48)  │ (0,48) │ (0,48)         │ (0,80)   │
│ all_2_2_0 │           3 │               1 │ 19 │ (0,72)  │ (0,72) │ (0,72)         │ (0,128)  │
│ all_2_2_0 │           4 │               0 │ 19 │ (0,80)  │ (0,80) │ (0,80)         │ (0,160)  │
└───────────┴─────────────┴─────────────────┴────┴─────────┴────────┴────────────────┴──────────┘
```

```sql
DESCRIBE mergeTreeIndex(currentDatabase(), test_table, with_marks = true) SETTINGS describe_compact_output = 1;
```

```text
┌─name────────────┬─type─────────────────────────────────────────────────────────────────────────────────────────────┐
│ part_name       │ String                                                                                           │
│ mark_number     │ UInt64                                                                                           │
│ rows_in_granule │ UInt64                                                                                           │
│ id              │ UInt64                                                                                           │
│ id.mark         │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │
│ n.mark          │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │
│ arr.size0.mark  │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │
│ arr.mark        │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │
└─────────────────┴──────────────────────────────────────────────────────────────────────────────────────────────────┘
```
@ -63,7 +63,7 @@ ClickHouse — полноценная столбцовая СУБД. Данны

For byte-oriented input/output, there are the abstract classes `ReadBuffer` and `WriteBuffer`. They are used instead of `iostream`. Don't worry: every mature C++ project uses something other than `iostream` for good reasons.

`ReadBuffer` and `WriteBuffer` are just a contiguous buffer and a cursor pointing to a position in that buffer. Implementations may or may not own the buffer's memory. There is a virtual method to fill the buffer with the next data (for `ReadBuffer`) or to flush the buffer somewhere (for `WriteBuffer`). Virtual methods are rarely called.

Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files and file descriptors, as well as network sockets, for implementing compression (`CompressedWriteBuffer` is initialized with another `WriteBuffer` and compresses the data before writing to it), and for other purposes: the names `ConcatReadBuffer`, `LimitReadBuffer`, and `HashingWriteBuffer` speak for themselves.

@ -71,7 +71,7 @@ ClickHouse не работает и не собирается на 32-битны

Please make sure you have the correct access rights
and the repository exists.

As a rule, this means that the SSH keys for connecting to GitHub are missing. These keys are located in the `~/.ssh` directory. You need to upload your public keys in the GitHub settings interface so that it recognizes them.

You can also clone the repository over the https protocol:

@ -199,7 +199,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"

If the build starts successfully, you will see the build progress: the number of processed tasks and the total number of tasks.

During the build, `libprotobuf WARNING` messages about protobuf files in the libhdfs2 library may appear. They do not matter.

On a successful build, you get a ready executable file `ClickHouse/build/programs/clickhouse`:

@ -207,7 +207,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"

## Running the Built Version of ClickHouse {#zapusk-sobrannoi-versii-clickhouse}

To run the server as the current user, with logs printed to the terminal, and using the example configuration files located in the source tree, go to the `ClickHouse/programs/server/` directory (it is not inside the build directory) and run:

../../build/programs/clickhouse server
@ -37,7 +37,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Query clauses**

When creating a table with the `CollapsingMergeTree` engine, the same [query clauses](mergetree.md#table_engine-mergetree-creating-a-table) are used as when creating a table with the `MergeTree` engine.

<details markdown="1">