Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-30 03:22:14 +00:00)

Merge branch 'master' into variant_inference

commit ee1013eba6
.github/workflows/backport_branches.yml (5 changed lines)

@@ -269,10 +269,7 @@ jobs:
       - name: Check Workflow results
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
           ${{ toJson(needs) }}
           EOF
-          echo "::group::Workflow results"
-          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-          echo "::endgroup::"
           python3 ./tests/ci/ci_buddy.py --check-wf-status
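Note: the switch from `cat >>` to `cat >` matters if the results file already exists on the runner from an earlier write: `>` truncates before writing, while `>>` keeps appending, which leaves stale JSON behind. A minimal sketch of the difference (the file path and payloads are illustrative only):

    WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
    echo '{"run": 1}' >  "$WORKFLOW_RESULT_FILE"   # truncate: file now holds exactly one JSON document
    echo '{"run": 2}' >> "$WORKFLOW_RESULT_FILE"   # append: file now holds two concatenated documents
    python3 -m json.tool "$WORKFLOW_RESULT_FILE"   # fails with "Extra data" on the appended file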
.github/workflows/jepsen.yml (69 changed lines)

@@ -9,19 +9,64 @@ on: # yamllint disable-line rule:truthy
     - cron: '0 */6 * * *'
   workflow_dispatch:
 jobs:
+  RunConfig:
+    runs-on: [self-hosted, style-checker-aarch64]
+    outputs:
+      data: ${{ steps.runconfig.outputs.CI_DATA }}
+    steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true # to ensure correct digests
+          fetch-depth: 0 # to get version
+          filter: tree:0
+      - name: PrepareRunConfig
+        id: runconfig
+        run: |
+          echo "::group::configure CI run"
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
+          echo "::endgroup::"
+
+          echo "::group::CI run configure results"
+          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+          echo "::endgroup::"
+          {
+            echo 'CI_DATA<<EOF'
+            cat ${{ runner.temp }}/ci_run_data.json
+            echo 'EOF'
+          } >> "$GITHUB_OUTPUT"
   KeeperJepsenRelease:
-    uses: ./.github/workflows/reusable_simple_job.yml
+    needs: [RunConfig]
+    uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Jepsen keeper check
-      runner_type: style-checker
-      report_required: true
+      test_name: ClickHouse Keeper Jepsen
+      runner_type: style-checker-aarch64
+      data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
         python3 jepsen_check.py keeper
-  # ServerJepsenRelease:
-  #   uses: ./.github/workflows/reusable_simple_job.yml
-  #   with:
-  #     test_name: Jepsen server check
-  #     runner_type: style-checker
-  #     run_command: |
-  #       cd "$REPO_COPY/tests/ci"
-  #       python3 jepsen_check.py server
+  ServerJepsenRelease:
+    if: false # skip for server
+    needs: [RunConfig]
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: ClickHouse Server Jepsen
+      runner_type: style-checker-aarch64
+      data: ${{ needs.RunConfig.outputs.data }}
+      run_command: |
+        python3 jepsen_check.py server
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, ServerJepsenRelease, KeeperJepsenRelease]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
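Note on the PrepareRunConfig step above: the `{ ... } >> "$GITHUB_OUTPUT"` block uses the GitHub Actions delimiter syntax for multi-line step outputs, so the generated JSON becomes visible to later jobs as `steps.runconfig.outputs.CI_DATA` / `needs.RunConfig.outputs.data`. A minimal sketch of the same pattern outside a workflow (file paths and the output name are illustrative):

    # any writable file will do for experimenting with the output format locally
    export GITHUB_OUTPUT=/tmp/github_output.txt
    echo '{"jobs": ["KeeperJepsenRelease"]}' > /tmp/ci_run_data.json
    {
        echo 'CI_DATA<<EOF'            # start a multi-line value named CI_DATA
        cat /tmp/ci_run_data.json      # the value itself, may span many lines
        echo 'EOF'                     # closing delimiter chosen above
    } >> "$GITHUB_OUTPUT"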
.github/workflows/master.yml (5 changed lines)

@@ -135,10 +135,7 @@ jobs:
       - name: Check Workflow results
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
           ${{ toJson(needs) }}
           EOF
-          echo "::group::Workflow results"
-          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-          echo "::endgroup::"
           python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/merge_queue.yml (5 changed lines)

@@ -108,10 +108,7 @@ jobs:
       - name: Check Workflow results
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
           EOF
-          echo "::group::Workflow results"
-          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-          echo "::endgroup::"
           python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/nightly.yml (5 changed lines)

@@ -54,10 +54,7 @@ jobs:
       - name: Check Workflow results
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
           ${{ toJson(needs) }}
           EOF
-          echo "::group::Workflow results"
-          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-          echo "::endgroup::"
           python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/pull_request.yml (10 changed lines)

@@ -152,8 +152,9 @@ jobs:
 
   CheckReadyForMerge:
     if: ${{ !cancelled() }}
-    # Test_2 or Test_3 must not have jobs required for Mergeable check
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
+    # Test_2 or Test_3 do not have the jobs required for Mergeable check,
+    # however, set them as "needs" to get all checks results before the automatic merge occurs.
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -168,12 +169,9 @@ jobs:
       - name: Check Workflow results
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
           ${{ toJson(needs) }}
           EOF
-          echo "::group::Workflow results"
-          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-          echo "::endgroup::"
           python3 ./tests/ci/ci_buddy.py --check-wf-status
 
 ################################# Stage Final #################################
.github/workflows/release_branches.yml (6 changed lines)

@@ -489,10 +489,8 @@ jobs:
       - name: Check Workflow results
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
           ${{ toJson(needs) }}
           EOF
-          echo "::group::Workflow results"
-          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-          echo "::endgroup::"
           python3 ./tests/ci/ci_buddy.py --check-wf-status
CHANGELOG.md (173 changed lines)

@@ -1,4 +1,5 @@
 ### Table of Contents
+**[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
 **[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
 **[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
 **[ClickHouse release v24.4, 2024-04-30](#244)**<br/>
@@ -9,6 +10,178 @@
 
 # 2024 Changelog
 
+### <a id="247"></a> ClickHouse release 24.7, 2024-07-30
+
+#### Backward Incompatible Change
+* Forbid `CREATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ...` with Replicated databases. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) ([vdimir](https://github.com/vdimir)).
+* `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)).
+* Remove the `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Function `tuple` will now try to construct named tuples in queries (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
+
+#### New Feature
+* Add `ASOF JOIN` support for the `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
+* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)).
+* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
+* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns the date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
+* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
+* Support `accept_invalid_certificate` in the client's config to allow the client to connect over secure TCP to a server running with a self-signed certificate; it can be used as a shorthand for the corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)).
+* Add `system.error_log`, which contains the history of error values from the table `system.errors`, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) ([Pablo Marcos](https://github.com/pamarcos)).
+* Add aggregate function `groupConcat`, roughly equivalent to `arrayStringConcat(groupArray(column), ',')`. It can receive two parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add a new setting to disable/enable writing the page index into Parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)).
+* Introduce the `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)).
+* Automatically append a wildcard `*` to the end of a directory path with the table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)).
+* Add the `--memory-usage` option to the client in non-interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)).
+* Make an interactive client for `clickhouse-disks`; add a local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)).
+* When a lightweight delete happens on a table with projection(s), users can choose either to throw an exception (by default) or to drop the projection. [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)).
+
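The date/time `change*` functions listed above can be tried straight from the shell; the first call repeats the example given in the entry, the second is an analogous illustrative call (both assume a locally running server reachable by clickhouse-client):

    clickhouse-client --query "SELECT changeMonth(toDate('2024-06-14'), 7)"    # 2024-07-14
    clickhouse-client --query "SELECT changeYear(toDate('2024-06-14'), 2023)"  # 2023-06-14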
+#### Experimental Feature
+* Change the binary serialization of the Variant data type: add a `compact` mode to avoid writing the same discriminator multiple times for granules with a single variant or with only NULL values. Add the MergeTree setting `use_compact_variant_discriminators_serialization`, enabled by default. Note that the Variant type is still experimental, so a backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support RocksDB as a backend storage for Keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)).
+* Refactor the JSONExtract functions, support more types including the experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support the null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix reading dynamic subcolumns from an altered Memory table. Previously, if the `max_types` parameter of a Dynamic type was changed in a Memory table via ALTER, further subcolumn reads could return wrong results. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add support for `cluster_for_parallel_replicas` when using custom-key parallel replicas. It allows you to use parallel replicas with a custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### Performance Improvement
+* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)).
+* Replace the integer-to-string conversion algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)).
+* Sizes of hash tables created by joins (`parallel_hash` algorithm) are now collected and cached. This information is used to preallocate space in hash tables for subsequent query executions and saves time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)).
+* Optimized queries with `ORDER BY` primary key and a `WHERE` condition with high selectivity by using buffering. It is controlled by the setting `read_in_order_use_buffering` (enabled by default) and can increase the memory usage of a query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)).
+* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)).
+* Unload the primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)).
+* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)).
+* S3 requests: reduce the retry time for queries and increase the retry count for backups: 8.5 minutes and 100 retries for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)).
+* Support query plan LIMIT optimization. Support LIMIT pushdown for PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)).
+* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* DatabaseCatalog drops tables faster by using up to `database_catalog_drop_table_concurrency` threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)).
+
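One of the entries above introduces `read_in_order_use_buffering` (enabled by default); if its extra memory usage is a concern, it can be toggled per query. A sketch with a hypothetical table name:

    clickhouse-client --query "SELECT * FROM hits ORDER BY EventTime LIMIT 100 SETTINGS read_in_order_use_buffering = 0"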
+#### Improvement
+* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, this should be beneficial. Nevertheless, if you see slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Print a stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
+* Change how deduplication for materialized views works. Fixed a lot of cases, such as: on the destination table, data split into two or more blocks was considered duplicate when those blocks were inserted in parallel; on the MV destination table, equal blocks were deduplicated, which happens when an MV often produces equal data for different input data due to aggregation; on the MV destination table, equal blocks coming from different MVs were deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
+* Allow matching column names in a case-insensitive manner when reading JSON files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)).
+* Support reading partitioned DeltaLake data. Infer the DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Previously, the TLS layer in composable protocols accepted only the `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
+* Added profile event `SelectQueriesWithPrimaryKeyUsage`, which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).
+* `StorageS3Queue`-related fixes and improvements. Deduce the default value of `s3queue_processing_threads_num` from the number of physical CPU cores on the server (instead of the previous default of 1). Set the default value of `s3queue_loading_retries` to 10. Fix a possible vague "Uncaught exception" in the exception column of `system.s3queue`. Do not increment the retry count on a `MEMORY_LIMIT_EXCEEDED` exception. Move the files commit to a stage after the insertion into the table has fully finished, to avoid files being committed while not inserted. Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, `s3queue_max_processing_time_sec_before_commit` to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support aliases in the parametrized view function (only with the new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Updated azureBlobStorage to mask the account key in logs. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
+* Partition pruning for `IN` predicates when the filter expression is a part of the `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)).
+* Add system tables with the main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)).
+* `arrayMin`/`arrayMax` are now applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)).
+* Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
+* Do not create format settings for each row when serializing chunks for insertion into an EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)).
+* Reduce the `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. [#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)).
+* Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Disable filesystem cache background download by default. It will be enabled back when we fix the issue with a possible "Memory limit exceeded", because memory deallocation is done outside of the query context (while the buffer is allocated inside the query context) if we use background download threads. Plus we need to add a separate setting to define the max size to download for background workers (currently it is limited by max_file_segment_size, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add a new config option `<config_reload_interval_ms>`, which allows specifying how often ClickHouse reloads its config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)).
+* Implement binary encoding for ClickHouse data types and add its specification in the docs. Use it in Dynamic binary serialization and allow using it in the RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)).
+* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)).
+* Add support for user identification based on the x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)).
+* `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Add a script to backup your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* The PostgreSQL source now supports cancellation. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)).
+* Make `allow_experimental_analyzer` be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Respect the cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
+* Allow using the `concat` function with empty arguments, e.g. `SELECT concat()`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)).
+* Allow controlling named collections in `clickhouse-local`. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)).
+* Support reading ORC files using the writer's time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)).
+* Add settings to control the connection to PostgreSQL: the setting `postgresql_connection_attempt_timeout` specifies the value passed to the `connect_timeout` parameter of the connection URL; the setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL endpoint. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)).
+* Reduce the inaccuracy of `input_wait_elapsed_us`/`output_wait_elapsed_us`/`elapsed_us`. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)).
+* Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)).
+* Add settings to ignore the ON CLUSTER clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Function `generateSnowflakeID` now allows specifying a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)).
+* Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Add an option for validating the primary key type in dictionaries. Without this option, for simple layouts any column type is implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+
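For the `<config_reload_interval_ms>` option mentioned above, a config.d override is the usual way to set such a server-level option; the snippet below is an illustrative sketch only (the file name and the 5000 ms value are assumptions, not from the release notes):

    # drop-in override merged into the server configuration on reload
    cat > /etc/clickhouse-server/config.d/config_reload_interval.xml << 'EOF'
    <clickhouse>
        <config_reload_interval_ms>5000</config_reload_interval_ms>
    </clickhouse>
    EOF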
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Fix an unexpected size of a low-cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
+* Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependency creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix a crash in `maxIntersections`. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix the VALID UNTIL clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix SHOW MERGES remaining time. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fixed a crash while using MaterializedMySQL with a TABLE OVERRIDE that maps a MySQL NULL field into a ClickHouse NOT NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)).
+* Fix a logical error when the PREWHERE expression reads no columns and the table has no adaptive index granularity (very old tables). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)).
+* Fix a bug with the cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)).
+* Fix filling parts columns from metadata (when columns.txt does not exist). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
+* Fix a crash on destroying AccessControl: add an explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Eliminate injective functions in the arguments of `uniq*` functions recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
+* Fix an unexpected projection name for queries with a CTE. [#65267](https://github.com/ClickHouse/ClickHouse/pull/65267) ([wudidapaopao](https://github.com/wudidapaopao)).
+* Require the `dictGet` privilege when accessing dictionaries via a direct query or the `Dictionary` table engine. [#65359](https://github.com/ClickHouse/ClickHouse/pull/65359) ([Joe Lynch](https://github.com/joelynch)).
+* Fix user-specific S3 auth with incremental backups. [#65481](https://github.com/ClickHouse/ClickHouse/pull/65481) ([Antonio Andelic](https://github.com/antonio2368)).
+* Disable the `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix getting the exception `Index out of bound for blob metadata` when all files from a list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix NOT_FOUND_COLUMN_IN_BLOCK for the deduplicate merge of a projection. [#65573](https://github.com/ClickHouse/ClickHouse/pull/65573) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Fixed a bug in MergeJoin: a column in sparse serialisation might be treated as a column of its nested type even though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
+* Fixed a bug that compatibility level '23.4' was not properly applied. [#65737](https://github.com/ClickHouse/ClickHouse/pull/65737) ([cw5121](https://github.com/cw5121)).
+* Fix ODBC tables with nullable fields. [#65738](https://github.com/ClickHouse/ClickHouse/pull/65738) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)).
+* Fix a data race in `TCPHandler`, which could happen on a fatal error. [#65744](https://github.com/ClickHouse/ClickHouse/pull/65744) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
+* For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the ClickHouse query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fix a bug in short-circuit logic when the old analyzer and `dictGetOrDefault` are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
+* Fix a bug that led EmbeddedRocksDB with TTL to write corrupted SST files. [#65816](https://github.com/ClickHouse/ClickHouse/pull/65816) ([Duc Canh Le](https://github.com/canhld94)).
+* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out of bounds. [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
+* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
+* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
+* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
+* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
+* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).
+* Fix possible issues with MySQL client protocol TLS connections. [#65938](https://github.com/ClickHouse/ClickHouse/pull/65938) ([Azat Khuzhin](https://github.com/azat)).
+* Fix handling of `SSL_ERROR_WANT_READ`/`SSL_ERROR_WANT_WRITE` with zero timeout. [#65941](https://github.com/ClickHouse/ClickHouse/pull/65941) ([Azat Khuzhin](https://github.com/azat)).
+* Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache, because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
+* Column `_size` in the s3 engine and s3 table function denotes the size of a file inside the archive, not the size of the archive itself. [#65993](https://github.com/ClickHouse/ClickHouse/pull/65993) ([Daniil Ivanik](https://github.com/divanik)).
+* Fix resolving dynamic subcolumns in the analyzer; avoid reading the whole column when reading a dynamic subcolumn. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix config merging for `from_env` with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a possible hang in `GRPCServer` during shutdown. [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also, fixed behavior in `deserialise()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Fixed a buffer overflow bug in the `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
+* Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if the optimization merges two filter expressions and does not apply short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed the issue when the server failed to parse Avro files with negative block size arrays encoded, which is now allowed by the Avro specification. [#66130](https://github.com/ClickHouse/ClickHouse/pull/66130) ([Serge Klochkov](https://github.com/slvrtrn)).
+* Fixed a bug in the ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to the "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix an issue in SumIfToCountIfVisitor with signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix a rare case with missing data in the result of a distributed query. [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
+* Fix the order of parsing metadata fields in StorageDeltaLake. [#66211](https://github.com/ClickHouse/ClickHouse/pull/66211) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Don't throw `TIMEOUT_EXCEEDED` for the `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix limit handling for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup, which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
+* Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Prevent the watchdog from keeping descriptors of unlinked (rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Fix a bug where LogicalExpressionOptimizerPass lost the logical type of a constant. [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)).
+* Fix the `Column identifier is already registered` error with `group_by_use_nulls=true` and the new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a possible incorrect result for queries joining and filtering a table with an external engine (like PostgreSQL), caused by too aggressive filter pushdown. From now on, conditions from the WHERE section won't be sent to the external database in case of an outer join with an external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)).
+* Added missing column materialization for cross joins. [#66413](https://github.com/ClickHouse/ClickHouse/pull/66413) ([lgbo](https://github.com/lgbo-ustc)).
+* Fix the `Cannot find column` error for queries with a constant expression in the `GROUP BY` key and the new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Avoid a possible logical error during import from the Npy format in case of a bad array nesting level; fix testing of other kinds of errors. [#66461](https://github.com/ClickHouse/ClickHouse/pull/66461) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Fix a wrong count() result when there is a non-deterministic function in the predicate. [#66510](https://github.com/ClickHouse/ClickHouse/pull/66510) ([Duc Canh Le](https://github.com/canhld94)).
+* Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix reading of uninitialized memory when hashing empty tuples. [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Fix `column_length` not being updated in `ColumnTuple::insertManyFrom`. [#66626](https://github.com/ClickHouse/ClickHouse/pull/66626) ([lgbo](https://github.com/lgbo-ustc)).
+* Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the analyzer disabled only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix the `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of `IN` (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a possible PARAMETER_OUT_OF_BOUND error while reading a Variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix a rare case of a stuck merge after dropping a column. [#66707](https://github.com/ClickHouse/ClickHouse/pull/66707) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix the `isUniqTypes` assertion when running INSERT SELECT from remote sources. [#66722](https://github.com/ClickHouse/ClickHouse/pull/66722) ([Sema Checherinda](https://github.com/CheSema)).
+* Fix a logical error in PrometheusRequestHandler. [#66621](https://github.com/ClickHouse/ClickHouse/pull/66621) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix an `indexHint` function case found by the fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)).
+
+#### Build/Testing/Packaging Improvement
+* Instantiate template methods ahead of time in separate .cpp files to avoid overly large translation units during compilation. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)).
+
 ### <a id="246"></a> ClickHouse release 24.6, 2024-07-01
 
 #### Backward Incompatible Change

@@ -2,11 +2,11 @@
 
 # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54488)
+SET(VERSION_REVISION 54489)
 SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9)
-SET(VERSION_DESCRIBE v24.7.1.1-testing)
-SET(VERSION_STRING 24.7.1.1)
+SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
+SET(VERSION_DESCRIBE v24.8.1.1-testing)
+SET(VERSION_STRING 24.8.1.1)
 # end of autochange

@@ -9,6 +9,7 @@ set(DATASKETCHES_LIBRARY theta)
 add_library(_datasketches INTERFACE)
 target_include_directories(_datasketches SYSTEM BEFORE INTERFACE
     "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include"
+    "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/count/include"
     "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include")
 
 add_library(ch_contrib::datasketches ALIAS _datasketches)
contrib/libunwind (submodule, 2 changed lines)

@@ -1 +1 @@
-Subproject commit fe854449e24bedfa26e38465b84374312dbd587f
+Subproject commit a89d904befea07814628c6ce0b44083c4e149c62

@@ -23,15 +23,17 @@ RUN apt-get update \
 # and MEMORY_LIMIT_EXCEEDED exceptions in Functional tests (total memory limit in Functional tests is ~55.24 GiB).
 # TSAN will flush shadow memory when reaching this limit.
 # It may cause false-negatives, but it's better than OOM.
-RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'" >> /etc/environment
-RUN echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
-RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment
-RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment
+# max_allocation_size_mb is set to 32GB, so we have much bigger chance to run into memory limit than the limitation of the sanitizers
+RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment
 # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
 # (but w/o verbosity for TSAN, otherwise test.reference will not match)
-ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
-ENV UBSAN_OPTIONS='print_stacktrace=1'
-ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
+ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'
+ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'
+ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'
+ENV LSAN_OPTIONS='max_allocation_size_mb=32768'
 
 # for external_symbolizer_path
 RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer

@@ -261,9 +261,12 @@ function timeout_with_logging() {
 
     timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
 
+    echo "Checking if it is a timeout. The code 124 will indicate a timeout."
     if [[ "${exit_code}" -eq "124" ]]
     then
-        echo "The command 'timeout ${*}' has been killed by timeout"
+        echo "The command 'timeout ${*}' has been killed by timeout."
+    else
+        echo "No, it isn't a timeout."
     fi
 
     return $exit_code

@@ -20,7 +20,7 @@
         </max_execution_time>
 
         <max_memory_usage>
-            <max>10G</max>
+            <max>5G</max>
         </max_memory_usage>
 
         <table_function_remote_max_addresses>

@@ -208,7 +208,6 @@ handle SIGPIPE nostop noprint pass
 handle SIGTERM nostop noprint pass
 handle SIGUSR1 nostop noprint pass
 handle SIGUSR2 nostop noprint pass
-handle SIGSEGV nostop pass
 handle SIG$RTMIN nostop noprint pass
 info signals
 continue

@@ -6,7 +6,7 @@ ARG apt_archive="http://archive.ubuntu.com"
 RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
 
 RUN apt-get update --yes \
-    && env DEBIAN_FRONTEND=noninteractive apt-get install wget git default-jdk maven python3 --yes --no-install-recommends \
+    && env DEBIAN_FRONTEND=noninteractive apt-get install wget git python3 default-jdk maven --yes --no-install-recommends \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
 

@@ -191,8 +191,8 @@ else
         ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
         SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
 
-    clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-    clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+    clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+    clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
     clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
     clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
 else
@@ -200,7 +200,7 @@ else
     clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
 fi
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
|
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
|
||||||
clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
|
clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
clickhouse-client --query "SHOW TABLES FROM test"
|
clickhouse-client --query "SHOW TABLES FROM test"
|
||||||
@ -251,9 +251,12 @@ function timeout_with_logging() {
|
|||||||
|
|
||||||
timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
|
timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
|
||||||
|
|
||||||
|
echo "Checking if it is a timeout. The code 124 will indicate a timeout."
|
||||||
if [[ "${exit_code}" -eq "124" ]]
|
if [[ "${exit_code}" -eq "124" ]]
|
||||||
then
|
then
|
||||||
echo "The command 'timeout ${*}' has been killed by timeout"
|
echo "The command 'timeout ${*}' has been killed by timeout."
|
||||||
|
else
|
||||||
|
echo "No, it isn't a timeout."
|
||||||
fi
|
fi
|
||||||
|
|
||||||
return $exit_code
|
return $exit_code
|
||||||
|
@ -20,7 +20,6 @@ handle SIGPIPE nostop noprint pass
|
|||||||
handle SIGTERM nostop noprint pass
|
handle SIGTERM nostop noprint pass
|
||||||
handle SIGUSR1 nostop noprint pass
|
handle SIGUSR1 nostop noprint pass
|
||||||
handle SIGUSR2 nostop noprint pass
|
handle SIGUSR2 nostop noprint pass
|
||||||
handle SIGSEGV nostop pass
|
|
||||||
handle SIG$RTMIN nostop noprint pass
|
handle SIG$RTMIN nostop noprint pass
|
||||||
info signals
|
info signals
|
||||||
continue
|
continue
|
||||||
|
@ -247,12 +247,22 @@ function run_tests()
|
|||||||
|
|
||||||
try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
|
try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
|
||||||
|
|
||||||
|
TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800))
|
||||||
|
START_TIME=${SECONDS}
|
||||||
set +e
|
set +e
|
||||||
timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s \
|
||||||
--no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
||||||
|
--no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
||||||
| ts '%Y-%m-%d %H:%M:%S' \
|
| ts '%Y-%m-%d %H:%M:%S' \
|
||||||
| tee -a test_output/test_result.txt
|
| tee -a test_output/test_result.txt
|
||||||
set -e
|
set -e
|
||||||
|
DURATION=$((START_TIME - SECONDS))
|
||||||
|
|
||||||
|
echo "Elapsed ${DURATION} seconds."
|
||||||
|
if [[ $DURATION -ge $TIMEOUT ]]
|
||||||
|
then
|
||||||
|
echo "It looks like the command is terminated by the timeout, which is ${TIMEOUT} seconds."
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
export -f run_tests
|
export -f run_tests
|
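The new `TIMEOUT` line uses the C-style ternary operator that bash supports inside arithmetic expansion. A standalone sketch with an assumed `MAX_RUN_TIME` (not the value used in CI):

```bash
#!/usr/bin/env bash
# Reserve 800 seconds for cleanup, but never let the test stage exceed 8400 seconds.
MAX_RUN_TIME=10800
TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800))
echo "$TIMEOUT"  # prints 8400: 10000 seconds would exceed the 8400-second cap
```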
@@ -264,7 +274,7 @@ if [ "$NUM_TRIES" -gt "1" ]; then
     # We don't run tests with Ordinary database in PRs, only in master.
     # So run new/changed tests with Ordinary at least once in flaky check.
     timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
-        | sed 's/All tests have finished//' | sed 's/No tests were run//' ||:
+        | sed 's/All tests have finished/Redacted: a message about tests finish is deleted/' | sed 's/No tests were run/Redacted: a message about no tests run is deleted/' ||:
 fi

 timeout_with_logging "$TIMEOUT" bash -c run_tests ||:

@@ -45,9 +45,12 @@ function timeout_with_logging() {

     timeout -s TERM --preserve-status "${@}" || exit_code="${?}"

+    echo "Checking if it is a timeout. The code 124 will indicate a timeout."
     if [[ "${exit_code}" -eq "124" ]]
     then
-        echo "The command 'timeout ${*}' has been killed by timeout"
+        echo "The command 'timeout ${*}' has been killed by timeout."
+    else
+        echo "No, it isn't a timeout."
     fi

     return $exit_code

@@ -209,9 +209,9 @@ clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDat
     ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
     SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='$TEMP_POLICY'"

-clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
-clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
-clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"

 clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
 clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"

@@ -12,6 +12,7 @@ UNKNOWN_SIGN = "[ UNKNOWN "
 SKIPPED_SIGN = "[ SKIPPED "
 HUNG_SIGN = "Found hung queries in processlist"
 SERVER_DIED_SIGN = "Server died, terminating all processes"
+SERVER_DIED_SIGN2 = "Server does not respond to health check"
 DATABASE_SIGN = "Database: "

 SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]

@@ -43,7 +44,7 @@ def process_test_log(log_path, broken_tests):
        if HUNG_SIGN in line:
            hung = True
            break
-        if SERVER_DIED_SIGN in line:
+        if SERVER_DIED_SIGN in line or SERVER_DIED_SIGN2 in line:
            server_died = True
        if RETRIES_SIGN in line:
            retries = True

@@ -111,12 +112,12 @@ def process_test_log(log_path, broken_tests):
    # Python does not support TSV, so we have to escape '\t' and '\n' manually
    # and hope that complex escape sequences will not break anything
    test_results = [
-        (
+        [
            test[0],
            test[1],
            test[2],
            "".join(test[3])[:4096].replace("\t", "\\t").replace("\n", "\\n"),
-        )
+        ]
        for test in test_results
    ]

@@ -170,18 +171,23 @@ def process_result(result_path, broken_tests):
        if hung:
            description = "Some queries hung, "
            state = "failure"
-            test_results.append(("Some queries hung", "FAIL", "0", ""))
+            test_results.append(["Some queries hung", "FAIL", "0", ""])
        elif server_died:
            description = "Server died, "
            state = "failure"
-            test_results.append(("Server died", "FAIL", "0", ""))
+            # When ClickHouse server crashes, some tests are still running
+            # and fail because they cannot connect to server
+            for result in test_results:
+                if result[1] == "FAIL":
+                    result[1] = "SERVER_DIED"
+            test_results.append(["Server died", "FAIL", "0", ""])
        elif not success_finish:
            description = "Tests are not finished, "
            state = "failure"
-            test_results.append(("Tests are not finished", "FAIL", "0", ""))
+            test_results.append(["Tests are not finished", "FAIL", "0", ""])
        elif retries:
            description = "Some tests restarted, "
-            test_results.append(("Some tests restarted", "SKIPPED", "0", ""))
+            test_results.append(["Some tests restarted", "SKIPPED", "0", ""])
        else:
            description = ""

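A side note on the tuple-to-list change carried through this file: the result rows later have to be re-labelled in place when the server dies, which only works on a mutable container. An illustrative snippet (test name and message are made up):

```python
# Rows must be mutable so a later pass can re-label failures caused by the crash.
row = ["02345_example_test", "FAIL", "0.12", "Connection refused"]
if row[1] == "FAIL":
    row[1] = "SERVER_DIED"  # fine on a list; a tuple would raise TypeError here
print(row)
```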
@@ -233,11 +239,12 @@ if __name__ == "__main__":
        # sort by status then by check name
        order = {
            "FAIL": 0,
-            "Timeout": 1,
-            "NOT_FAILED": 2,
-            "BROKEN": 3,
-            "OK": 4,
-            "SKIPPED": 5,
+            "SERVER_DIED": 1,
+            "Timeout": 2,
+            "NOT_FAILED": 3,
+            "BROKEN": 4,
+            "OK": 5,
+            "SKIPPED": 6,
        }
        return order.get(item[1], 10), str(item[0]), item[1]

@@ -999,6 +999,10 @@ They can be used for prewhere optimization only if we enable `set allow_statisti

 [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation how many distinct values a column contains.

+- `count_min`
+
+    [Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
+
 ## Column-level Settings {#column-level-settings}

 Certain MergeTree settings can be override at column level:
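For the statistics types documented above (`uniq`, `count_min`), a hedged sketch of how they are declared on a column follows; the exact clause and setting names can differ between ClickHouse versions, so treat this as illustrative rather than canonical syntax:

```sql
SET allow_experimental_statistics = 1;

CREATE TABLE stats_example
(
    key   UInt64,
    value String STATISTICS(uniq, count_min)
)
ENGINE = MergeTree
ORDER BY key;
```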
@@ -55,7 +55,7 @@ CMPLNT_FR_TM Nullable(String)
 ```

 :::tip
-Most of the time the above command will let you know which fields in the input data are numeric, and which are strings, and which are tuples. This is not always the case. Because ClickHouse is routineley used with datasets containing billions of records there is a default number (100) of rows examined to [infer the schema](/docs/en/integrations/data-ingestion/data-formats/json.md#relying-on-schema-inference) in order to avoid parsing billions of rows to infer the schema. The response below may not match what you see, as the dataset is updated several times each year. Looking at the Data Dictionary you can see that CMPLNT_NUM is specified as text, and not numeric. By overriding the default of 100 rows for inference with the setting `SETTINGS input_format_max_rows_to_read_for_schema_inference=2000`
+Most of the time the above command will let you know which fields in the input data are numeric, and which are strings, and which are tuples. This is not always the case. Because ClickHouse is routineley used with datasets containing billions of records there is a default number (100) of rows examined to [infer the schema](/en/integrations/data-formats/json/inference) in order to avoid parsing billions of rows to infer the schema. The response below may not match what you see, as the dataset is updated several times each year. Looking at the Data Dictionary you can see that CMPLNT_NUM is specified as text, and not numeric. By overriding the default of 100 rows for inference with the setting `SETTINGS input_format_max_rows_to_read_for_schema_inference=2000`
 you can get a better idea of the content.

 Note: as of version 22.5 the default is now 25,000 rows for inferring the schema, so only change the setting if you are on an older version or if you need more than 25,000 rows to be sampled.

@@ -7,7 +7,7 @@ keywords: [object, data type]

 # Object Data Type (deprecated)

-**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
+**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).

 <hr />

@@ -2102,14 +2102,14 @@ Result:
 └─────────────────┘
 ```

-## filesystemFree
+## filesystemUnreserved

-Returns the total amount of the free space on the filesystem hosting the database persistence. See also `filesystemAvailable`
+Returns the total amount of the free space on the filesystem hosting the database persistence. (previously `filesystemFree`). See also [`filesystemAvailable`](#filesystemavailable).

 **Syntax**

 ```sql
-filesystemFree()
+filesystemUnreserved()
 ```

 **Returned value**

@@ -2121,7 +2121,7 @@ filesystemFree()
 Query:

 ```sql
-SELECT formatReadableSize(filesystemFree()) AS "Free space";
+SELECT formatReadableSize(filesystemUnreserved()) AS "Free space";
 ```

 Result:

@@ -2449,11 +2449,11 @@ As you can see, `runningAccumulate` merges states for each group of rows separat

 ## joinGet

-The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md).
+The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md). Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key.

-Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key.
-
+:::note
 Only supports tables created with the `ENGINE = Join(ANY, LEFT, <join_keys>)` statement.
+:::

 **Syntax**

@@ -2463,26 +2463,32 @@ joinGet(join_storage_table_name, `value_column`, join_keys)

 **Arguments**

-- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed. The identifier is searched in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example.
+- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed.
 - `value_column` — name of the column of the table that contains required data.
 - `join_keys` — list of keys.

+:::note
+The identifier is searched for in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example.
+:::
+
 **Returned value**

-Returns a list of values corresponded to list of keys.
+- Returns a list of values corresponded to the list of keys.

-If certain does not exist in source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting.
-
+:::note
+If a certain key does not exist in source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting during table creation.
 More info about `join_use_nulls` in [Join operation](../../engines/table-engines/special/join.md).
+:::

 **Example**

 Input table:

 ```sql
-CREATE DATABASE db_test
-CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1
-INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13)
+CREATE DATABASE db_test;
+CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id);
+INSERT INTO db_test.id_val VALUES (1, 11)(2, 12)(4, 13);
+SELECT * FROM db_test.id_val;
 ```

 ```text

@@ -2496,18 +2502,116 @@ INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13)
 Query:

 ```sql
-SELECT joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1
+SELECT number, joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4);
 ```

 Result:

 ```text
-┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐
-│ 0 │
-│ 11 │
-│ 12 │
-│ 0 │
-└──────────────────────────────────────────────────┘
+   ┌─number─┬─joinGet('db_test.id_val', 'val', toUInt32(number))─┐
+1. │      0 │                                                   0 │
+2. │      1 │                                                  11 │
+3. │      2 │                                                  12 │
+4. │      3 │                                                   0 │
+   └────────┴─────────────────────────────────────────────────────┘
+```
+
+Setting `join_use_nulls` can be used during table creation to change the behaviour of what gets returned if no key exists in the source table.
+
+```sql
+CREATE DATABASE db_test;
+CREATE TABLE db_test.id_val_nulls(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls=1;
+INSERT INTO db_test.id_val_nulls VALUES (1, 11)(2, 12)(4, 13);
+SELECT * FROM db_test.id_val_nulls;
+```
+
+```text
+┌─id─┬─val─┐
+│  4 │  13 │
+│  2 │  12 │
+│  1 │  11 │
+└────┴─────┘
+```
+
+Query:
+
+```sql
+SELECT number, joinGet(db_test.id_val_nulls, 'val', toUInt32(number)) from numbers(4);
+```
+
+Result:
+
+```text
+   ┌─number─┬─joinGet('db_test.id_val_nulls', 'val', toUInt32(number))─┐
+1. │      0 │                                                      ᴺᵁᴸᴸ │
+2. │      1 │                                                        11 │
+3. │      2 │                                                        12 │
+4. │      3 │                                                      ᴺᵁᴸᴸ │
+   └────────┴───────────────────────────────────────────────────────────┘
+```
+
+## joinGetOrNull
+
+Like [joinGet](#joinget) but returns `NULL` when the key is missing instead of returning the default value.
+
+**Syntax**
+
+```sql
+joinGetOrNull(join_storage_table_name, `value_column`, join_keys)
+```
+
+**Arguments**
+
+- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed.
+- `value_column` — name of the column of the table that contains required data.
+- `join_keys` — list of keys.
+
+:::note
+The identifier is searched for in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example.
+:::
+
+**Returned value**
+
+- Returns a list of values corresponded to the list of keys.
+
+:::note
+If a certain key does not exist in source table then `NULL` is returned for that key.
+:::
+
+**Example**
+
+Input table:
+
+```sql
+CREATE DATABASE db_test;
+CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id);
+INSERT INTO db_test.id_val VALUES (1, 11)(2, 12)(4, 13);
+SELECT * FROM db_test.id_val;
+```
+
+```text
+┌─id─┬─val─┐
+│  4 │  13 │
+│  2 │  12 │
+│  1 │  11 │
+└────┴─────┘
+```
+
+Query:
+
+```sql
+SELECT number, joinGetOrNull(db_test.id_val, 'val', toUInt32(number)) from numbers(4);
+```
+
+Result:
+
+```text
+   ┌─number─┬─joinGetOrNull('db_test.id_val', 'val', toUInt32(number))─┐
+1. │      0 │                                                      ᴺᵁᴸᴸ │
+2. │      1 │                                                        11 │
+3. │      2 │                                                        12 │
+4. │      3 │                                                      ᴺᵁᴸᴸ │
+   └────────┴───────────────────────────────────────────────────────────┘
 ```

 ## catboostEvaluate

@@ -21,7 +21,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
     name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr2] [COMMENT 'comment for column'] [compression_codec] [TTL expr2],
     ...
 ) ENGINE = engine
-COMMENT 'comment for table'
+[COMMENT 'comment for table']
 ```

 Creates a table named `table_name` in the `db` database or the current database if `db` is not set, with the structure specified in brackets and the `engine` engine.

@@ -626,11 +626,6 @@ SELECT * FROM base.t1;

 You can add a comment to the table when you creating it.

-:::note
-The comment clause is supported by all table engines except [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) and [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md).
-:::
-
-
 **Syntax**

 ``` sql

@@ -16,6 +16,7 @@ Syntax:
 CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
 [DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
 AS SELECT ...
+[COMMENT 'comment']
 ```
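An assumed example of the optional comment clause added to the syntax above (not taken from the documentation):

```sql
CREATE VIEW first_ten
AS SELECT number FROM system.numbers LIMIT 10
COMMENT 'a trivial example view';
```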
|
||||||
Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
|
Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
|
||||||
@ -57,6 +58,7 @@ SELECT * FROM view(column1=value1, column2=value2 ...)
|
|||||||
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
|
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
|
||||||
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
|
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
|
||||||
AS SELECT ...
|
AS SELECT ...
|
||||||
|
[COMMENT 'comment']
|
||||||
```
|
```
|
||||||
|
|
||||||
:::tip
|
:::tip
|
||||||
@ -161,6 +163,7 @@ RANDOMIZE FOR interval
|
|||||||
DEPENDS ON [db.]name [, [db.]name [, ...]]
|
DEPENDS ON [db.]name [, [db.]name [, ...]]
|
||||||
[TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY]
|
[TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY]
|
||||||
AS SELECT ...
|
AS SELECT ...
|
||||||
|
[COMMENT 'comment']
|
||||||
```
|
```
|
||||||
where `interval` is a sequence of simple intervals:
|
where `interval` is a sequence of simple intervals:
|
||||||
```sql
|
```sql
|
||||||
@ -267,7 +270,10 @@ This is an experimental feature that may change in backwards-incompatible ways i
|
|||||||
:::
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [INNER ENGINE engine] [ENGINE engine] [WATERMARK strategy] [ALLOWED_LATENESS interval_function] [POPULATE] AS SELECT ... GROUP BY time_window_function
|
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [INNER ENGINE engine] [ENGINE engine] [WATERMARK strategy] [ALLOWED_LATENESS interval_function] [POPULATE]
|
||||||
|
AS SELECT ...
|
||||||
|
GROUP BY time_window_function
|
||||||
|
[COMMENT 'comment']
|
||||||
```
|
```
|
||||||
|
|
||||||
Window view can aggregate data by time window and output the results when the window is ready to fire. It stores the partial aggregation results in an inner(or specified) table to reduce latency and can push the processing result to a specified table or push notifications using the WATCH query.
|
Window view can aggregate data by time window and output the results when the window is ready to fire. It stores the partial aggregation results in an inner(or specified) table to reduce latency and can push the processing result to a specified table or push notifications using the WATCH query.
|
||||||
|
@ -18,10 +18,21 @@ Reloads all dictionaries that have been successfully loaded before.
|
|||||||
By default, dictionaries are loaded lazily (see [dictionaries_lazy_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED).
|
By default, dictionaries are loaded lazily (see [dictionaries_lazy_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED).
|
||||||
Always returns `Ok.` regardless of the result of the dictionary update.
|
Always returns `Ok.` regardless of the result of the dictionary update.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SYSTEM RELOAD DICTIONARIES [ON CLUSTER cluster_name]
|
||||||
|
```
|
||||||
|
|
||||||
## RELOAD DICTIONARY
|
## RELOAD DICTIONARY
|
||||||
|
|
||||||
Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT_LOADED / FAILED).
|
Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT_LOADED / FAILED).
|
||||||
Always returns `Ok.` regardless of the result of updating the dictionary.
|
Always returns `Ok.` regardless of the result of updating the dictionary.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SYSTEM RELOAD DICTIONARY [ON CLUSTER cluster_name] dictionary_name
|
||||||
|
```
|
||||||
|
|
||||||
The status of the dictionary can be checked by querying the `system.dictionaries` table.
|
The status of the dictionary can be checked by querying the `system.dictionaries` table.
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
|
@ -1,36 +0,0 @@
|
|||||||
---
|
|
||||||
slug: /en/sql-reference/table-functions/fuzzQuery
|
|
||||||
sidebar_position: 75
|
|
||||||
sidebar_label: fuzzQuery
|
|
||||||
---
|
|
||||||
|
|
||||||
# fuzzQuery
|
|
||||||
|
|
||||||
Perturbs the given query string with random variations.
|
|
||||||
|
|
||||||
``` sql
|
|
||||||
fuzzQuery(query[, max_query_length[, random_seed]])
|
|
||||||
```
|
|
||||||
|
|
||||||
**Arguments**
|
|
||||||
|
|
||||||
- `query` (String) - The source query to perform the fuzzing on.
|
|
||||||
- `max_query_length` (UInt64) - A maximum length the query can get during the fuzzing process.
|
|
||||||
- `random_seed` (UInt64) - A random seed for producing stable results.
|
|
||||||
|
|
||||||
**Returned Value**
|
|
||||||
|
|
||||||
A table object with a single column containing perturbed query strings.
|
|
||||||
|
|
||||||
## Usage Example
|
|
||||||
|
|
||||||
``` sql
|
|
||||||
SELECT * FROM fuzzQuery('SELECT materialize(\'a\' AS key) GROUP BY key') LIMIT 2;
|
|
||||||
```
|
|
||||||
|
|
||||||
```
|
|
||||||
┌─query──────────────────────────────────────────────────────────┐
|
|
||||||
1. │ SELECT 'a' AS key GROUP BY key │
|
|
||||||
2. │ EXPLAIN PIPELINE compact = true SELECT 'a' AS key GROUP BY key │
|
|
||||||
└────────────────────────────────────────────────────────────────┘
|
|
||||||
```
|
|
@ -9,10 +9,7 @@ namespace DB
|
|||||||
class Client : public ClientBase
|
class Client : public ClientBase
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
Client()
|
Client() = default;
|
||||||
{
|
|
||||||
fuzzer = QueryFuzzer(randomSeed(), &std::cout, &std::cerr);
|
|
||||||
}
|
|
||||||
|
|
||||||
void initialize(Poco::Util::Application & self) override;
|
void initialize(Poco::Util::Application & self) override;
|
||||||
|
|
||||||
|
@ -1,2 +1,2 @@
|
|||||||
clickhouse_add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp ../validateODBCConnectionString.cpp)
|
clickhouse_add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp ../validateODBCConnectionString.cpp)
|
||||||
target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io)
|
target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io clickhouse_common_config)
|
||||||
|
@ -1,12 +1,12 @@
|
|||||||
#include <cassert>
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <IO/ReadHelpers.h>
|
#include <IO/ReadHelpers.h>
|
||||||
#include <IO/ReadHelpersArena.h>
|
#include <IO/ReadHelpersArena.h>
|
||||||
|
|
||||||
#include <DataTypes/DataTypeArray.h>
|
#include <DataTypes/DataTypeArray.h>
|
||||||
#include <DataTypes/DataTypesNumber.h>
|
#include <DataTypes/DataTypeDate.h>
|
||||||
|
#include <DataTypes/DataTypeDate32.h>
|
||||||
|
#include <DataTypes/DataTypeDateTime.h>
|
||||||
|
#include <DataTypes/DataTypeDateTime64.h>
|
||||||
#include <DataTypes/DataTypeString.h>
|
#include <DataTypes/DataTypeString.h>
|
||||||
|
|
||||||
#include <Columns/ColumnArray.h>
|
#include <Columns/ColumnArray.h>
|
||||||
@ -15,18 +15,14 @@
|
|||||||
#include <Common/HashTable/HashTableKeyHolder.h>
|
#include <Common/HashTable/HashTableKeyHolder.h>
|
||||||
#include <Common/assert_cast.h>
|
#include <Common/assert_cast.h>
|
||||||
|
|
||||||
#include <AggregateFunctions/IAggregateFunction.h>
|
|
||||||
#include <AggregateFunctions/KeyHolderHelpers.h>
|
|
||||||
|
|
||||||
#include <Core/Field.h>
|
#include <Core/Field.h>
|
||||||
|
|
||||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||||
#include <AggregateFunctions/Helpers.h>
|
|
||||||
#include <AggregateFunctions/FactoryHelpers.h>
|
#include <AggregateFunctions/FactoryHelpers.h>
|
||||||
#include <DataTypes/DataTypeDate.h>
|
#include <AggregateFunctions/Helpers.h>
|
||||||
#include <DataTypes/DataTypeDate32.h>
|
#include <AggregateFunctions/IAggregateFunction.h>
|
||||||
#include <DataTypes/DataTypeDateTime.h>
|
|
||||||
#include <DataTypes/DataTypeDateTime64.h>
|
#include <memory>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -51,7 +47,7 @@ struct AggregateFunctionGroupArrayIntersectData
|
|||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
/// Puts all values to the hash set. Returns an array of unique values. Implemented for numeric types.
|
/// Puts all values to the hash set. Returns an array of unique values present in all inputs. Implemented for numeric types.
|
||||||
template <typename T>
|
template <typename T>
|
||||||
class AggregateFunctionGroupArrayIntersect
|
class AggregateFunctionGroupArrayIntersect
|
||||||
: public IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectData<T>, AggregateFunctionGroupArrayIntersect<T>>
|
: public IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectData<T>, AggregateFunctionGroupArrayIntersect<T>>
|
||||||
@ -69,7 +65,7 @@ public:
|
|||||||
: IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectData<T>,
|
: IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectData<T>,
|
||||||
AggregateFunctionGroupArrayIntersect<T>>({argument_type}, parameters_, result_type_) {}
|
AggregateFunctionGroupArrayIntersect<T>>({argument_type}, parameters_, result_type_) {}
|
||||||
|
|
||||||
String getName() const override { return "GroupArrayIntersect"; }
|
String getName() const override { return "groupArrayIntersect"; }
|
||||||
|
|
||||||
bool allocatesMemoryInArena() const override { return false; }
|
bool allocatesMemoryInArena() const override { return false; }
|
||||||
|
|
||||||
@ -158,7 +154,7 @@ public:
|
|||||||
set.reserve(size);
|
set.reserve(size);
|
||||||
for (size_t i = 0; i < size; ++i)
|
for (size_t i = 0; i < size; ++i)
|
||||||
{
|
{
|
||||||
int key;
|
T key;
|
||||||
readIntBinary(key, buf);
|
readIntBinary(key, buf);
|
||||||
set.insert(key);
|
set.insert(key);
|
||||||
}
|
}
|
||||||
@ -213,7 +209,7 @@ public:
|
|||||||
: IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectGenericData, AggregateFunctionGroupArrayIntersectGeneric<is_plain_column>>({input_data_type_}, parameters_, result_type_)
|
: IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectGenericData, AggregateFunctionGroupArrayIntersectGeneric<is_plain_column>>({input_data_type_}, parameters_, result_type_)
|
||||||
, input_data_type(result_type_) {}
|
, input_data_type(result_type_) {}
|
||||||
|
|
||||||
String getName() const override { return "GroupArrayIntersect"; }
|
String getName() const override { return "groupArrayIntersect"; }
|
||||||
|
|
||||||
bool allocatesMemoryInArena() const override { return true; }
|
bool allocatesMemoryInArena() const override { return true; }
|
||||||
|
|
||||||
@ -240,7 +236,7 @@ public:
|
|||||||
{
|
{
|
||||||
const char * begin = nullptr;
|
const char * begin = nullptr;
|
||||||
StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin);
|
StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin);
|
||||||
assert(serialized.data != nullptr);
|
chassert(serialized.data != nullptr);
|
||||||
set.emplace(SerializedKeyHolder{serialized, *arena}, it, inserted);
|
set.emplace(SerializedKeyHolder{serialized, *arena}, it, inserted);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -260,7 +256,7 @@ public:
|
|||||||
{
|
{
|
||||||
const char * begin = nullptr;
|
const char * begin = nullptr;
|
||||||
StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin);
|
StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin);
|
||||||
assert(serialized.data != nullptr);
|
chassert(serialized.data != nullptr);
|
||||||
it = set.find(serialized);
|
it = set.find(serialized);
|
||||||
|
|
||||||
if (it != nullptr)
|
if (it != nullptr)
|
||||||
|
@ -195,7 +195,7 @@ bool SingleValueDataFixed<T>::isEqualTo(const IColumn & column, size_t index) co
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
bool SingleValueDataFixed<T>::isEqualTo(const SingleValueDataFixed<T> & to) const
|
bool SingleValueDataFixed<T>::isEqualTo(const SingleValueDataFixed<T> & to) const
|
||||||
{
|
{
|
||||||
return has() && to.value == value;
|
return has() && to.has() && to.value == value;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
@ -904,6 +904,7 @@ bool SingleValueDataNumeric<T>::isEqualTo(const DB::IColumn & column, size_t ind
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
bool SingleValueDataNumeric<T>::isEqualTo(const DB::SingleValueDataBase & to) const
|
bool SingleValueDataNumeric<T>::isEqualTo(const DB::SingleValueDataBase & to) const
|
||||||
{
|
{
|
||||||
|
/// to.has() is checked in memory.get().isEqualTo
|
||||||
auto const & other = assert_cast<const Self &>(to);
|
auto const & other = assert_cast<const Self &>(to);
|
||||||
return memory.get().isEqualTo(other.memory.get());
|
return memory.get().isEqualTo(other.memory.get());
|
||||||
}
|
}
|
||||||
@ -917,6 +918,7 @@ void SingleValueDataNumeric<T>::set(const DB::IColumn & column, size_t row_num,
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
void SingleValueDataNumeric<T>::set(const DB::SingleValueDataBase & to, DB::Arena * arena)
|
void SingleValueDataNumeric<T>::set(const DB::SingleValueDataBase & to, DB::Arena * arena)
|
||||||
{
|
{
|
||||||
|
/// to.has() is checked in memory.get().set
|
||||||
auto const & other = assert_cast<const Self &>(to);
|
auto const & other = assert_cast<const Self &>(to);
|
||||||
return memory.get().set(other.memory.get(), arena);
|
return memory.get().set(other.memory.get(), arena);
|
||||||
}
|
}
|
||||||
@ -924,6 +926,7 @@ void SingleValueDataNumeric<T>::set(const DB::SingleValueDataBase & to, DB::Aren
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
bool SingleValueDataNumeric<T>::setIfSmaller(const DB::SingleValueDataBase & to, DB::Arena * arena)
|
bool SingleValueDataNumeric<T>::setIfSmaller(const DB::SingleValueDataBase & to, DB::Arena * arena)
|
||||||
{
|
{
|
||||||
|
/// to.has() is checked in memory.get().setIfSmaller
|
||||||
auto const & other = assert_cast<const Self &>(to);
|
auto const & other = assert_cast<const Self &>(to);
|
||||||
return memory.get().setIfSmaller(other.memory.get(), arena);
|
return memory.get().setIfSmaller(other.memory.get(), arena);
|
||||||
}
|
}
|
||||||
@ -931,6 +934,7 @@ bool SingleValueDataNumeric<T>::setIfSmaller(const DB::SingleValueDataBase & to,
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
bool SingleValueDataNumeric<T>::setIfGreater(const DB::SingleValueDataBase & to, DB::Arena * arena)
|
bool SingleValueDataNumeric<T>::setIfGreater(const DB::SingleValueDataBase & to, DB::Arena * arena)
|
||||||
{
|
{
|
||||||
|
/// to.has() is checked in memory.get().setIfGreater
|
||||||
auto const & other = assert_cast<const Self &>(to);
|
auto const & other = assert_cast<const Self &>(to);
|
||||||
return memory.get().setIfGreater(other.memory.get(), arena);
|
return memory.get().setIfGreater(other.memory.get(), arena);
|
||||||
}
|
}
|
||||||
@ -1191,7 +1195,7 @@ bool SingleValueDataString::isEqualTo(const DB::IColumn & column, size_t row_num
|
|||||||
bool SingleValueDataString::isEqualTo(const SingleValueDataBase & other) const
|
bool SingleValueDataString::isEqualTo(const SingleValueDataBase & other) const
|
||||||
{
|
{
|
||||||
auto const & to = assert_cast<const Self &>(other);
|
auto const & to = assert_cast<const Self &>(other);
|
||||||
return has() && to.getStringRef() == getStringRef();
|
return has() && to.has() && to.getStringRef() == getStringRef();
|
||||||
}
|
}
|
||||||
|
|
||||||
void SingleValueDataString::set(const IColumn & column, size_t row_num, Arena * arena)
|
void SingleValueDataString::set(const IColumn & column, size_t row_num, Arena * arena)
|
||||||
@ -1291,7 +1295,7 @@ bool SingleValueDataGeneric::isEqualTo(const IColumn & column, size_t row_num) c
|
|||||||
bool SingleValueDataGeneric::isEqualTo(const DB::SingleValueDataBase & other) const
|
bool SingleValueDataGeneric::isEqualTo(const DB::SingleValueDataBase & other) const
|
||||||
{
|
{
|
||||||
auto const & to = assert_cast<const Self &>(other);
|
auto const & to = assert_cast<const Self &>(other);
|
||||||
return has() && to.value == value;
|
return has() && to.has() && to.value == value;
|
||||||
}
|
}
|
||||||
|
|
||||||
void SingleValueDataGeneric::set(const IColumn & column, size_t row_num, Arena *)
|
void SingleValueDataGeneric::set(const IColumn & column, size_t row_num, Arena *)
|
||||||
|
@ -49,7 +49,7 @@ enum class QueryTreeNodeType : uint8_t
|
|||||||
/// Convert query tree node type to string
|
/// Convert query tree node type to string
|
||||||
const char * toString(QueryTreeNodeType type);
|
const char * toString(QueryTreeNodeType type);
|
||||||
|
|
||||||
/** Query tree is semantical representation of query.
|
/** Query tree is a semantic representation of query.
|
||||||
* Query tree node represent node in query tree.
|
* Query tree node represent node in query tree.
|
||||||
* IQueryTreeNode is base class for all query tree nodes.
|
* IQueryTreeNode is base class for all query tree nodes.
|
||||||
*
|
*
|
||||||
|
@ -68,10 +68,13 @@ QueryTreeNodePtr findEqualsFunction(const QueryTreeNodes & nodes)
|
|||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Checks if the node is combination of isNull and notEquals functions of two the same arguments
|
/// Checks if the node is combination of isNull and notEquals functions of two the same arguments:
|
||||||
|
/// [ (a <> b AND) ] (a IS NULL) AND (b IS NULL)
|
||||||
bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, QueryTreeNodePtr & rhs)
|
bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, QueryTreeNodePtr & rhs)
|
||||||
{
|
{
|
||||||
QueryTreeNodePtrWithHashSet all_arguments;
|
QueryTreeNodePtrWithHashSet all_arguments;
|
||||||
|
QueryTreeNodePtrWithHashSet is_null_arguments;
|
||||||
|
|
||||||
for (const auto & node : nodes)
|
for (const auto & node : nodes)
|
||||||
{
|
{
|
||||||
const auto * func_node = node->as<FunctionNode>();
|
const auto * func_node = node->as<FunctionNode>();
|
||||||
@ -80,7 +83,11 @@ bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs,
|
|||||||
|
|
||||||
const auto & arguments = func_node->getArguments().getNodes();
|
const auto & arguments = func_node->getArguments().getNodes();
|
||||||
if (func_node->getFunctionName() == "isNull" && arguments.size() == 1)
|
if (func_node->getFunctionName() == "isNull" && arguments.size() == 1)
|
||||||
|
{
|
||||||
all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0]));
|
all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0]));
|
||||||
|
is_null_arguments.insert(QueryTreeNodePtrWithHash(arguments[0]));
|
||||||
|
}
|
||||||
|
|
||||||
else if (func_node->getFunctionName() == "notEquals" && arguments.size() == 2)
|
else if (func_node->getFunctionName() == "notEquals" && arguments.size() == 2)
|
||||||
{
|
{
|
||||||
if (arguments[0]->isEqual(*arguments[1]))
|
if (arguments[0]->isEqual(*arguments[1]))
|
||||||
@ -95,7 +102,7 @@ bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs,
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (all_arguments.size() != 2)
|
if (all_arguments.size() != 2 || is_null_arguments.size() != 2)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
lhs = all_arguments.begin()->node;
|
lhs = all_arguments.begin()->node;
|
||||||
|
@ -4124,9 +4124,7 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
|
|||||||
|
|
||||||
auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
|
auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
|
||||||
if (!column_to_interpolate)
|
if (!column_to_interpolate)
|
||||||
throw Exception(
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found",
|
||||||
ErrorCodes::LOGICAL_ERROR,
|
|
||||||
"INTERPOLATE can work only for identifiers, but {} is found",
|
|
||||||
interpolate_node_typed.getExpression()->formatASTForErrorMessage());
|
interpolate_node_typed.getExpression()->formatASTForErrorMessage());
|
||||||
auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();
|
auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();
|
||||||
|
|
||||||
|
@ -226,6 +226,9 @@ add_object_library(clickhouse_storages_windowview Storages/WindowView)
|
|||||||
add_object_library(clickhouse_storages_s3queue Storages/ObjectStorageQueue)
|
add_object_library(clickhouse_storages_s3queue Storages/ObjectStorageQueue)
|
||||||
add_object_library(clickhouse_storages_materializedview Storages/MaterializedView)
|
add_object_library(clickhouse_storages_materializedview Storages/MaterializedView)
|
||||||
add_object_library(clickhouse_client Client)
|
add_object_library(clickhouse_client Client)
|
||||||
|
# Always compile this file with the highest possible level of optimizations, even in Debug builds.
|
||||||
|
# https://github.com/ClickHouse/ClickHouse/issues/65745
|
||||||
|
set_source_files_properties(Client/ClientBaseOptimizedParts.cpp PROPERTIES COMPILE_FLAGS "-O3")
|
||||||
add_object_library(clickhouse_bridge BridgeHelper)
|
add_object_library(clickhouse_bridge BridgeHelper)
|
||||||
add_object_library(clickhouse_server Server)
|
add_object_library(clickhouse_server Server)
|
||||||
add_object_library(clickhouse_server_http Server/HTTP)
|
add_object_library(clickhouse_server_http Server/HTTP)
|
||||||
@ -543,7 +546,7 @@ if (TARGET ch_contrib::libpqxx)
|
|||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (TARGET ch_contrib::datasketches)
|
if (TARGET ch_contrib::datasketches)
|
||||||
target_link_libraries (clickhouse_aggregate_functions PRIVATE ch_contrib::datasketches)
|
dbms_target_link_libraries(PUBLIC ch_contrib::datasketches)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4)
|
target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4)
|
||||||
|
@ -108,7 +108,6 @@ namespace ErrorCodes
|
|||||||
extern const int UNEXPECTED_PACKET_FROM_SERVER;
|
extern const int UNEXPECTED_PACKET_FROM_SERVER;
|
||||||
extern const int INVALID_USAGE_OF_INPUT;
|
extern const int INVALID_USAGE_OF_INPUT;
|
||||||
extern const int CANNOT_SET_SIGNAL_HANDLER;
|
extern const int CANNOT_SET_SIGNAL_HANDLER;
|
||||||
extern const int UNRECOGNIZED_ARGUMENTS;
|
|
||||||
extern const int LOGICAL_ERROR;
|
extern const int LOGICAL_ERROR;
|
||||||
extern const int CANNOT_OPEN_FILE;
|
extern const int CANNOT_OPEN_FILE;
|
||||||
extern const int FILE_ALREADY_EXISTS;
|
extern const int FILE_ALREADY_EXISTS;
|
||||||
@ -309,9 +308,16 @@ public:
|
|||||||
|
|
||||||
ClientBase::~ClientBase()
|
ClientBase::~ClientBase()
|
||||||
{
|
{
|
||||||
writeSignalIDtoSignalPipe(SignalListener::StopThread);
|
try
|
||||||
signal_listener_thread.join();
|
{
|
||||||
HandledSignals::instance().reset();
|
writeSignalIDtoSignalPipe(SignalListener::StopThread);
|
||||||
|
signal_listener_thread.join();
|
||||||
|
HandledSignals::instance().reset();
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ClientBase::ClientBase(
|
ClientBase::ClientBase(
|
||||||
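The destructor change above wraps the signal-listener teardown in try/catch so that a failure during cleanup is logged instead of escaping the destructor. A minimal, self-contained sketch of that rule of thumb (tryLogCurrentException replaced by a plain stderr write):

#include <iostream>
#include <stdexcept>

struct SignalOwner
{
    ~SignalOwner()
    {
        try
        {
            stopSignalThread();
        }
        catch (...)
        {
            // An exception escaping a destructor would call std::terminate, so log and swallow it.
            std::cerr << "exception during ~SignalOwner, ignored\n";
        }
    }

    // Stand-in for writeSignalIDtoSignalPipe / thread join / HandledSignals reset.
    void stopSignalThread()
    {
        throw std::runtime_error("pipe already closed");
    }
};

int main()
{
    SignalOwner owner;   // destructor swallows the failure instead of terminating the process
}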
@ -2234,6 +2240,8 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
|
|||||||
ASTPtr parsed_query;
|
ASTPtr parsed_query;
|
||||||
std::unique_ptr<Exception> current_exception;
|
std::unique_ptr<Exception> current_exception;
|
||||||
|
|
||||||
|
size_t retries_count = 0;
|
||||||
|
|
||||||
while (true)
|
while (true)
|
||||||
{
|
{
|
||||||
auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
|
auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
|
||||||
@ -2314,7 +2322,12 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
|
|||||||
// Check whether the error (or its absence) matches the test hints
|
// Check whether the error (or its absence) matches the test hints
|
||||||
// (or their absence).
|
// (or their absence).
|
||||||
bool error_matches_hint = true;
|
bool error_matches_hint = true;
|
||||||
if (have_error)
|
bool need_retry = test_hint.needRetry(server_exception, &retries_count);
|
||||||
|
if (need_retry)
|
||||||
|
{
|
||||||
|
std::this_thread::sleep_for(std::chrono::seconds(1));
|
||||||
|
}
|
||||||
|
else if (have_error)
|
||||||
{
|
{
|
||||||
if (test_hint.hasServerErrors())
|
if (test_hint.hasServerErrors())
|
||||||
{
|
{
|
||||||
@ -2408,7 +2421,8 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
|
|||||||
if (have_error && !ignore_error)
|
if (have_error && !ignore_error)
|
||||||
return is_interactive;
|
return is_interactive;
|
||||||
|
|
||||||
this_query_begin = this_query_end;
|
if (!need_retry)
|
||||||
|
this_query_begin = this_query_end;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
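The two hunks above thread the new test-hint retry budget through executeMultiQuery: when the hint asks for a retry, the client sleeps and re-runs the same query instead of advancing to the next one. A rough standalone sketch of that control flow (runQuery and the retry predicate are placeholders, not the client's real API):

#include <chrono>
#include <cstddef>
#include <thread>

// Stand-in for executing one query; succeeds on the third attempt here.
static bool runQuery(size_t attempt)
{
    return attempt >= 2;
}

// Keep re-running the current query while a retry is requested and the budget allows it.
static void executeWithRetries(size_t max_retries)
{
    size_t retries_count = 0;
    while (true)
    {
        bool ok = runQuery(retries_count);
        bool need_retry = !ok && retries_count < max_retries;
        if (need_retry)
        {
            ++retries_count;
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;                 // do not advance this_query_begin
        }
        break;                        // success, or retries exhausted
    }
}

int main()
{
    executeWithRetries(5);
}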
@ -2833,168 +2847,6 @@ void ClientBase::showClientVersion()
|
|||||||
output_stream << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
|
output_stream << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace
|
|
||||||
{
|
|
||||||
|
|
||||||
/// Define transparent hash to we can use
|
|
||||||
/// std::string_view with the containers
|
|
||||||
struct TransparentStringHash
|
|
||||||
{
|
|
||||||
using is_transparent = void;
|
|
||||||
size_t operator()(std::string_view txt) const
|
|
||||||
{
|
|
||||||
return std::hash<std::string_view>{}(txt);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This functor is used to parse command line arguments and replace dashes with underscores,
|
|
||||||
* allowing options to be specified using either dashes or underscores.
|
|
||||||
*/
|
|
||||||
class OptionsAliasParser
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
explicit OptionsAliasParser(const boost::program_options::options_description& options)
|
|
||||||
{
|
|
||||||
options_names.reserve(options.options().size());
|
|
||||||
for (const auto& option : options.options())
|
|
||||||
options_names.insert(option->long_name());
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Parses arguments by replacing dashes with underscores, and matches the resulting name with known options
|
|
||||||
* Implements boost::program_options::ext_parser logic
|
|
||||||
*/
|
|
||||||
std::pair<std::string, std::string> operator()(const std::string & token) const
|
|
||||||
{
|
|
||||||
if (!token.starts_with("--"))
|
|
||||||
return {};
|
|
||||||
std::string arg = token.substr(2);
|
|
||||||
|
|
||||||
// divide token by '=' to separate key and value if options style=long_allow_adjacent
|
|
||||||
auto pos_eq = arg.find('=');
|
|
||||||
std::string key = arg.substr(0, pos_eq);
|
|
||||||
|
|
||||||
if (options_names.contains(key))
|
|
||||||
// option does not require any changes, because it is already correct
|
|
||||||
return {};
|
|
||||||
|
|
||||||
std::replace(key.begin(), key.end(), '-', '_');
|
|
||||||
if (!options_names.contains(key))
|
|
||||||
// after replacing '-' with '_' argument is still unknown
|
|
||||||
return {};
|
|
||||||
|
|
||||||
std::string value;
|
|
||||||
if (pos_eq != std::string::npos && pos_eq < arg.size())
|
|
||||||
value = arg.substr(pos_eq + 1);
|
|
||||||
|
|
||||||
return {key, value};
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
std::unordered_set<std::string> options_names;
|
|
||||||
};
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Enable optimizations even in debug builds because otherwise options parsing becomes extremely slow affecting .sh tests
|
|
||||||
#if defined(__clang__)
|
|
||||||
#pragma clang optimize on
|
|
||||||
#endif
|
|
||||||
void ClientBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments)
|
|
||||||
{
|
|
||||||
if (allow_repeated_settings)
|
|
||||||
addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value());
|
|
||||||
else
|
|
||||||
addProgramOptions(cmd_settings, options_description.main_description.value());
|
|
||||||
|
|
||||||
if (allow_merge_tree_settings)
|
|
||||||
{
|
|
||||||
/// Add merge tree settings manually, because names of some settings
|
|
||||||
/// may clash. Query settings have higher priority and we just
|
|
||||||
/// skip ambiguous merge tree settings.
|
|
||||||
auto & main_options = options_description.main_description.value();
|
|
||||||
|
|
||||||
std::unordered_set<std::string, TransparentStringHash, std::equal_to<>> main_option_names;
|
|
||||||
for (const auto & option : main_options.options())
|
|
||||||
main_option_names.insert(option->long_name());
|
|
||||||
|
|
||||||
for (const auto & setting : cmd_merge_tree_settings.all())
|
|
||||||
{
|
|
||||||
const auto add_setting = [&](const std::string_view name)
|
|
||||||
{
|
|
||||||
if (auto it = main_option_names.find(name); it != main_option_names.end())
|
|
||||||
return;
|
|
||||||
|
|
||||||
if (allow_repeated_settings)
|
|
||||||
addProgramOptionAsMultitoken(cmd_merge_tree_settings, main_options, name, setting);
|
|
||||||
else
|
|
||||||
addProgramOption(cmd_merge_tree_settings, main_options, name, setting);
|
|
||||||
};
|
|
||||||
|
|
||||||
const auto & setting_name = setting.getName();
|
|
||||||
|
|
||||||
add_setting(setting_name);
|
|
||||||
|
|
||||||
const auto & settings_to_aliases = MergeTreeSettings::Traits::settingsToAliases();
|
|
||||||
if (auto it = settings_to_aliases.find(setting_name); it != settings_to_aliases.end())
|
|
||||||
{
|
|
||||||
for (const auto alias : it->second)
|
|
||||||
{
|
|
||||||
add_setting(alias);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse main commandline options.
|
|
||||||
auto parser = po::command_line_parser(arguments)
|
|
||||||
.options(options_description.main_description.value())
|
|
||||||
.extra_parser(OptionsAliasParser(options_description.main_description.value()))
|
|
||||||
.allow_unregistered();
|
|
||||||
po::parsed_options parsed = parser.run();
|
|
||||||
|
|
||||||
/// Check unrecognized options without positional options.
|
|
||||||
auto unrecognized_options = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::exclude_positional);
|
|
||||||
if (!unrecognized_options.empty())
|
|
||||||
{
|
|
||||||
auto hints = this->getHints(unrecognized_options[0]);
|
|
||||||
if (!hints.empty())
|
|
||||||
throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'. Maybe you meant {}",
|
|
||||||
unrecognized_options[0], toString(hints));
|
|
||||||
|
|
||||||
throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'", unrecognized_options[0]);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Check positional options.
|
|
||||||
for (const auto & op : parsed.options)
|
|
||||||
{
|
|
||||||
if (!op.unregistered && op.string_key.empty() && !op.original_tokens[0].starts_with("--")
|
|
||||||
&& !op.original_tokens[0].empty() && !op.value.empty())
|
|
||||||
{
|
|
||||||
/// Two special cases for better usability:
|
|
||||||
/// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1"
|
|
||||||
/// These are relevant for interactive usage - user-friendly, but questionable in general.
|
|
||||||
/// In case of ambiguity or for scripts, prefer using proper options.
|
|
||||||
|
|
||||||
const auto & token = op.original_tokens[0];
|
|
||||||
po::variable_value value(boost::any(op.value), false);
|
|
||||||
|
|
||||||
const char * option;
|
|
||||||
if (token.contains(' '))
|
|
||||||
option = "query";
|
|
||||||
else
|
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token);
|
|
||||||
|
|
||||||
if (!options.emplace(option, value).second)
|
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
po::store(parsed, options);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void ClientBase::init(int argc, char ** argv)
|
void ClientBase::init(int argc, char ** argv)
|
||||||
{
|
{
|
||||||
namespace po = boost::program_options;
|
namespace po = boost::program_options;
|
||||||
|
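The large block removed above (the transparent hash, OptionsAliasParser and parseAndCheckOptions) is re-added verbatim in the new ClientBaseOptimizedParts.cpp below so it can be compiled with -O3. For context, a trimmed standalone sketch of how such an extra parser plugs into boost::program_options, letting --max-threads and --max_threads both resolve to the registered max_threads option (names here are illustrative):

#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_set>
#include <utility>

#include <boost/program_options.hpp>

namespace po = boost::program_options;

// Trimmed-down version of the OptionsAliasParser idea: an extra_parser that
// rewrites a dashed spelling of a known option into its underscored name.
struct DashUnderscoreAlias
{
    std::unordered_set<std::string> names;

    explicit DashUnderscoreAlias(const po::options_description & desc)
    {
        for (const auto & option : desc.options())
            names.insert(option->long_name());
    }

    std::pair<std::string, std::string> operator()(const std::string & token) const
    {
        if (!token.starts_with("--"))
            return {};
        std::string arg = token.substr(2);
        auto pos_eq = arg.find('=');
        std::string key = arg.substr(0, pos_eq);
        if (names.contains(key))
            return {};                                   // already a known spelling
        std::replace(key.begin(), key.end(), '-', '_');
        if (!names.contains(key))
            return {};                                   // still unknown: let boost report it
        std::string value = pos_eq == std::string::npos ? "" : arg.substr(pos_eq + 1);
        return {key, value};
    }
};

int main(int argc, char ** argv)
{
    po::options_description desc("options");
    desc.add_options()("max_threads", po::value<int>()->default_value(1), "thread count");

    po::variables_map vm;
    po::store(po::command_line_parser(argc, argv)
                  .options(desc)
                  .extra_parser(DashUnderscoreAlias(desc))
                  .run(), vm);
    std::cout << "max_threads = " << vm["max_threads"].as<int>() << '\n';   // try --max-threads=8
}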
@ -6,7 +6,6 @@
|
|||||||
#include <Common/ProgressIndication.h>
|
#include <Common/ProgressIndication.h>
|
||||||
#include <Common/InterruptListener.h>
|
#include <Common/InterruptListener.h>
|
||||||
#include <Common/ShellCommand.h>
|
#include <Common/ShellCommand.h>
|
||||||
#include <Common/QueryFuzzer.h>
|
|
||||||
#include <Common/Stopwatch.h>
|
#include <Common/Stopwatch.h>
|
||||||
#include <Common/DNSResolver.h>
|
#include <Common/DNSResolver.h>
|
||||||
#include <Core/ExternalTable.h>
|
#include <Core/ExternalTable.h>
|
||||||
@ -17,6 +16,7 @@
|
|||||||
#include <Poco/SplitterChannel.h>
|
#include <Poco/SplitterChannel.h>
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Client/Suggest.h>
|
#include <Client/Suggest.h>
|
||||||
|
#include <Client/QueryFuzzer.h>
|
||||||
#include <boost/program_options.hpp>
|
#include <boost/program_options.hpp>
|
||||||
#include <Storages/StorageFile.h>
|
#include <Storages/StorageFile.h>
|
||||||
#include <Storages/SelectQueryInfo.h>
|
#include <Storages/SelectQueryInfo.h>
|
||||||
|
176
src/Client/ClientBaseOptimizedParts.cpp
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
#include <Client/ClientBase.h>
|
||||||
|
#include <Core/BaseSettingsProgramOptions.h>
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Program options parsing is very slow in debug builds and it affects .sh tests
|
||||||
|
* causing them to timeout sporadically.
|
||||||
|
* It seems impossible to enable optimizations for a single function (only to disable them), so
|
||||||
|
* instead we extract the code to a separate source file and compile it with different options.
|
||||||
|
*/
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int BAD_ARGUMENTS;
|
||||||
|
extern const int UNRECOGNIZED_ARGUMENTS;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace
|
||||||
|
{
|
||||||
|
|
||||||
|
/// Define transparent hash to we can use
|
||||||
|
/// std::string_view with the containers
|
||||||
|
struct TransparentStringHash
|
||||||
|
{
|
||||||
|
using is_transparent = void;
|
||||||
|
size_t operator()(std::string_view txt) const
|
||||||
|
{
|
||||||
|
return std::hash<std::string_view>{}(txt);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This functor is used to parse command line arguments and replace dashes with underscores,
|
||||||
|
* allowing options to be specified using either dashes or underscores.
|
||||||
|
*/
|
||||||
|
class OptionsAliasParser
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
explicit OptionsAliasParser(const boost::program_options::options_description& options)
|
||||||
|
{
|
||||||
|
options_names.reserve(options.options().size());
|
||||||
|
for (const auto& option : options.options())
|
||||||
|
options_names.insert(option->long_name());
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Parses arguments by replacing dashes with underscores, and matches the resulting name with known options
|
||||||
|
* Implements boost::program_options::ext_parser logic
|
||||||
|
*/
|
||||||
|
std::pair<std::string, std::string> operator()(const std::string & token) const
|
||||||
|
{
|
||||||
|
if (!token.starts_with("--"))
|
||||||
|
return {};
|
||||||
|
std::string arg = token.substr(2);
|
||||||
|
|
||||||
|
// divide token by '=' to separate key and value if options style=long_allow_adjacent
|
||||||
|
auto pos_eq = arg.find('=');
|
||||||
|
std::string key = arg.substr(0, pos_eq);
|
||||||
|
|
||||||
|
if (options_names.contains(key))
|
||||||
|
// option does not require any changes, because it is already correct
|
||||||
|
return {};
|
||||||
|
|
||||||
|
std::replace(key.begin(), key.end(), '-', '_');
|
||||||
|
if (!options_names.contains(key))
|
||||||
|
// after replacing '-' with '_' argument is still unknown
|
||||||
|
return {};
|
||||||
|
|
||||||
|
std::string value;
|
||||||
|
if (pos_eq != std::string::npos && pos_eq < arg.size())
|
||||||
|
value = arg.substr(pos_eq + 1);
|
||||||
|
|
||||||
|
return {key, value};
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::unordered_set<std::string> options_names;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
void ClientBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments)
|
||||||
|
{
|
||||||
|
if (allow_repeated_settings)
|
||||||
|
addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value());
|
||||||
|
else
|
||||||
|
addProgramOptions(cmd_settings, options_description.main_description.value());
|
||||||
|
|
||||||
|
if (allow_merge_tree_settings)
|
||||||
|
{
|
||||||
|
/// Add merge tree settings manually, because names of some settings
|
||||||
|
/// may clash. Query settings have higher priority and we just
|
||||||
|
/// skip ambiguous merge tree settings.
|
||||||
|
auto & main_options = options_description.main_description.value();
|
||||||
|
|
||||||
|
std::unordered_set<std::string, TransparentStringHash, std::equal_to<>> main_option_names;
|
||||||
|
for (const auto & option : main_options.options())
|
||||||
|
main_option_names.insert(option->long_name());
|
||||||
|
|
||||||
|
for (const auto & setting : cmd_merge_tree_settings.all())
|
||||||
|
{
|
||||||
|
const auto add_setting = [&](const std::string_view name)
|
||||||
|
{
|
||||||
|
if (auto it = main_option_names.find(name); it != main_option_names.end())
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (allow_repeated_settings)
|
||||||
|
addProgramOptionAsMultitoken(cmd_merge_tree_settings, main_options, name, setting);
|
||||||
|
else
|
||||||
|
addProgramOption(cmd_merge_tree_settings, main_options, name, setting);
|
||||||
|
};
|
||||||
|
|
||||||
|
const auto & setting_name = setting.getName();
|
||||||
|
|
||||||
|
add_setting(setting_name);
|
||||||
|
|
||||||
|
const auto & settings_to_aliases = MergeTreeSettings::Traits::settingsToAliases();
|
||||||
|
if (auto it = settings_to_aliases.find(setting_name); it != settings_to_aliases.end())
|
||||||
|
{
|
||||||
|
for (const auto alias : it->second)
|
||||||
|
{
|
||||||
|
add_setting(alias);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse main commandline options.
|
||||||
|
auto parser = po::command_line_parser(arguments)
|
||||||
|
.options(options_description.main_description.value())
|
||||||
|
.extra_parser(OptionsAliasParser(options_description.main_description.value()))
|
||||||
|
.allow_unregistered();
|
||||||
|
po::parsed_options parsed = parser.run();
|
||||||
|
|
||||||
|
/// Check unrecognized options without positional options.
|
||||||
|
auto unrecognized_options = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::exclude_positional);
|
||||||
|
if (!unrecognized_options.empty())
|
||||||
|
{
|
||||||
|
auto hints = this->getHints(unrecognized_options[0]);
|
||||||
|
if (!hints.empty())
|
||||||
|
throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'. Maybe you meant {}",
|
||||||
|
unrecognized_options[0], toString(hints));
|
||||||
|
|
||||||
|
throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'", unrecognized_options[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check positional options.
|
||||||
|
for (const auto & op : parsed.options)
|
||||||
|
{
|
||||||
|
if (!op.unregistered && op.string_key.empty() && !op.original_tokens[0].starts_with("--")
|
||||||
|
&& !op.original_tokens[0].empty() && !op.value.empty())
|
||||||
|
{
|
||||||
|
/// Two special cases for better usability:
|
||||||
|
/// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1"
|
||||||
|
/// These are relevant for interactive usage - user-friendly, but questionable in general.
|
||||||
|
/// In case of ambiguity or for scripts, prefer using proper options.
|
||||||
|
|
||||||
|
const auto & token = op.original_tokens[0];
|
||||||
|
po::variable_value value(boost::any(op.value), false);
|
||||||
|
|
||||||
|
const char * option;
|
||||||
|
if (token.contains(' '))
|
||||||
|
option = "query";
|
||||||
|
else
|
||||||
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token);
|
||||||
|
|
||||||
|
if (!options.emplace(option, value).second)
|
||||||
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
po::store(parsed, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
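The new file above carries over TransparentStringHash, which, combined with std::equal_to<>, enables C++20 heterogeneous lookup: setting names can be checked against the option set through a std::string_view without materialising a temporary std::string. A minimal usage sketch:

#include <cassert>
#include <functional>
#include <string>
#include <string_view>
#include <unordered_set>

struct TransparentStringHash
{
    using is_transparent = void;
    size_t operator()(std::string_view txt) const
    {
        return std::hash<std::string_view>{}(txt);
    }
};

int main()
{
    std::unordered_set<std::string, TransparentStringHash, std::equal_to<>> option_names;
    option_names.insert("max_threads");

    std::string_view candidate = "max_threads";          // no temporary std::string is allocated for the lookup
    assert(option_names.find(candidate) != option_names.end());
}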
@ -68,21 +68,22 @@ Field QueryFuzzer::getRandomField(int type)
|
|||||||
{
|
{
|
||||||
case 0:
|
case 0:
|
||||||
{
|
{
|
||||||
return bad_int64_values[fuzz_rand() % std::size(bad_int64_values)];
|
return bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values)
|
||||||
|
/ sizeof(*bad_int64_values))];
|
||||||
}
|
}
|
||||||
case 1:
|
case 1:
|
||||||
{
|
{
|
||||||
static constexpr double values[]
|
static constexpr double values[]
|
||||||
= {NAN, INFINITY, -INFINITY, 0., -0., 0.0001, 0.5, 0.9999,
|
= {NAN, INFINITY, -INFINITY, 0., -0., 0.0001, 0.5, 0.9999,
|
||||||
1., 1.0001, 2., 10.0001, 100.0001, 1000.0001, 1e10, 1e20,
|
1., 1.0001, 2., 10.0001, 100.0001, 1000.0001, 1e10, 1e20,
|
||||||
FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % std::size(values)];
|
FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % (sizeof(values) / sizeof(*values))];
|
||||||
}
|
}
|
||||||
case 2:
|
case 2:
|
||||||
{
|
{
|
||||||
static constexpr UInt64 scales[] = {0, 1, 2, 10};
|
static constexpr UInt64 scales[] = {0, 1, 2, 10};
|
||||||
return DecimalField<Decimal64>(
|
return DecimalField<Decimal64>(
|
||||||
bad_int64_values[fuzz_rand() % std::size(bad_int64_values)],
|
bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) / sizeof(*bad_int64_values))],
|
||||||
static_cast<UInt32>(scales[fuzz_rand() % std::size(scales)])
|
static_cast<UInt32>(scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))])
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
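The getRandomField hunk above replaces the sizeof(array) / sizeof(*array) idiom with std::size, which yields the same element count but refuses to compile if the argument has decayed to a pointer. A small sketch:

#include <cstdint>
#include <cstdio>
#include <iterator>

static constexpr int64_t bad_int64_values[] = {-2, -1, 0, 1, 2, 3, 7, 10, 100, 255, 256};

int main()
{
    uint64_t fuzz_rand = 12345;
    // Equivalent to sizeof(bad_int64_values) / sizeof(*bad_int64_values), but type-safe.
    int64_t picked = bad_int64_values[fuzz_rand % std::size(bad_int64_values)];
    std::printf("%lld\n", static_cast<long long>(picked));
}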
@ -164,8 +165,7 @@ Field QueryFuzzer::fuzzField(Field field)
|
|||||||
{
|
{
|
||||||
size_t pos = fuzz_rand() % arr.size();
|
size_t pos = fuzz_rand() % arr.size();
|
||||||
arr.erase(arr.begin() + pos);
|
arr.erase(arr.begin() + pos);
|
||||||
if (debug_stream)
|
std::cerr << "erased\n";
|
||||||
*debug_stream << "erased\n";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (fuzz_rand() % 5 == 0)
|
if (fuzz_rand() % 5 == 0)
|
||||||
@ -174,14 +174,12 @@ Field QueryFuzzer::fuzzField(Field field)
|
|||||||
{
|
{
|
||||||
size_t pos = fuzz_rand() % arr.size();
|
size_t pos = fuzz_rand() % arr.size();
|
||||||
arr.insert(arr.begin() + pos, fuzzField(arr[pos]));
|
arr.insert(arr.begin() + pos, fuzzField(arr[pos]));
|
||||||
if (debug_stream)
|
std::cerr << fmt::format("inserted (pos {})\n", pos);
|
||||||
*debug_stream << fmt::format("inserted (pos {})\n", pos);
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
arr.insert(arr.begin(), getRandomField(0));
|
arr.insert(arr.begin(), getRandomField(0));
|
||||||
if (debug_stream)
|
std::cerr << "inserted (0)\n";
|
||||||
*debug_stream << "inserted (0)\n";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@ -199,9 +197,7 @@ Field QueryFuzzer::fuzzField(Field field)
|
|||||||
{
|
{
|
||||||
size_t pos = fuzz_rand() % arr.size();
|
size_t pos = fuzz_rand() % arr.size();
|
||||||
arr.erase(arr.begin() + pos);
|
arr.erase(arr.begin() + pos);
|
||||||
|
std::cerr << "erased\n";
|
||||||
if (debug_stream)
|
|
||||||
*debug_stream << "erased\n";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (fuzz_rand() % 5 == 0)
|
if (fuzz_rand() % 5 == 0)
|
||||||
@ -210,16 +206,12 @@ Field QueryFuzzer::fuzzField(Field field)
|
|||||||
{
|
{
|
||||||
size_t pos = fuzz_rand() % arr.size();
|
size_t pos = fuzz_rand() % arr.size();
|
||||||
arr.insert(arr.begin() + pos, fuzzField(arr[pos]));
|
arr.insert(arr.begin() + pos, fuzzField(arr[pos]));
|
||||||
|
std::cerr << fmt::format("inserted (pos {})\n", pos);
|
||||||
if (debug_stream)
|
|
||||||
*debug_stream << fmt::format("inserted (pos {})\n", pos);
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
arr.insert(arr.begin(), getRandomField(0));
|
arr.insert(arr.begin(), getRandomField(0));
|
||||||
|
std::cerr << "inserted (0)\n";
|
||||||
if (debug_stream)
|
|
||||||
*debug_stream << "inserted (0)\n";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@ -352,8 +344,7 @@ void QueryFuzzer::fuzzOrderByList(IAST * ast)
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
if (debug_stream)
|
std::cerr << "No random column.\n";
|
||||||
*debug_stream << "No random column.\n";
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -387,8 +378,7 @@ void QueryFuzzer::fuzzColumnLikeExpressionList(IAST * ast)
|
|||||||
if (col)
|
if (col)
|
||||||
impl->children.insert(pos, col);
|
impl->children.insert(pos, col);
|
||||||
else
|
else
|
||||||
if (debug_stream)
|
std::cerr << "No random column.\n";
|
||||||
*debug_stream << "No random column.\n";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We don't have to recurse here to fuzz the children, this is handled by
|
// We don't have to recurse here to fuzz the children, this is handled by
|
||||||
@ -1371,15 +1361,11 @@ void QueryFuzzer::fuzzMain(ASTPtr & ast)
|
|||||||
collectFuzzInfoMain(ast);
|
collectFuzzInfoMain(ast);
|
||||||
fuzz(ast);
|
fuzz(ast);
|
||||||
|
|
||||||
if (out_stream)
|
std::cout << std::endl;
|
||||||
{
|
WriteBufferFromOStream ast_buf(std::cout, 4096);
|
||||||
*out_stream << std::endl;
|
formatAST(*ast, ast_buf, false /*highlight*/);
|
||||||
|
ast_buf.finalize();
|
||||||
WriteBufferFromOStream ast_buf(*out_stream, 4096);
|
std::cout << std::endl << std::endl;
|
||||||
formatAST(*ast, ast_buf, false /*highlight*/);
|
|
||||||
ast_buf.finalize();
|
|
||||||
*out_stream << std::endl << std::endl;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
@ -35,31 +35,9 @@ struct ASTWindowDefinition;
|
|||||||
* queries, so you want to feed it a lot of queries to get some interesting mix
|
* queries, so you want to feed it a lot of queries to get some interesting mix
|
||||||
* of them. Normally we feed SQL regression tests to it.
|
* of them. Normally we feed SQL regression tests to it.
|
||||||
*/
|
*/
|
||||||
class QueryFuzzer
|
struct QueryFuzzer
|
||||||
{
|
{
|
||||||
public:
|
pcg64 fuzz_rand{randomSeed()};
|
||||||
explicit QueryFuzzer(pcg64 fuzz_rand_ = randomSeed(), std::ostream * out_stream_ = nullptr, std::ostream * debug_stream_ = nullptr)
|
|
||||||
: fuzz_rand(fuzz_rand_)
|
|
||||||
, out_stream(out_stream_)
|
|
||||||
, debug_stream(debug_stream_)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is the only function you have to call -- it will modify the passed
|
|
||||||
// ASTPtr to point to new AST with some random changes.
|
|
||||||
void fuzzMain(ASTPtr & ast);
|
|
||||||
|
|
||||||
ASTs getInsertQueriesForFuzzedTables(const String & full_query);
|
|
||||||
ASTs getDropQueriesForFuzzedTables(const ASTDropQuery & drop_query);
|
|
||||||
void notifyQueryFailed(ASTPtr ast);
|
|
||||||
|
|
||||||
static bool isSuitableForFuzzing(const ASTCreateQuery & create);
|
|
||||||
|
|
||||||
private:
|
|
||||||
pcg64 fuzz_rand;
|
|
||||||
|
|
||||||
std::ostream * out_stream = nullptr;
|
|
||||||
std::ostream * debug_stream = nullptr;
|
|
||||||
|
|
||||||
// We add elements to expression lists with fixed probability. Some elements
|
// We add elements to expression lists with fixed probability. Some elements
|
||||||
// are so large, that the expected number of elements we add to them is
|
// are so large, that the expected number of elements we add to them is
|
||||||
@ -88,6 +66,10 @@ private:
|
|||||||
std::unordered_map<std::string, size_t> index_of_fuzzed_table;
|
std::unordered_map<std::string, size_t> index_of_fuzzed_table;
|
||||||
std::set<IAST::Hash> created_tables_hashes;
|
std::set<IAST::Hash> created_tables_hashes;
|
||||||
|
|
||||||
|
// This is the only function you have to call -- it will modify the passed
|
||||||
|
// ASTPtr to point to new AST with some random changes.
|
||||||
|
void fuzzMain(ASTPtr & ast);
|
||||||
|
|
||||||
// Various helper functions follow, normally you shouldn't have to call them.
|
// Various helper functions follow, normally you shouldn't have to call them.
|
||||||
Field getRandomField(int type);
|
Field getRandomField(int type);
|
||||||
Field fuzzField(Field field);
|
Field fuzzField(Field field);
|
||||||
@ -95,6 +77,9 @@ private:
|
|||||||
ASTPtr getRandomExpressionList();
|
ASTPtr getRandomExpressionList();
|
||||||
DataTypePtr fuzzDataType(DataTypePtr type);
|
DataTypePtr fuzzDataType(DataTypePtr type);
|
||||||
DataTypePtr getRandomType();
|
DataTypePtr getRandomType();
|
||||||
|
ASTs getInsertQueriesForFuzzedTables(const String & full_query);
|
||||||
|
ASTs getDropQueriesForFuzzedTables(const ASTDropQuery & drop_query);
|
||||||
|
void notifyQueryFailed(ASTPtr ast);
|
||||||
void replaceWithColumnLike(ASTPtr & ast);
|
void replaceWithColumnLike(ASTPtr & ast);
|
||||||
void replaceWithTableLike(ASTPtr & ast);
|
void replaceWithTableLike(ASTPtr & ast);
|
||||||
void fuzzOrderByElement(ASTOrderByElement * elem);
|
void fuzzOrderByElement(ASTOrderByElement * elem);
|
||||||
@ -117,6 +102,8 @@ private:
|
|||||||
void addTableLike(ASTPtr ast);
|
void addTableLike(ASTPtr ast);
|
||||||
void addColumnLike(ASTPtr ast);
|
void addColumnLike(ASTPtr ast);
|
||||||
void collectFuzzInfoRecurse(ASTPtr ast);
|
void collectFuzzInfoRecurse(ASTPtr ast);
|
||||||
|
|
||||||
|
static bool isSuitableForFuzzing(const ASTCreateQuery & create);
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
@ -10,6 +10,7 @@
|
|||||||
namespace DB::ErrorCodes
|
namespace DB::ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int CANNOT_PARSE_TEXT;
|
extern const int CANNOT_PARSE_TEXT;
|
||||||
|
extern const int OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -62,9 +63,28 @@ bool TestHint::hasExpectedServerError(int error)
|
|||||||
return std::find(server_errors.begin(), server_errors.end(), error) != server_errors.end();
|
return std::find(server_errors.begin(), server_errors.end(), error) != server_errors.end();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool TestHint::needRetry(const std::unique_ptr<Exception> & server_exception, size_t * retries_counter)
|
||||||
|
{
|
||||||
|
chassert(retries_counter);
|
||||||
|
if (max_retries <= *retries_counter)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
++*retries_counter;
|
||||||
|
|
||||||
|
int error = ErrorCodes::OK;
|
||||||
|
if (server_exception)
|
||||||
|
error = server_exception->code();
|
||||||
|
|
||||||
|
|
||||||
|
if (retry_until)
|
||||||
|
return !hasExpectedServerError(error); /// retry until we get the expected error
|
||||||
|
else
|
||||||
|
return hasExpectedServerError(error); /// retry while we have the expected error
|
||||||
|
}
|
||||||
|
|
||||||
void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint)
|
void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint)
|
||||||
{
|
{
|
||||||
std::unordered_set<std::string_view> commands{"echo", "echoOn", "echoOff"};
|
std::unordered_set<std::string_view> commands{"echo", "echoOn", "echoOff", "retry"};
|
||||||
|
|
||||||
std::unordered_set<std::string_view> command_errors{
|
std::unordered_set<std::string_view> command_errors{
|
||||||
"serverError",
|
"serverError",
|
||||||
@ -73,6 +93,9 @@ void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint)
|
|||||||
|
|
||||||
for (Token token = comment_lexer.nextToken(); !token.isEnd(); token = comment_lexer.nextToken())
|
for (Token token = comment_lexer.nextToken(); !token.isEnd(); token = comment_lexer.nextToken())
|
||||||
{
|
{
|
||||||
|
if (token.type == TokenType::Whitespace)
|
||||||
|
continue;
|
||||||
|
|
||||||
String item = String(token.begin, token.end);
|
String item = String(token.begin, token.end);
|
||||||
if (token.type == TokenType::BareWord && commands.contains(item))
|
if (token.type == TokenType::BareWord && commands.contains(item))
|
||||||
{
|
{
|
||||||
@ -82,6 +105,30 @@ void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint)
|
|||||||
echo.emplace(true);
|
echo.emplace(true);
|
||||||
if (item == "echoOff")
|
if (item == "echoOff")
|
||||||
echo.emplace(false);
|
echo.emplace(false);
|
||||||
|
|
||||||
|
if (item == "retry")
|
||||||
|
{
|
||||||
|
token = comment_lexer.nextToken();
|
||||||
|
while (token.type == TokenType::Whitespace)
|
||||||
|
token = comment_lexer.nextToken();
|
||||||
|
|
||||||
|
if (token.type != TokenType::Number)
|
||||||
|
throw DB::Exception(DB::ErrorCodes::CANNOT_PARSE_TEXT, "Could not parse the number of retries: {}",
|
||||||
|
std::string_view(token.begin, token.end));
|
||||||
|
|
||||||
|
max_retries = std::stoul(std::string(token.begin, token.end));
|
||||||
|
|
||||||
|
token = comment_lexer.nextToken();
|
||||||
|
while (token.type == TokenType::Whitespace)
|
||||||
|
token = comment_lexer.nextToken();
|
||||||
|
|
||||||
|
if (token.type != TokenType::BareWord ||
|
||||||
|
(std::string_view(token.begin, token.end) != "until" &&
|
||||||
|
std::string_view(token.begin, token.end) != "while"))
|
||||||
|
throw DB::Exception(DB::ErrorCodes::CANNOT_PARSE_TEXT, "Expected 'until' or 'while' after the number of retries, got: {}",
|
||||||
|
std::string_view(token.begin, token.end));
|
||||||
|
retry_until = std::string_view(token.begin, token.end) == "until";
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else if (!is_leading_hint && token.type == TokenType::BareWord && command_errors.contains(item))
|
else if (!is_leading_hint && token.type == TokenType::BareWord && command_errors.contains(item))
|
||||||
{
|
{
|
||||||
@ -133,6 +180,9 @@ void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (max_retries && server_errors.size() != 1)
|
||||||
|
throw DB::Exception(DB::ErrorCodes::CANNOT_PARSE_TEXT, "Expected one serverError after the 'retry N while|until' command");
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
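needRetry above implements the new 'retry N until|while serverError X' hint: 'until' keeps retrying as long as the expected error has not appeared yet, 'while' keeps retrying as long as it still appears, and N bounds the number of attempts. A standalone sketch of that decision:

#include <cassert>
#include <cstddef>

struct RetryHint
{
    size_t max_retries = 0;
    bool retry_until = false;    // true: "until", false: "while"
    int expected_error = 0;

    bool needRetry(int actual_error, size_t * retries_counter) const
    {
        if (max_retries <= *retries_counter)
            return false;                         // budget spent
        ++*retries_counter;
        bool has_expected = actual_error == expected_error;
        return retry_until ? !has_expected : has_expected;
    }
};

int main()
{
    RetryHint hint{.max_retries = 3, .retry_until = true, .expected_error = 57};
    size_t retries = 0;
    assert(hint.needRetry(0, &retries));    // expected error not seen yet -> retry
    assert(!hint.needRetry(57, &retries));  // got the expected error -> stop
}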
@ -6,6 +6,7 @@
|
|||||||
#include <fmt/format.h>
|
#include <fmt/format.h>
|
||||||
|
|
||||||
#include <Core/Types.h>
|
#include <Core/Types.h>
|
||||||
|
#include <Common/Exception.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -65,12 +66,17 @@ public:
|
|||||||
bool hasExpectedClientError(int error);
|
bool hasExpectedClientError(int error);
|
||||||
bool hasExpectedServerError(int error);
|
bool hasExpectedServerError(int error);
|
||||||
|
|
||||||
|
bool needRetry(const std::unique_ptr<Exception> & server_exception, size_t * retries_counter);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const String & query;
|
const String & query;
|
||||||
ErrorVector server_errors{};
|
ErrorVector server_errors{};
|
||||||
ErrorVector client_errors{};
|
ErrorVector client_errors{};
|
||||||
std::optional<bool> echo;
|
std::optional<bool> echo;
|
||||||
|
|
||||||
|
size_t max_retries = 0;
|
||||||
|
bool retry_until = false;
|
||||||
|
|
||||||
void parse(Lexer & comment_lexer, bool is_leading_hint);
|
void parse(Lexer & comment_lexer, bool is_leading_hint);
|
||||||
|
|
||||||
bool allErrorsExpected(int actual_server_error, int actual_client_error) const
|
bool allErrorsExpected(int actual_server_error, int actual_client_error) const
|
||||||
|
@ -11,6 +11,7 @@
|
|||||||
#include <base/cgroupsv2.h>
|
#include <base/cgroupsv2.h>
|
||||||
#include <base/getMemoryAmount.h>
|
#include <base/getMemoryAmount.h>
|
||||||
#include <base/sleep.h>
|
#include <base/sleep.h>
|
||||||
|
#include <fmt/ranges.h>
|
||||||
|
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
@ -45,26 +46,33 @@ namespace
|
|||||||
/// kernel 5
|
/// kernel 5
|
||||||
/// rss 15
|
/// rss 15
|
||||||
/// [...]
|
/// [...]
|
||||||
uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
|
using Metrics = std::map<std::string, uint64_t>;
|
||||||
|
|
||||||
|
Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf)
|
||||||
{
|
{
|
||||||
|
Metrics metrics;
|
||||||
while (!buf.eof())
|
while (!buf.eof())
|
||||||
{
|
{
|
||||||
std::string current_key;
|
std::string current_key;
|
||||||
readStringUntilWhitespace(current_key, buf);
|
readStringUntilWhitespace(current_key, buf);
|
||||||
if (current_key != key)
|
|
||||||
{
|
|
||||||
std::string dummy;
|
|
||||||
readStringUntilNewlineInto(dummy, buf);
|
|
||||||
buf.ignore();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
assertChar(' ', buf);
|
assertChar(' ', buf);
|
||||||
|
|
||||||
uint64_t value = 0;
|
uint64_t value = 0;
|
||||||
readIntText(value, buf);
|
readIntText(value, buf);
|
||||||
return value;
|
assertChar('\n', buf);
|
||||||
}
|
|
||||||
|
|
||||||
|
auto [_, inserted] = metrics.emplace(std::move(current_key), value);
|
||||||
|
chassert(inserted, "Duplicate keys in stat file");
|
||||||
|
}
|
||||||
|
return metrics;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
|
||||||
|
{
|
||||||
|
const auto all_metrics = readAllMetricsFromStatFile(buf);
|
||||||
|
if (const auto it = all_metrics.find(key); it != all_metrics.end())
|
||||||
|
return it->second;
|
||||||
throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName());
|
throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -79,6 +87,13 @@ struct CgroupsV1Reader : ICgroupsReader
|
|||||||
return readMetricFromStatFile(buf, "rss");
|
return readMetricFromStatFile(buf, "rss");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::string dumpAllStats() override
|
||||||
|
{
|
||||||
|
std::lock_guard lock(mutex);
|
||||||
|
buf.rewind();
|
||||||
|
return fmt::format("{}", readAllMetricsFromStatFile(buf));
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
std::mutex mutex;
|
std::mutex mutex;
|
||||||
ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
|
ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
|
||||||
@ -106,6 +121,13 @@ struct CgroupsV2Reader : ICgroupsReader
|
|||||||
return mem_usage;
|
return mem_usage;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::string dumpAllStats() override
|
||||||
|
{
|
||||||
|
std::lock_guard lock(mutex);
|
||||||
|
stat_buf.rewind();
|
||||||
|
return fmt::format("{}", readAllMetricsFromStatFile(stat_buf));
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
std::mutex mutex;
|
std::mutex mutex;
|
||||||
ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex);
|
ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex);
|
||||||
@ -178,10 +200,7 @@ CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait
|
|||||||
{
|
{
|
||||||
const auto [cgroup_path, version] = getCgroupsPath();
|
const auto [cgroup_path, version] = getCgroupsPath();
|
||||||
|
|
||||||
if (version == CgroupsVersion::V2)
|
cgroup_reader = createCgroupsReader(version, cgroup_path);
|
||||||
cgroup_reader = std::make_unique<CgroupsV2Reader>(cgroup_path);
|
|
||||||
else
|
|
||||||
cgroup_reader = std::make_unique<CgroupsV1Reader>(cgroup_path);
|
|
||||||
|
|
||||||
LOG_INFO(
|
LOG_INFO(
|
||||||
log,
|
log,
|
||||||
@ -234,7 +253,12 @@ void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint
|
|||||||
# endif
|
# endif
|
||||||
/// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them.
|
/// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them.
|
||||||
uint64_t memory_usage = cgroup_reader->readMemoryUsage();
|
uint64_t memory_usage = cgroup_reader->readMemoryUsage();
|
||||||
LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage));
|
LOG_TRACE(
|
||||||
|
log,
|
||||||
|
"Read current memory usage {} bytes ({}) from cgroups, full available stats: {}",
|
||||||
|
memory_usage,
|
||||||
|
ReadableSize(memory_usage),
|
||||||
|
cgroup_reader->dumpAllStats());
|
||||||
MemoryTracker::setRSS(memory_usage, 0);
|
MemoryTracker::setRSS(memory_usage, 0);
|
||||||
|
|
||||||
LOG_INFO(log, "Purged jemalloc arenas. Current memory usage is {}", ReadableSize(memory_usage));
|
LOG_INFO(log, "Purged jemalloc arenas. Current memory usage is {}", ReadableSize(memory_usage));
|
||||||
@ -338,6 +362,13 @@ void CgroupsMemoryUsageObserver::runThread()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<ICgroupsReader> createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const fs::path & cgroup_path)
|
||||||
|
{
|
||||||
|
if (version == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
|
||||||
|
return std::make_unique<CgroupsV2Reader>(cgroup_path);
|
||||||
|
else
|
||||||
|
return std::make_unique<CgroupsV1Reader>(cgroup_path);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
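readAllMetricsFromStatFile above parses the whole memory.stat file into a map once, so readMetricFromStatFile becomes a simple lookup and dumpAllStats can print every counter for the new log line. A simplified sketch using std::istream instead of ClickHouse's ReadBufferFromFile:

#include <cstdint>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

using Metrics = std::map<std::string, uint64_t>;

// The cgroup stat file is a sequence of "<key> <value>" lines.
Metrics readAllMetrics(std::istream & in)
{
    Metrics metrics;
    std::string key;
    uint64_t value = 0;
    while (in >> key >> value)
        metrics.emplace(key, value);
    return metrics;
}

int main()
{
    std::istringstream stat("anon 10498048\nkernel 5\nrss 15\n");
    const auto metrics = readAllMetrics(stat);
    std::cout << "rss = " << metrics.at("rss") << '\n';
}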
@ -14,6 +14,8 @@ struct ICgroupsReader
|
|||||||
virtual ~ICgroupsReader() = default;
|
virtual ~ICgroupsReader() = default;
|
||||||
|
|
||||||
virtual uint64_t readMemoryUsage() = 0;
|
virtual uint64_t readMemoryUsage() = 0;
|
||||||
|
|
||||||
|
virtual std::string dumpAllStats() = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Does two things:
|
/// Does two things:
|
||||||
@ -81,6 +83,9 @@ private:
|
|||||||
bool quit = false;
|
bool quit = false;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
std::unique_ptr<ICgroupsReader>
|
||||||
|
createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const std::filesystem::path & cgroup_path);
|
||||||
|
|
||||||
#else
|
#else
|
||||||
class CgroupsMemoryUsageObserver
|
class CgroupsMemoryUsageObserver
|
||||||
{
|
{
|
||||||
|
@ -8,6 +8,7 @@
|
|||||||
#include <Common/ErrorCodes.h>
|
#include <Common/ErrorCodes.h>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <Common/LockMemoryExceptionInThread.h>
|
#include <Common/LockMemoryExceptionInThread.h>
|
||||||
|
#include <Common/Logger.h>
|
||||||
#include <Common/MemorySanitizer.h>
|
#include <Common/MemorySanitizer.h>
|
||||||
#include <Common/SensitiveDataMasker.h>
|
#include <Common/SensitiveDataMasker.h>
|
||||||
#include <Common/config_version.h>
|
#include <Common/config_version.h>
|
||||||
@ -100,7 +101,7 @@ Exception::Exception(const MessageMasked & msg_masked, int code, bool remote_)
|
|||||||
{
|
{
|
||||||
if (terminate_on_any_exception)
|
if (terminate_on_any_exception)
|
||||||
std::_Exit(terminate_status_code);
|
std::_Exit(terminate_status_code);
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
handle_error_code(msg_masked.msg, code, remote, getStackFramePointers());
|
handle_error_code(msg_masked.msg, code, remote, getStackFramePointers());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -110,7 +111,7 @@ Exception::Exception(MessageMasked && msg_masked, int code, bool remote_)
|
|||||||
{
|
{
|
||||||
if (terminate_on_any_exception)
|
if (terminate_on_any_exception)
|
||||||
std::_Exit(terminate_status_code);
|
std::_Exit(terminate_status_code);
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
handle_error_code(message(), code, remote, getStackFramePointers());
|
handle_error_code(message(), code, remote, getStackFramePointers());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -119,7 +120,7 @@ Exception::Exception(CreateFromPocoTag, const Poco::Exception & exc)
|
|||||||
{
|
{
|
||||||
if (terminate_on_any_exception)
|
if (terminate_on_any_exception)
|
||||||
std::_Exit(terminate_status_code);
|
std::_Exit(terminate_status_code);
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
#ifdef STD_EXCEPTION_HAS_STACK_TRACE
|
#ifdef STD_EXCEPTION_HAS_STACK_TRACE
|
||||||
auto * stack_trace_frames = exc.get_stack_trace_frames();
|
auto * stack_trace_frames = exc.get_stack_trace_frames();
|
||||||
auto stack_trace_size = exc.get_stack_trace_size();
|
auto stack_trace_size = exc.get_stack_trace_size();
|
||||||
@ -133,7 +134,7 @@ Exception::Exception(CreateFromSTDTag, const std::exception & exc)
|
|||||||
{
|
{
|
||||||
if (terminate_on_any_exception)
|
if (terminate_on_any_exception)
|
||||||
std::_Exit(terminate_status_code);
|
std::_Exit(terminate_status_code);
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
#ifdef STD_EXCEPTION_HAS_STACK_TRACE
|
#ifdef STD_EXCEPTION_HAS_STACK_TRACE
|
||||||
auto * stack_trace_frames = exc.get_stack_trace_frames();
|
auto * stack_trace_frames = exc.get_stack_trace_frames();
|
||||||
auto stack_trace_size = exc.get_stack_trace_size();
|
auto stack_trace_size = exc.get_stack_trace_size();
|
||||||
@ -223,10 +224,38 @@ Exception::FramePointers Exception::getStackFramePointers() const
|
|||||||
}
|
}
|
||||||
|
|
||||||
thread_local bool Exception::enable_job_stack_trace = false;
|
thread_local bool Exception::enable_job_stack_trace = false;
|
||||||
thread_local std::vector<StackTrace::FramePointers> Exception::thread_frame_pointers = {};
|
thread_local bool Exception::can_use_thread_frame_pointers = false;
|
||||||
|
thread_local Exception::ThreadFramePointers Exception::thread_frame_pointers;
|
||||||
|
|
||||||
|
Exception::ThreadFramePointers::ThreadFramePointers()
|
||||||
|
{
|
||||||
|
can_use_thread_frame_pointers = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Exception::ThreadFramePointers::~ThreadFramePointers()
|
||||||
|
{
|
||||||
|
can_use_thread_frame_pointers = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
Exception::ThreadFramePointersBase Exception::getThreadFramePointers()
|
||||||
|
{
|
||||||
|
if (can_use_thread_frame_pointers)
|
||||||
|
return thread_frame_pointers.frame_pointers;
|
||||||
|
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
void Exception::setThreadFramePointers(ThreadFramePointersBase frame_pointers)
|
||||||
|
{
|
||||||
|
if (can_use_thread_frame_pointers)
|
||||||
|
thread_frame_pointers.frame_pointers = std::move(frame_pointers);
|
||||||
|
}
|
||||||
|
|
||||||
static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message)
|
static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message)
|
||||||
{
|
{
|
||||||
|
if (!isLoggingEnabled())
|
||||||
|
return;
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
PreformattedMessage message = getCurrentExceptionMessageAndPattern(true);
|
PreformattedMessage message = getCurrentExceptionMessageAndPattern(true);
|
||||||
@ -242,6 +271,9 @@ static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string
|
|||||||
|
|
||||||
void tryLogCurrentException(const char * log_name, const std::string & start_of_message)
|
void tryLogCurrentException(const char * log_name, const std::string & start_of_message)
|
||||||
{
|
{
|
||||||
|
if (!isLoggingEnabled())
|
||||||
|
return;
|
||||||
|
|
||||||
/// Under high memory pressure, new allocations throw a
|
/// Under high memory pressure, new allocations throw a
|
||||||
/// MEMORY_LIMIT_EXCEEDED exception.
|
/// MEMORY_LIMIT_EXCEEDED exception.
|
||||||
///
|
///
|
||||||
|
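The Exception hunks above wrap the thread-local frame-pointer vector in a small struct whose constructor and destructor flip a flag, so accesses that happen after the thread_local has been destroyed (for example, exceptions thrown from other static destructors) fall back to an empty value. A compressed sketch of the guard (plain statics here, thread_local in the real code):

#include <cstdio>
#include <utility>
#include <vector>

using Frames = std::vector<int>;

bool can_use_frames = false;

struct FramesHolder
{
    FramesHolder() { can_use_frames = true; }
    ~FramesHolder() { can_use_frames = false; }
    Frames frames;
};

FramesHolder holder;   // thread_local in the real code

Frames getFrames()
{
    if (can_use_frames)
        return holder.frames;
    return {};          // holder already destroyed: return a safe empty value
}

void setFrames(Frames frames)
{
    if (can_use_frames)
        holder.frames = std::move(frames);
}

struct LateUser
{
    // If this object outlives `holder` (possible across translation units),
    // the flag keeps the access safe.
    ~LateUser() { std::printf("frames at shutdown: %zu\n", getFrames().size()); }
};

LateUser late_user;

int main()
{
    setFrames({1, 2, 3});
}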
@ -10,7 +10,6 @@
|
|||||||
|
|
||||||
#include <cerrno>
|
#include <cerrno>
|
||||||
#include <exception>
|
#include <exception>
|
||||||
#include <memory>
|
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
#include <fmt/core.h>
|
#include <fmt/core.h>
|
||||||
@ -49,14 +48,14 @@ public:
|
|||||||
{
|
{
|
||||||
if (terminate_on_any_exception)
|
if (terminate_on_any_exception)
|
||||||
std::terminate();
|
std::terminate();
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
}
|
}
|
||||||
|
|
||||||
Exception(const PreformattedMessage & msg, int code): Exception(msg.text, code)
|
Exception(const PreformattedMessage & msg, int code): Exception(msg.text, code)
|
||||||
{
|
{
|
||||||
if (terminate_on_any_exception)
|
if (terminate_on_any_exception)
|
||||||
std::terminate();
|
std::terminate();
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
message_format_string = msg.format_string;
|
message_format_string = msg.format_string;
|
||||||
message_format_string_args = msg.format_string_args;
|
message_format_string_args = msg.format_string_args;
|
||||||
}
|
}
|
||||||
@ -65,18 +64,36 @@ public:
|
|||||||
{
|
{
|
||||||
if (terminate_on_any_exception)
|
if (terminate_on_any_exception)
|
||||||
std::terminate();
|
std::terminate();
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
message_format_string = msg.format_string;
|
message_format_string = msg.format_string;
|
||||||
message_format_string_args = msg.format_string_args;
|
message_format_string_args = msg.format_string_args;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Collect call stacks of all previous jobs' schedulings leading to this thread job's execution
|
/// Collect call stacks of all previous jobs' schedulings leading to this thread job's execution
|
||||||
static thread_local bool enable_job_stack_trace;
|
static thread_local bool enable_job_stack_trace;
|
||||||
static thread_local std::vector<StackTrace::FramePointers> thread_frame_pointers;
|
static thread_local bool can_use_thread_frame_pointers;
|
||||||
|
/// Because of unknown order of static destructor calls,
|
||||||
|
/// thread_frame_pointers can already be uninitialized when a different destructor generates an exception.
|
||||||
|
/// To prevent such scenarios, a wrapper class is created and a function that will return empty vector
|
||||||
|
/// if its destructor is already called
|
||||||
|
using ThreadFramePointersBase = std::vector<StackTrace::FramePointers>;
|
||||||
|
struct ThreadFramePointers
|
||||||
|
{
|
||||||
|
ThreadFramePointers();
|
||||||
|
~ThreadFramePointers();
|
||||||
|
|
||||||
|
ThreadFramePointersBase frame_pointers;
|
||||||
|
};
|
||||||
|
|
||||||
|
static ThreadFramePointersBase getThreadFramePointers();
|
||||||
|
static void setThreadFramePointers(ThreadFramePointersBase frame_pointers);
|
||||||
|
|
||||||
/// Callback for any exception
|
/// Callback for any exception
|
||||||
static std::function<void(const std::string & msg, int code, bool remote, const Exception::FramePointers & trace)> callback;
|
static std::function<void(const std::string & msg, int code, bool remote, const Exception::FramePointers & trace)> callback;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
static thread_local ThreadFramePointers thread_frame_pointers;
|
||||||
|
|
||||||
// used to remove the sensitive information from exceptions if query_masking_rules is configured
|
// used to remove the sensitive information from exceptions if query_masking_rules is configured
|
||||||
struct MessageMasked
|
struct MessageMasked
|
||||||
{
|
{
|
||||||
@ -178,7 +195,7 @@ class ErrnoException : public Exception
|
|||||||
public:
|
public:
|
||||||
ErrnoException(std::string && msg, int code, int with_errno) : Exception(msg, code), saved_errno(with_errno)
|
ErrnoException(std::string && msg, int code, int with_errno) : Exception(msg, code), saved_errno(with_errno)
|
||||||
{
|
{
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
addMessage(", {}", errnoToString(saved_errno));
|
addMessage(", {}", errnoToString(saved_errno));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -187,7 +204,7 @@ public:
|
|||||||
requires std::is_convertible_v<T, String>
|
requires std::is_convertible_v<T, String>
|
||||||
ErrnoException(int code, T && message) : Exception(message, code), saved_errno(errno)
|
ErrnoException(int code, T && message) : Exception(message, code), saved_errno(errno)
|
||||||
{
|
{
|
||||||
capture_thread_frame_pointers = thread_frame_pointers;
|
capture_thread_frame_pointers = getThreadFramePointers();
|
||||||
addMessage(", {}", errnoToString(saved_errno));
|
addMessage(", {}", errnoToString(saved_errno));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -57,7 +57,9 @@ static struct InitFiu
|
|||||||
PAUSEABLE_ONCE(finish_clean_quorum_failed_parts) \
|
PAUSEABLE_ONCE(finish_clean_quorum_failed_parts) \
|
||||||
PAUSEABLE(dummy_pausable_failpoint) \
|
PAUSEABLE(dummy_pausable_failpoint) \
|
||||||
ONCE(execute_query_calling_empty_set_result_func_on_exception) \
|
ONCE(execute_query_calling_empty_set_result_func_on_exception) \
|
||||||
ONCE(receive_timeout_on_table_status_response)
|
ONCE(receive_timeout_on_table_status_response) \
|
||||||
|
REGULAR(keepermap_fail_drop_data) \
|
||||||
|
REGULAR(lazy_pipe_fds_fail_close) \
|
||||||
|
|
||||||
|
|
||||||
namespace FailPoints
|
namespace FailPoints
|
||||||
|
@ -25,3 +25,15 @@ bool hasLogger(const std::string & name)
|
|||||||
{
|
{
|
||||||
return Poco::Logger::has(name);
|
return Poco::Logger::has(name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static constinit std::atomic<bool> allow_logging{true};
|
||||||
|
|
||||||
|
bool isLoggingEnabled()
|
||||||
|
{
|
||||||
|
return allow_logging;
|
||||||
|
}
|
||||||
|
|
||||||
|
void disableLogging()
|
||||||
|
{
|
||||||
|
allow_logging = false;
|
||||||
|
}
|
||||||
|
@ -64,3 +64,7 @@ LoggerRawPtr createRawLogger(const std::string & name, Poco::Channel * channel,
|
|||||||
* Otherwise, returns false.
|
* Otherwise, returns false.
|
||||||
*/
|
*/
|
||||||
bool hasLogger(const std::string & name);
|
bool hasLogger(const std::string & name);
|
||||||
|
|
||||||
|
void disableLogging();
|
||||||
|
|
||||||
|
bool isLoggingEnabled();
|
||||||
|
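disableLogging/isLoggingEnabled above give shutdown code a process-wide switch so that late error paths (tryLogCurrentException running during static destruction) skip the logger entirely. A minimal sketch of the same switch:

#include <atomic>
#include <iostream>
#include <string>

static constinit std::atomic<bool> allow_logging{true};

bool isLoggingEnabled() { return allow_logging.load(std::memory_order_relaxed); }
void disableLogging() { allow_logging.store(false, std::memory_order_relaxed); }

void tryLog(const std::string & message)
{
    if (!isLoggingEnabled())
        return;                        // the logger may already be torn down
    std::cerr << message << '\n';
}

int main()
{
    tryLog("still logging");
    disableLogging();
    tryLog("silently dropped");
}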
@ -1,19 +1,23 @@
|
|||||||
#include <Common/PipeFDs.h>
|
#include <Common/PipeFDs.h>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <Common/formatReadable.h>
|
#include <Common/formatReadable.h>
|
||||||
|
#include <Common/FailPoint.h>
|
||||||
|
|
||||||
#include <Common/logger_useful.h>
|
#include <Common/logger_useful.h>
|
||||||
#include <base/errnoToString.h>
|
#include <base/errnoToString.h>
|
||||||
|
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
#include <string>
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
namespace FailPoints
|
||||||
|
{
|
||||||
|
extern const char lazy_pipe_fds_fail_close[];
|
||||||
|
}
|
||||||
|
|
||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int CANNOT_PIPE;
|
extern const int CANNOT_PIPE;
|
||||||
@ -42,6 +46,11 @@ void LazyPipeFDs::open()
|
|||||||
|
|
||||||
void LazyPipeFDs::close()
|
void LazyPipeFDs::close()
|
||||||
{
|
{
|
||||||
|
fiu_do_on(FailPoints::lazy_pipe_fds_fail_close,
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::CANNOT_PIPE, "Manually triggered exception on close");
|
||||||
|
});
|
||||||
|
|
||||||
for (int & fd : fds_rw)
|
for (int & fd : fds_rw)
|
||||||
{
|
{
|
||||||
if (fd < 0)
|
if (fd < 0)
|
||||||
|
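LazyPipeFDs::close above gains a fail point (lazy_pipe_fds_fail_close) that tests can enable to force an exception on a path that is otherwise hard to hit. The real code goes through ClickHouse's FailPoints / fiu_do_on machinery; the sketch below only illustrates the idea with a hand-rolled registry:

#include <stdexcept>
#include <string>
#include <unordered_set>

namespace failpoints
{
std::unordered_set<std::string> enabled;
bool isEnabled(const std::string & name) { return enabled.contains(name); }
}

void closePipe()
{
    if (failpoints::isEnabled("lazy_pipe_fds_fail_close"))
        throw std::runtime_error("Manually triggered exception on close");
    // ... the real close(fd) calls would go here ...
}

int main()
{
    failpoints::enabled.insert("lazy_pipe_fds_fail_close");
    try
    {
        closePipe();
    }
    catch (const std::exception &)
    {
        // the test asserts that this error path is taken
    }
}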
@ -1,8 +1,5 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <cstddef>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
@ -89,7 +89,7 @@ void signalHandler(int sig, siginfo_t * info, void * context)
|
|||||||
writePODBinary(*info, out);
|
writePODBinary(*info, out);
|
||||||
writePODBinary(signal_context, out);
|
writePODBinary(signal_context, out);
|
||||||
writePODBinary(stack_trace, out);
|
writePODBinary(stack_trace, out);
|
||||||
writeVectorBinary(Exception::enable_job_stack_trace ? Exception::thread_frame_pointers : std::vector<StackTrace::FramePointers>{}, out);
|
writeVectorBinary(Exception::enable_job_stack_trace ? Exception::getThreadFramePointers() : std::vector<StackTrace::FramePointers>{}, out);
|
||||||
writeBinary(static_cast<UInt32>(getThreadId()), out);
|
writeBinary(static_cast<UInt32>(getThreadId()), out);
|
||||||
writePODBinary(current_thread, out);
|
writePODBinary(current_thread, out);
|
||||||
|
|
||||||
@ -605,7 +605,14 @@ void HandledSignals::reset()
|
|||||||
|
|
||||||
HandledSignals::~HandledSignals()
|
HandledSignals::~HandledSignals()
|
||||||
{
|
{
|
||||||
reset();
|
try
|
||||||
|
{
|
||||||
|
reset();
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
HandledSignals & HandledSignals::instance()
|
HandledSignals & HandledSignals::instance()
|
||||||
@ -489,13 +489,25 @@ struct CacheEntry

using CacheEntryPtr = std::shared_ptr<CacheEntry>;

using StackTraceCache = std::map<StackTraceTriple, CacheEntryPtr, std::less<>>;

static StackTraceCache & cacheInstance()
{
static StackTraceCache cache;
return cache;
}
static constinit bool can_use_cache = false;

using StackTraceCacheBase = std::map<StackTraceTriple, CacheEntryPtr, std::less<>>;

struct StackTraceCache : public StackTraceCacheBase
{
StackTraceCache()
: StackTraceCacheBase()
{
can_use_cache = true;
}

~StackTraceCache()
{
can_use_cache = false;
}
};

static StackTraceCache cache;

static DB::SharedMutex stacktrace_cache_mutex;

@ -503,10 +515,16 @@ String toStringCached(const StackTrace::FramePointers & pointers, size_t offset,
{
const StackTraceRefTriple key{pointers, offset, size};

if (!can_use_cache)
{
DB::WriteBufferFromOwnString out;
toStringEveryLineImpl(false, key, [&](std::string_view str) { out << str << '\n'; });
return out.str();
}

/// Calculation of stack trace text is extremely slow.
/// We use cache because otherwise the server could be overloaded by trash queries.
/// Note that this cache can grow unconditionally, but practically it should be small.
StackTraceCache & cache = cacheInstance();
CacheEntryPtr cache_entry;

// Optimistic try for cache hit to avoid any contention whatsoever, should be the main hot code route

@ -558,7 +576,7 @@ std::string StackTrace::toString(void * const * frame_pointers_raw, size_t offse
void StackTrace::dropCache()
{
std::lock_guard lock{stacktrace_cache_mutex};
cacheInstance().clear();
cache.clear();
}
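The constinit flag introduced above guards against the static destruction order problem: once the cache's destructor has run, callers take a slow path instead of touching a dead object. A reduced sketch of that guard pattern, using hypothetical Registry names rather than the ClickHouse code:

static constinit bool registry_alive = false;

struct Registry
{
    Registry() { registry_alive = true; }
    ~Registry() { registry_alive = false; }
};

static Registry registry;

void useRegistry()
{
    /// After static destruction has started, registry_alive is false again,
    /// so callers must take a path that does not touch the destroyed object.
    if (!registry_alive)
        return;
    /// ... safe to use `registry` here ...
}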
@ -51,7 +51,7 @@ public:
if (!capture_frame_pointers)
return;
/// Save all previous jobs call stacks and append with current
frame_pointers = DB::Exception::thread_frame_pointers;
frame_pointers = DB::Exception::getThreadFramePointers();
frame_pointers.push_back(StackTrace().getFramePointers());
}

@ -455,7 +455,7 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
try
{
if (DB::Exception::enable_job_stack_trace)
DB::Exception::thread_frame_pointers = std::move(job_data->frame_pointers);
DB::Exception::setThreadFramePointers(std::move(job_data->frame_pointers));

CurrentMetrics::Increment metric_active_pool_threads(metric_active_threads);
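Both hunks above replace direct reads and writes of Exception::thread_frame_pointers with getThreadFramePointers()/setThreadFramePointers(). A rough sketch of how such accessors around a thread_local typically look; the shapes below are assumptions, not the actual ClickHouse declarations:

#include <utility>
#include <vector>

struct Frames
{
    using FramePointers = std::vector<void *>;

    /// Read-only access for the current thread's saved stacks.
    static const std::vector<FramePointers> & getThreadFramePointers() { return thread_frame_pointers; }

    /// Replace the current thread's saved stacks wholesale.
    static void setThreadFramePointers(std::vector<FramePointers> && frame_pointers)
    {
        thread_frame_pointers = std::move(frame_pointers);
    }

private:
    static thread_local std::vector<FramePointers> thread_frame_pointers;
};

thread_local std::vector<Frames::FramePointers> Frames::thread_frame_pointers;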
@ -1014,9 +1014,6 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea
LOG_INFO(log, "Finalizing session {}. finalization_started: {}, queue_finished: {}, reason: '{}'",
session_id, already_started, requests_queue.isFinished(), reason);

/// Reset the original index.
original_index = -1;

auto expire_session_if_not_expired = [&]
{
/// No new requests will appear in queue after finish()
@ -1,14 +1,14 @@
clickhouse_add_executable (hashes_test hashes_test.cpp)
target_link_libraries (hashes_test PRIVATE clickhouse_common_io ch_contrib::cityhash)
target_link_libraries (hashes_test PRIVATE clickhouse_common_io clickhouse_common_config ch_contrib::cityhash)
if (TARGET OpenSSL::Crypto)
target_link_libraries (hashes_test PRIVATE OpenSSL::Crypto)
endif()

clickhouse_add_executable (sip_hash_perf sip_hash_perf.cpp)
target_link_libraries (sip_hash_perf PRIVATE clickhouse_common_io)
target_link_libraries (sip_hash_perf PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (small_table small_table.cpp)
target_link_libraries (small_table PRIVATE clickhouse_common_io)
target_link_libraries (small_table PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (parallel_aggregation parallel_aggregation.cpp)
target_link_libraries (parallel_aggregation PRIVATE dbms clickhouse_functions)

@ -17,13 +17,13 @@ clickhouse_add_executable (parallel_aggregation2 parallel_aggregation2.cpp)
target_link_libraries (parallel_aggregation2 PRIVATE dbms clickhouse_functions)

clickhouse_add_executable (int_hashes_perf int_hashes_perf.cpp)
target_link_libraries (int_hashes_perf PRIVATE clickhouse_common_io)
target_link_libraries (int_hashes_perf PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (compact_array compact_array.cpp)
target_link_libraries (compact_array PRIVATE clickhouse_common_io)
target_link_libraries (compact_array PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (radix_sort radix_sort.cpp)
target_link_libraries (radix_sort PRIVATE clickhouse_common_io ch_contrib::pdqsort)
target_link_libraries (radix_sort PRIVATE clickhouse_common_io clickhouse_common_config ch_contrib::pdqsort)

clickhouse_add_executable (arena_with_free_lists arena_with_free_lists.cpp)
target_link_libraries (arena_with_free_lists PRIVATE dbms)

@ -33,46 +33,46 @@ target_link_libraries (lru_hash_map_perf PRIVATE dbms)

if (OS_LINUX)
clickhouse_add_executable (thread_creation_latency thread_creation_latency.cpp)
target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io)
target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io clickhouse_common_config)
endif()

clickhouse_add_executable (array_cache array_cache.cpp)
target_link_libraries (array_cache PRIVATE clickhouse_common_io)
target_link_libraries (array_cache PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (space_saving space_saving.cpp)
target_link_libraries (space_saving PRIVATE clickhouse_common_io)
target_link_libraries (space_saving PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (integer_hash_tables_benchmark integer_hash_tables_benchmark.cpp)
target_link_libraries (integer_hash_tables_benchmark PRIVATE dbms ch_contrib::abseil_swiss_tables ch_contrib::sparsehash)

clickhouse_add_executable (cow_columns cow_columns.cpp)
target_link_libraries (cow_columns PRIVATE clickhouse_common_io)
target_link_libraries (cow_columns PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (cow_compositions cow_compositions.cpp)
target_link_libraries (cow_compositions PRIVATE clickhouse_common_io)
target_link_libraries (cow_compositions PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (stopwatch stopwatch.cpp)
target_link_libraries (stopwatch PRIVATE clickhouse_common_io)
target_link_libraries (stopwatch PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (symbol_index symbol_index.cpp)
target_link_libraries (symbol_index PRIVATE clickhouse_common_io)
target_link_libraries (symbol_index PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (chaos_sanitizer chaos_sanitizer.cpp)
target_link_libraries (chaos_sanitizer PRIVATE clickhouse_common_io)
target_link_libraries (chaos_sanitizer PRIVATE clickhouse_common_io clickhouse_common_config)

if (OS_LINUX)
clickhouse_add_executable (memory_statistics_os_perf memory_statistics_os_perf.cpp)
target_link_libraries (memory_statistics_os_perf PRIVATE clickhouse_common_io)
target_link_libraries (memory_statistics_os_perf PRIVATE clickhouse_common_io clickhouse_common_config)
endif()

clickhouse_add_executable (procfs_metrics_provider_perf procfs_metrics_provider_perf.cpp)
target_link_libraries (procfs_metrics_provider_perf PRIVATE clickhouse_common_io)
target_link_libraries (procfs_metrics_provider_perf PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (average average.cpp)
target_link_libraries (average PRIVATE clickhouse_common_io)
target_link_libraries (average PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (shell_command_inout shell_command_inout.cpp)
target_link_libraries (shell_command_inout PRIVATE clickhouse_common_io)
target_link_libraries (shell_command_inout PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (executable_udf executable_udf.cpp)
target_link_libraries (executable_udf PRIVATE dbms)

@ -91,4 +91,4 @@ if (ENABLE_SSL)
endif()

clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp)
target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io)
target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io clickhouse_common_config)

@ -1,2 +1,2 @@
clickhouse_add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp)
target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx)
target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx clickhouse_common_config loggers_no_text_log)
178 src/Common/tests/gtest_cgroups_reader.cpp Normal file
@ -0,0 +1,178 @@
#if defined(OS_LINUX)

#include <gtest/gtest.h>

#include <cstdint>
#include <filesystem>

#include <IO/WriteBufferFromFile.h>
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/filesystemHelpers.h>

using namespace DB;

const std::string SAMPLE_FILE[2] = {
R"(cache 4673703936
rss 2232029184
rss_huge 0
shmem 0
mapped_file 344678400
dirty 4730880
writeback 135168
swap 0
pgpgin 2038569918
pgpgout 2036883790
pgfault 2055373287
pgmajfault 0
inactive_anon 2156335104
active_anon 0
inactive_file 2841305088
active_file 1653915648
unevictable 256008192
hierarchical_memory_limit 8589934592
hierarchical_memsw_limit 8589934592
total_cache 4673703936
total_rss 2232029184
total_rss_huge 0
total_shmem 0
total_mapped_file 344678400
total_dirty 4730880
total_writeback 135168
total_swap 0
total_pgpgin 2038569918
total_pgpgout 2036883790
total_pgfault 2055373287
total_pgmajfault 0
total_inactive_anon 2156335104
total_active_anon 0
total_inactive_file 2841305088
total_active_file 1653915648
total_unevictable 256008192
)",
R"(anon 10429399040
file 17410793472
kernel 1537789952
kernel_stack 3833856
pagetables 65441792
sec_pagetables 0
percpu 15232
sock 0
vmalloc 0
shmem 0
zswap 0
zswapped 0
file_mapped 344010752
file_dirty 2060857344
file_writeback 0
swapcached 0
anon_thp 0
file_thp 0
shmem_thp 0
inactive_anon 0
active_anon 10429370368
inactive_file 8693084160
active_file 8717561856
unevictable 0
slab_reclaimable 1460982504
slab_unreclaimable 5152864
slab 1466135368
workingset_refault_anon 0
workingset_refault_file 0
workingset_activate_anon 0
workingset_activate_file 0
workingset_restore_anon 0
workingset_restore_file 0
workingset_nodereclaim 0
pgscan 0
pgsteal 0
pgscan_kswapd 0
pgscan_direct 0
pgscan_khugepaged 0
pgsteal_kswapd 0
pgsteal_direct 0
pgsteal_khugepaged 0
pgfault 43026352
pgmajfault 36762
pgrefill 0
pgactivate 0
pgdeactivate 0
pglazyfree 259
pglazyfreed 0
zswpin 0
zswpout 0
thp_fault_alloc 0
thp_collapse_alloc 0
)"};

const std::string EXPECTED[2]
= {"{\"active_anon\": 0, \"active_file\": 1653915648, \"cache\": 4673703936, \"dirty\": 4730880, \"hierarchical_memory_limit\": "
"8589934592, \"hierarchical_memsw_limit\": 8589934592, \"inactive_anon\": 2156335104, \"inactive_file\": 2841305088, "
"\"mapped_file\": 344678400, \"pgfault\": 2055373287, \"pgmajfault\": 0, \"pgpgin\": 2038569918, \"pgpgout\": 2036883790, \"rss\": "
"2232029184, \"rss_huge\": 0, \"shmem\": 0, \"swap\": 0, \"total_active_anon\": 0, \"total_active_file\": 1653915648, "
"\"total_cache\": 4673703936, \"total_dirty\": 4730880, \"total_inactive_anon\": 2156335104, \"total_inactive_file\": 2841305088, "
"\"total_mapped_file\": 344678400, \"total_pgfault\": 2055373287, \"total_pgmajfault\": 0, \"total_pgpgin\": 2038569918, "
"\"total_pgpgout\": 2036883790, \"total_rss\": 2232029184, \"total_rss_huge\": 0, \"total_shmem\": 0, \"total_swap\": 0, "
"\"total_unevictable\": 256008192, \"total_writeback\": 135168, \"unevictable\": 256008192, \"writeback\": 135168}",
"{\"active_anon\": 10429370368, \"active_file\": 8717561856, \"anon\": 10429399040, \"anon_thp\": 0, \"file\": 17410793472, "
"\"file_dirty\": 2060857344, \"file_mapped\": 344010752, \"file_thp\": 0, \"file_writeback\": 0, \"inactive_anon\": 0, "
"\"inactive_file\": 8693084160, \"kernel\": 1537789952, \"kernel_stack\": 3833856, \"pagetables\": 65441792, \"percpu\": 15232, "
"\"pgactivate\": 0, \"pgdeactivate\": 0, \"pgfault\": 43026352, \"pglazyfree\": 259, \"pglazyfreed\": 0, \"pgmajfault\": 36762, "
"\"pgrefill\": 0, \"pgscan\": 0, \"pgscan_direct\": 0, \"pgscan_khugepaged\": 0, \"pgscan_kswapd\": 0, \"pgsteal\": 0, "
"\"pgsteal_direct\": 0, \"pgsteal_khugepaged\": 0, \"pgsteal_kswapd\": 0, \"sec_pagetables\": 0, \"shmem\": 0, \"shmem_thp\": 0, "
"\"slab\": 1466135368, \"slab_reclaimable\": 1460982504, \"slab_unreclaimable\": 5152864, \"sock\": 0, \"swapcached\": 0, "
"\"thp_collapse_alloc\": 0, \"thp_fault_alloc\": 0, \"unevictable\": 0, \"vmalloc\": 0, \"workingset_activate_anon\": 0, "
"\"workingset_activate_file\": 0, \"workingset_nodereclaim\": 0, \"workingset_refault_anon\": 0, \"workingset_refault_file\": 0, "
"\"workingset_restore_anon\": 0, \"workingset_restore_file\": 0, \"zswap\": 0, \"zswapped\": 0, \"zswpin\": 0, \"zswpout\": 0}"};

class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam<CgroupsMemoryUsageObserver::CgroupsVersion>
{
void SetUp() override
{
const uint8_t version = static_cast<uint8_t>(GetParam());
tmp_dir = fmt::format("./test_cgroups_{}", magic_enum::enum_name(GetParam()));
fs::create_directories(tmp_dir);

auto stat_file = WriteBufferFromFile(tmp_dir + "/memory.stat");
stat_file.write(SAMPLE_FILE[version].data(), SAMPLE_FILE[version].size());
stat_file.sync();

if (GetParam() == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
{
auto current_file = WriteBufferFromFile(tmp_dir + "/memory.current");
current_file.write("29645422592", 11);
current_file.sync();
}
}

protected:
std::string tmp_dir;
};

TEST_P(CgroupsMemoryUsageObserverFixture, ReadMemoryUsageTest)
{
const auto version = GetParam();
auto reader = createCgroupsReader(version, tmp_dir);
ASSERT_EQ(
reader->readMemoryUsage(),
version == CgroupsMemoryUsageObserver::CgroupsVersion::V1 ? /* rss from memory.stat */ 2232029184
: /* value from memory.current - inactive_file */ 20952338432);
}

TEST_P(CgroupsMemoryUsageObserverFixture, DumpAllStatsTest)
{
const auto version = GetParam();
auto reader = createCgroupsReader(version, tmp_dir);
ASSERT_EQ(reader->dumpAllStats(), EXPECTED[static_cast<uint8_t>(version)]);
}

INSTANTIATE_TEST_SUITE_P(
CgroupsMemoryUsageObserverTests,
CgroupsMemoryUsageObserverFixture,
::testing::Values(CgroupsMemoryUsageObserver::CgroupsVersion::V1, CgroupsMemoryUsageObserver::CgroupsVersion::V2));

#endif
@ -33,7 +33,7 @@ size_t toMilliseconds(auto duration)
return std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
}

const auto epsilon = 500us;
const auto epsilon = 1ms;

class ResolvePoolMock : public DB::HostResolver
{

@ -358,53 +358,59 @@ void check_no_failed_address(size_t iteration, auto & resolver, auto & addresses

TEST_F(ResolvePoolTest, BannedForConsiquenceFail)
{
auto history = 5ms;
auto history = 10ms;
auto resolver = make_resolver(toMilliseconds(history));

auto failed_addr = resolver->resolve();
ASSERT_TRUE(addresses.contains(*failed_addr));

auto start_at = now();

failed_addr.setFail();
auto start_at = now();

ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count));
ASSERT_EQ(1, CurrentMetrics::get(metrics.banned_count));
check_no_failed_address(1, resolver, addresses, failed_addr, metrics, start_at + history - epsilon);

sleep_until(start_at + history + epsilon);
start_at = now();

resolver->update();
ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count));
ASSERT_EQ(0, CurrentMetrics::get(metrics.banned_count));

failed_addr.setFail();
start_at = now();

check_no_failed_address(2, resolver, addresses, failed_addr, metrics, start_at + history - epsilon);

sleep_until(start_at + history + epsilon);
start_at = now();

resolver->update();

// too much time has passed
if (now() > start_at + 2*history - epsilon)
return;

ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count));
ASSERT_EQ(1, CurrentMetrics::get(metrics.banned_count));

// ip still banned adter history_ms + update, because it was his second consiquent fail
check_no_failed_address(2, resolver, addresses, failed_addr, metrics, start_at + history - epsilon);
check_no_failed_address(2, resolver, addresses, failed_addr, metrics, start_at + 2*history - epsilon);
}

TEST_F(ResolvePoolTest, NoAditionalBannForConcurrentFail)
{
auto history = 5ms;
auto history = 10ms;
auto resolver = make_resolver(toMilliseconds(history));

auto failed_addr = resolver->resolve();
ASSERT_TRUE(addresses.contains(*failed_addr));

auto start_at = now();

failed_addr.setFail();
failed_addr.setFail();
failed_addr.setFail();

auto start_at = now();

ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count));
ASSERT_EQ(1, CurrentMetrics::get(metrics.banned_count));

@ -413,6 +419,7 @@ TEST_F(ResolvePoolTest, NoAditionalBannForConcurrentFail)
sleep_until(start_at + history + epsilon);

resolver->update();

// ip is cleared after just 1 history_ms interval.
ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count));
ASSERT_EQ(0, CurrentMetrics::get(metrics.banned_count));
@ -1,2 +1,2 @@
clickhouse_add_executable (compressed_buffer compressed_buffer.cpp)
target_link_libraries (compressed_buffer PRIVATE clickhouse_common_io clickhouse_compression)
target_link_libraries (compressed_buffer PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression)
@ -383,7 +383,10 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co
LockMemoryExceptionInThread::removeUniqueLock();
};

asio_opts.thread_pool_size_ = getNumberOfPhysicalCPUCores();
/// At least 16 threads for network communication in asio.
/// asio is async framework, so even with 1 thread it should be ok, but
/// still as safeguard it's better to have some redundant capacity here
asio_opts.thread_pool_size_ = std::max(16U, getNumberOfPhysicalCPUCores());

if (state_manager->isSecure())
{
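The only functional change above is clamping the asio pool size from below. The same logic in isolation, as an illustrative helper that is not part of the commit:

#include <algorithm>

unsigned pickAsioThreadPoolSize(unsigned physical_cores)
{
    /// Never go below 16 threads even on small machines; otherwise scale with the core count.
    return std::max(16U, physical_cores);
}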
@ -1,5 +1,8 @@
#pragma once

/// CLion freezes for a minute on every keypress in any file including this.
#if !defined(__CLION_IDE__)

#include <Common/NamePrompter.h>
#include <Core/BaseSettings.h>
#include <Core/SettingsEnums.h>

@ -602,7 +605,7 @@ class IColumn;
M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \
M(Bool, optimize_multiif_to_if, true, "Replace 'multiIf' with only one condition to 'if'.", 0) \
M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \
M(Bool, optimize_functions_to_subcolumns, true, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \
M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \
M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \
M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \
M(Bool, optimize_append_index, false, "Use constraints in order to append index condition (indexHint)", 0) \

@ -763,7 +766,7 @@ class IColumn;
M(UInt64, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem, (20 * 8192), "If at least as many lines are read from one file, the reading can be parallelized, when reading from remote filesystem.", 0) \
M(UInt64, merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem, (24 * 10 * 1024 * 1024), "If at least as many bytes are read from one file, the reading can be parallelized, when reading from remote filesystem.", 0) \
M(UInt64, remote_read_min_bytes_for_seek, 4 * DBMS_DEFAULT_BUFFER_SIZE, "Min bytes required for remote read (url, s3) to do seek, instead of read with ignore.", 0) \
M(UInt64, merge_tree_min_bytes_per_task_for_remote_reading, 4 * DBMS_DEFAULT_BUFFER_SIZE, "Min bytes to read per task.", 0) \
M(UInt64, merge_tree_min_bytes_per_task_for_remote_reading, 2 * DBMS_DEFAULT_BUFFER_SIZE, "Min bytes to read per task.", 0) ALIAS(filesystem_prefetch_min_bytes_for_single_read_task) \
M(Bool, merge_tree_use_const_size_tasks_for_remote_reading, true, "Whether to use constant size tasks for reading from a remote table.", 0) \
M(Bool, merge_tree_determine_task_size_by_prewhere_columns, true, "Whether to use only prewhere columns size to determine reading task size.", 0) \
M(UInt64, merge_tree_compact_parts_min_granules_to_multibuffer_read, 16, "Only available in ClickHouse Cloud", 0) \

@ -805,7 +808,6 @@ class IColumn;
M(UInt64, prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE, "The maximum size of the prefetch buffer to read from the filesystem.", 0) \
M(UInt64, filesystem_prefetch_step_bytes, 0, "Prefetch step in bytes. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \
M(UInt64, filesystem_prefetch_step_marks, 0, "Prefetch step in marks. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \
M(UInt64, filesystem_prefetch_min_bytes_for_single_read_task, "2Mi", "Do not parallelize within one file read less than this amount of bytes. E.g. one reader will not receive a read task of size less than this amount. This setting is recommended to avoid spikes of time for aws getObject requests to aws", 0) \
M(UInt64, filesystem_prefetch_max_memory_usage, "1Gi", "Maximum memory usage for prefetches.", 0) \
M(UInt64, filesystem_prefetches_limit, 200, "Maximum number of prefetches. Zero means unlimited. A setting `filesystem_prefetches_max_memory_usage` is more recommended if you want to limit the number of prefetches", 0) \
\

@ -1125,6 +1127,7 @@ class IColumn;
M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \
M(Bool, input_format_try_infer_variants, false, "Try to infer the Variant type in text formats when there is more than one possible type for column/array elements", 0) \
M(Bool, input_format_json_ignore_key_case, false, "Ignore json key case while read json field from string", 0) \
M(Bool, input_format_json_case_insensitive_column_matching, false, "Ignore case when matching JSON keys with CH columns", 0) \
M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \

@ -1349,3 +1352,5 @@ struct FormatFactorySettings : public BaseSettings<FormatFactorySettingsTraits>
};

}

#endif
@ -63,9 +63,8 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"},
{"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"},
{"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"},
{"optimize_functions_to_subcolumns", false, true, "Enable optimization by default"},
{"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."},
{"input_format_json_ignore_key_case", false, false, "Ignore json key case while read json field from string."},
{"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."},
{"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."},
{"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
{"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."},

@ -78,6 +77,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"},
{"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"},
{"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"},
{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"},
{"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."},
{"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."},
{"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},

@ -148,7 +148,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"},
{"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"},
{"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."},
{"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication is dependent materialized view cannot work together with async inserts."},
{"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."},
{"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"},
{"log_processors_profiles", false, true, "Enable by default"},
{"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."},
@ -1,8 +1,8 @@
clickhouse_add_executable (string_pool string_pool.cpp)
target_link_libraries (string_pool PRIVATE clickhouse_common_io ch_contrib::sparsehash)
target_link_libraries (string_pool PRIVATE clickhouse_common_io clickhouse_common_config ch_contrib::sparsehash)

clickhouse_add_executable (field field.cpp)
target_link_libraries (field PRIVATE dbms)

clickhouse_add_executable (string_ref_hash string_ref_hash.cpp)
target_link_libraries (string_ref_hash PRIVATE clickhouse_common_io)
target_link_libraries (string_ref_hash PRIVATE clickhouse_common_io clickhouse_common_config)
@ -146,10 +146,19 @@ BaseDaemon::BaseDaemon() = default;

BaseDaemon::~BaseDaemon()
{
writeSignalIDtoSignalPipe(SignalListener::StopThread);
signal_listener_thread.join();
HandledSignals::instance().reset();
SentryWriter::resetInstance();
try
{
writeSignalIDtoSignalPipe(SignalListener::StopThread);
signal_listener_thread.join();
HandledSignals::instance().reset();
SentryWriter::resetInstance();
}
catch (...)
{
tryLogCurrentException(&logger());
}

disableLogging();
}
@ -125,23 +125,6 @@ DataTypePtr DataTypeFactory::getImpl(const String & family_name_param, const AST
{
String family_name = getAliasToOrName(family_name_param);

if (endsWith(family_name, "WithDictionary"))
{
ASTPtr low_cardinality_params = std::make_shared<ASTExpressionList>();
String param_name = family_name.substr(0, family_name.size() - strlen("WithDictionary"));
if (parameters)
{
auto func = std::make_shared<ASTFunction>();
func->name = param_name;
func->arguments = parameters;
low_cardinality_params->children.push_back(func);
}
else
low_cardinality_params->children.push_back(std::make_shared<ASTIdentifier>(param_name));

return getImpl<nullptr_on_error>("LowCardinality", low_cardinality_params);
}

const auto * creator = findCreatorByName<nullptr_on_error>(family_name);
if constexpr (nullptr_on_error)
{
@ -559,8 +559,11 @@ void DatabaseReplicated::createEmptyLogEntry(const ZooKeeperPtr & current_zookee

bool DatabaseReplicated::waitForReplicaToProcessAllEntries(UInt64 timeout_ms)
{
if (!ddl_worker || is_probably_dropped)
return false;
{
std::lock_guard lock{ddl_worker_mutex};
if (!ddl_worker || is_probably_dropped)
return false;
}
return ddl_worker->waitForReplicaToProcessAllEntries(timeout_ms);
}

@ -641,7 +644,10 @@ LoadTaskPtr DatabaseReplicated::startupDatabaseAsync(AsyncLoader & async_loader,
if (is_probably_dropped)
return;

ddl_worker = std::make_unique<DatabaseReplicatedDDLWorker>(this, getContext());
{
std::lock_guard lock{ddl_worker_mutex};
ddl_worker = std::make_unique<DatabaseReplicatedDDLWorker>(this, getContext());
}
ddl_worker->startup();
ddl_worker_initialized = true;
});
|
|||||||
DatabaseAtomic::stopLoading();
|
DatabaseAtomic::stopLoading();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool debug_check /* = true */) const
|
void DatabaseReplicated::dumpLocalTablesForDebugOnly(const ContextPtr & local_context) const
|
||||||
{
|
{
|
||||||
if (debug_check)
|
auto table_names = getAllTableNames(context.lock());
|
||||||
|
for (const auto & table_name : table_names)
|
||||||
{
|
{
|
||||||
/// Reduce number of debug checks
|
auto ast_ptr = tryGetCreateTableQuery(table_name, local_context);
|
||||||
if (thread_local_rng() % 16)
|
if (ast_ptr)
|
||||||
return true;
|
LOG_DEBUG(log, "[local] Table {} create query is {}", table_name, queryToString(ast_ptr));
|
||||||
|
else
|
||||||
|
LOG_DEBUG(log, "[local] Table {} has no create query", table_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest);
|
|
||||||
|
|
||||||
/// Database is probably being dropped
|
|
||||||
if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive()))
|
|
||||||
return true;
|
|
||||||
|
|
||||||
UInt64 local_digest = 0;
|
|
||||||
{
|
|
||||||
std::lock_guard lock{mutex};
|
|
||||||
for (const auto & table : TSA_SUPPRESS_WARNING_FOR_READ(tables))
|
|
||||||
local_digest += getMetadataHash(table.first);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (local_digest != tables_metadata_digest)
|
|
||||||
{
|
|
||||||
LOG_ERROR(log, "Digest of local metadata ({}) is not equal to in-memory digest ({})", local_digest, tables_metadata_digest);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Do not check digest in Keeper after internal subquery, it's probably not committed yet
|
|
||||||
if (local_context->isInternalSubquery())
|
|
||||||
return true;
|
|
||||||
|
|
||||||
/// Check does not make sense to check digest in Keeper during recovering
|
|
||||||
if (is_recovering)
|
|
||||||
return true;
|
|
||||||
|
|
||||||
String zk_digest = getZooKeeper()->get(replica_path + "/digest");
|
|
||||||
String local_digest_str = toString(local_digest);
|
|
||||||
if (zk_digest != local_digest_str)
|
|
||||||
{
|
|
||||||
LOG_ERROR(log, "Digest of local metadata ({}) is not equal to digest in Keeper ({})", local_digest_str, zk_digest);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_context) const
|
void DatabaseReplicated::dumpTablesInZooKeeperForDebugOnly() const
|
||||||
{
|
{
|
||||||
/// Replicas will set correct name of current database in query context (database name can be different on replicas)
|
UInt32 max_log_ptr;
|
||||||
if (auto * ddl_query = dynamic_cast<ASTQueryWithTableAndOutput *>(query.get()))
|
auto table_name_to_metadata = tryGetConsistentMetadataSnapshot(getZooKeeper(), max_log_ptr);
|
||||||
|
for (const auto & [table_name, create_table_query] : table_name_to_metadata)
|
||||||
{
|
{
|
||||||
if (ddl_query->getDatabase() != getDatabaseName())
|
auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_table_query);
|
||||||
throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed");
|
if (query_ast)
|
||||||
ddl_query->database.reset();
|
|
||||||
|
|
||||||
if (auto * create = query->as<ASTCreateQuery>())
|
|
||||||
{
|
{
|
||||||
if (create->storage)
|
LOG_DEBUG(log, "[zookeeper] Table {} create query is {}", table_name, queryToString(query_ast));
|
||||||
checkTableEngine(*create, *create->storage, query_context);
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
LOG_DEBUG(log, "[zookeeper] Table {} has no create query", table_name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (create->targets)
|
void DatabaseReplicated::tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(const ContextPtr & local_context) const
|
||||||
|
{
|
||||||
|
UInt32 max_log_ptr;
|
||||||
|
auto table_name_to_metadata_in_zk = tryGetConsistentMetadataSnapshot(getZooKeeper(), max_log_ptr);
|
||||||
|
auto table_names_local = getAllTableNames(local_context);
|
||||||
|
|
||||||
|
if (table_name_to_metadata_in_zk.size() != table_names_local.size())
|
||||||
|
LOG_DEBUG(log, "Amount of tables in zk {} locally {}", table_name_to_metadata_in_zk.size(), table_names_local.size());
|
||||||
|
|
||||||
|
std::unordered_set<std::string> checked_tables;
|
||||||
|
|
||||||
|
for (const auto & table_name : table_names_local)
|
||||||
|
{
|
||||||
|
auto local_ast_ptr = tryGetCreateTableQuery(table_name, local_context);
|
||||||
|
if (table_name_to_metadata_in_zk.contains(table_name))
|
||||||
|
{
|
||||||
|
checked_tables.insert(table_name);
|
||||||
|
auto create_table_query_in_zk = table_name_to_metadata_in_zk[table_name];
|
||||||
|
auto zk_ast_ptr = parseQueryFromMetadataInZooKeeper(table_name, create_table_query_in_zk);
|
||||||
|
|
||||||
|
if (local_ast_ptr == nullptr && zk_ast_ptr == nullptr)
|
||||||
{
|
{
|
||||||
for (const auto & inner_table_engine : create->targets->getInnerEngines())
|
LOG_DEBUG(log, "AST for table {} is the same (nullptr) in local and ZK", table_name);
|
||||||
checkTableEngine(*create, *inner_table_engine, query_context);
|
}
|
||||||
|
else if (local_ast_ptr != nullptr && zk_ast_ptr != nullptr && queryToString(local_ast_ptr) != queryToString(zk_ast_ptr))
|
||||||
|
{
|
||||||
|
LOG_DEBUG(log, "AST differs for table {}, local {}, in zookeeper {}", table_name, queryToString(local_ast_ptr), queryToString(zk_ast_ptr));
|
||||||
|
}
|
||||||
|
else if (local_ast_ptr == nullptr)
|
||||||
|
{
|
||||||
|
LOG_DEBUG(log, "AST differs for table {}, local nullptr, in zookeeper {}", table_name, queryToString(zk_ast_ptr));
|
||||||
|
}
|
||||||
|
else if (zk_ast_ptr == nullptr)
|
||||||
|
{
|
||||||
|
LOG_DEBUG(log, "AST differs for table {}, local {}, in zookeeper nullptr", table_name, queryToString(local_ast_ptr));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
LOG_DEBUG(log, "AST for table {} is the same in local and ZK", table_name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
else
|
||||||
|
|
||||||
if (const auto * query_alter = query->as<ASTAlterQuery>())
|
|
||||||
{
|
|
||||||
for (const auto & command : query_alter->command_list->children)
|
|
||||||
{
|
{
|
||||||
if (!isSupportedAlterTypeForOnClusterDDLQuery(command->as<ASTAlterCommand&>().type))
|
if (local_ast_ptr == nullptr)
|
||||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type of ALTER query");
|
LOG_DEBUG(log, "Table {} exists locally, but missing in ZK", table_name);
|
||||||
|
else
|
||||||
|
LOG_DEBUG(log, "Table {} exists locally with AST {}, but missing in ZK", table_name, queryToString(local_ast_ptr));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
for (const auto & [table_name, table_metadata] : table_name_to_metadata_in_zk)
|
||||||
if (auto * query_drop = query->as<ASTDropQuery>())
|
|
||||||
{
|
{
|
||||||
if (query_drop->kind == ASTDropQuery::Kind::Detach && query_context->getSettingsRef().database_replicated_always_detach_permanently)
|
if (!checked_tables.contains(table_name))
|
||||||
query_drop->permanently = true;
|
{
|
||||||
if (query_drop->kind == ASTDropQuery::Kind::Detach && !query_drop->permanently)
|
auto zk_ast_ptr = parseQueryFromMetadataInZooKeeper(table_name, table_metadata);
|
||||||
throw Exception(ErrorCodes::INCORRECT_QUERY, "DETACH TABLE is not allowed for Replicated databases. "
|
if (zk_ast_ptr == nullptr)
|
||||||
"Use DETACH TABLE PERMANENTLY or SYSTEM RESTART REPLICA or set "
|
LOG_DEBUG(log, "Table {} exists in ZK with AST {}, but missing locally", table_name, queryToString(zk_ast_ptr));
|
||||||
"database_replicated_always_detach_permanently to 1");
|
else
|
||||||
|
LOG_DEBUG(log, "Table {} exists in ZK, but missing locally", table_name);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -839,6 +849,107 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora
|
|||||||
"to distinguish different shards and replicas");
|
"to distinguish different shards and replicas");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool debug_check /* = true */) const
|
||||||
|
{
|
||||||
|
if (debug_check)
|
||||||
|
{
|
||||||
|
/// Reduce number of debug checks
|
||||||
|
if (thread_local_rng() % 16)
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest);
|
||||||
|
|
||||||
|
/// Database is probably being dropped
|
||||||
|
if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive()))
|
||||||
|
return true;
|
||||||
|
|
||||||
|
UInt64 local_digest = 0;
|
||||||
|
{
|
||||||
|
std::lock_guard lock{mutex};
|
||||||
|
for (const auto & table : TSA_SUPPRESS_WARNING_FOR_READ(tables))
|
||||||
|
local_digest += getMetadataHash(table.first);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (local_digest != tables_metadata_digest)
|
||||||
|
{
|
||||||
|
LOG_ERROR(log, "Digest of local metadata ({}) is not equal to in-memory digest ({})", local_digest, tables_metadata_digest);
|
||||||
|
|
||||||
|
#ifndef NDEBUG
|
||||||
|
dumpLocalTablesForDebugOnly(local_context);
|
||||||
|
dumpTablesInZooKeeperForDebugOnly();
|
||||||
|
tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(local_context);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Do not check digest in Keeper after internal subquery, it's probably not committed yet
|
||||||
|
if (local_context->isInternalSubquery())
|
||||||
|
return true;
|
||||||
|
|
||||||
|
/// Check does not make sense to check digest in Keeper during recovering
|
||||||
|
if (is_recovering)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
String zk_digest = getZooKeeper()->get(replica_path + "/digest");
|
||||||
|
String local_digest_str = toString(local_digest);
|
||||||
|
if (zk_digest != local_digest_str)
|
||||||
|
{
|
||||||
|
LOG_ERROR(log, "Digest of local metadata ({}) is not equal to digest in Keeper ({})", local_digest_str, zk_digest);
|
||||||
|
#ifndef NDEBUG
|
||||||
|
dumpLocalTablesForDebugOnly(local_context);
|
||||||
|
dumpTablesInZooKeeperForDebugOnly();
|
||||||
|
tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(local_context);
|
||||||
|
#endif
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_context) const
|
||||||
|
{
|
||||||
|
/// Replicas will set correct name of current database in query context (database name can be different on replicas)
|
||||||
|
if (auto * ddl_query = dynamic_cast<ASTQueryWithTableAndOutput *>(query.get()))
|
||||||
|
{
|
||||||
|
if (ddl_query->getDatabase() != getDatabaseName())
|
||||||
|
throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed");
|
||||||
|
ddl_query->database.reset();
|
||||||
|
|
||||||
|
if (auto * create = query->as<ASTCreateQuery>())
|
||||||
|
{
|
||||||
|
if (create->storage)
|
||||||
|
checkTableEngine(*create, *create->storage, query_context);
|
||||||
|
|
||||||
|
if (create->targets)
|
||||||
|
{
|
||||||
|
for (const auto & inner_table_engine : create->targets->getInnerEngines())
|
||||||
|
checkTableEngine(*create, *inner_table_engine, query_context);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (const auto * query_alter = query->as<ASTAlterQuery>())
|
||||||
|
{
|
||||||
|
for (const auto & command : query_alter->command_list->children)
|
||||||
|
{
|
||||||
|
if (!isSupportedAlterTypeForOnClusterDDLQuery(command->as<ASTAlterCommand&>().type))
|
||||||
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type of ALTER query");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (auto * query_drop = query->as<ASTDropQuery>())
|
||||||
|
{
|
||||||
|
if (query_drop->kind == ASTDropQuery::Kind::Detach && query_context->getSettingsRef().database_replicated_always_detach_permanently)
|
||||||
|
query_drop->permanently = true;
|
||||||
|
if (query_drop->kind == ASTDropQuery::Kind::Detach && !query_drop->permanently)
|
||||||
|
throw Exception(ErrorCodes::INCORRECT_QUERY, "DETACH TABLE is not allowed for Replicated databases. "
|
||||||
|
"Use DETACH TABLE PERMANENTLY or SYSTEM RESTART REPLICA or set "
|
||||||
|
"database_replicated_always_detach_permanently to 1");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, ContextPtr query_context, QueryFlags flags)
|
BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, ContextPtr query_context, QueryFlags flags)
|
||||||
{
|
{
|
||||||
waitDatabaseStarted();
|
waitDatabaseStarted();
|
||||||
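The digest logic above boils down to: every metadata change adjusts an in-memory counter, and checkDigestValid occasionally recomputes the sum of per-table metadata hashes and compares it with that counter (and with the value stored in Keeper), skipping roughly 15 out of 16 debug-mode calls to keep the check cheap. A minimal standalone sketch of the same idea, with illustrative names and std::hash standing in for ClickHouse's hashing:

// Illustrative sketch only: a metadata digest maintained incrementally and
// occasionally re-verified, mirroring the structure of checkDigestValid above.
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <random>
#include <string>

struct MetadataDigest
{
    std::map<std::string, std::string> tables;    // table name -> CREATE query text
    uint64_t digest = 0;                          // incrementally maintained sum of hashes
    std::mt19937_64 rng{std::random_device{}()};

    static uint64_t hashOf(const std::string & s) { return std::hash<std::string>{}(s); }

    void attach(const std::string & name, const std::string & metadata)
    {
        tables[name] = metadata;
        digest += hashOf(metadata);               // keep the counter in sync on every change
    }

    void detach(const std::string & name)
    {
        digest -= hashOf(tables.at(name));
        tables.erase(name);
    }

    bool checkDigestValid(bool debug_check = true)
    {
        if (debug_check && (rng() % 16))          // sample: run the full check ~1/16 of the time
            return true;

        uint64_t recomputed = 0;
        for (const auto & [name, metadata] : tables)
            recomputed += hashOf(metadata);

        if (recomputed != digest)
        {
            std::cerr << "digest mismatch: " << recomputed << " != " << digest << '\n';
            return false;
        }
        return true;
    }
};

int main()
{
    MetadataDigest d;
    d.attach("t1", "CREATE TABLE t1 (x UInt64) ENGINE = ReplicatedMergeTree ...");
    d.attach("t2", "CREATE TABLE t2 (s String) ENGINE = ReplicatedMergeTree ...");
    d.detach("t1");
    std::cout << (d.checkDigestValid(/*debug_check=*/false) ? "ok" : "mismatch") << '\n';
}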
@ -1253,7 +1364,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
    current_zookeeper->set(replica_path + "/digest", toString(tables_metadata_digest));
}

std::map<String, String> DatabaseReplicated::tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr)
std::map<String, String> DatabaseReplicated::tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr) const
{
    return getConsistentMetadataSnapshotImpl(zookeeper, {}, /* max_retries= */ 10, max_log_ptr);
}

@ -1314,7 +1425,7 @@ std::map<String, String> DatabaseReplicated::getConsistentMetadataSnapshotImpl(
    return table_name_to_metadata;
}

ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query)
ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query) const
{
    ParserCreateQuery parser;
    String description = "in ZooKeeper " + zookeeper_path + "/metadata/" + node_name;

@ -1411,6 +1522,7 @@ void DatabaseReplicated::renameDatabase(ContextPtr query_context, const String &

void DatabaseReplicated::stopReplication()
{
    std::lock_guard lock{ddl_worker_mutex};
    if (ddl_worker)
        ddl_worker->shutdown();
}

@ -109,14 +109,15 @@ private:
    void checkQueryValid(const ASTPtr & query, ContextPtr query_context) const;
    void checkTableEngine(const ASTCreateQuery & query, ASTStorage & storage, ContextPtr query_context) const;

    void recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 & max_log_ptr);

    std::map<String, String> tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr);
    std::map<String, String> tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr) const;

    std::map<String, String> getConsistentMetadataSnapshotImpl(const ZooKeeperPtr & zookeeper, const FilterByNameFunction & filter_by_table_name,
        size_t max_retries, UInt32 & max_log_ptr) const;

    ASTPtr parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query);
    ASTPtr parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query) const;
    String readMetadataFile(const String & table_name) const;

    ClusterPtr getClusterImpl(bool all_groups = false) const;

@ -132,6 +133,11 @@ private:
    UInt64 getMetadataHash(const String & table_name) const;
    bool checkDigestValid(const ContextPtr & local_context, bool debug_check = true) const TSA_REQUIRES(metadata_mutex);

    /// For debug purposes only, don't use in production code
    void dumpLocalTablesForDebugOnly(const ContextPtr & local_context) const;
    void dumpTablesInZooKeeperForDebugOnly() const;
    void tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(const ContextPtr & local_context) const;

    void waitDatabaseStarted() const override;
    void stopLoading() override;

@ -149,6 +155,7 @@ private:
    std::atomic_bool is_recovering = false;
    std::atomic_bool ddl_worker_initialized = false;
    std::unique_ptr<DatabaseReplicatedDDLWorker> ddl_worker;
    std::mutex ddl_worker_mutex;
    UInt32 max_log_ptr_at_creation = 0;

    /// Usually operations with metadata are single-threaded because of the way replication works,

@ -289,8 +289,11 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
    tables.erase(it);
    table_storage->is_detached = true;

    if (!table_storage->isSystemStorage() && database_name != DatabaseCatalog::SYSTEM_DATABASE)
    if (!table_storage->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name))
    {
        LOG_TEST(log, "Counting detached table {} to database {}", table_name, database_name);
        CurrentMetrics::sub(getAttachedCounterForStorage(table_storage));
    }

    auto table_id = table_storage->getStorageID();
    if (table_id.hasUUID())

@ -334,8 +337,11 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c
    /// non-Atomic database the is_detached is set to true before RENAME.
    table->is_detached = false;

    if (!table->isSystemStorage() && table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE)
    if (!table->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name))
    {
        LOG_TEST(log, "Counting attached table {} to database {}", table_name, database_name);
        CurrentMetrics::add(getAttachedCounterForStorage(table));
    }
}

void DatabaseWithOwnTablesBase::shutdown()

@ -154,6 +154,7 @@ StoragePtr DatabaseSQLite::fetchTable(const String & table_name, ContextPtr loca
        table_name,
        ColumnsDescription{*columns},
        ConstraintsDescription{},
        /* comment = */ "",
        local_context);

    return storage;
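The attach/detach changes above only adjust the attached-tables metric for user tables: system storages and predefined databases are skipped. A small standalone sketch of that guard; the list of predefined database names below is illustrative, not ClickHouse's authoritative list:

// Sketch (illustrative names): count attached tables in a global metric, but only
// for user tables, skipping system storages and predefined databases.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <set>
#include <string>

std::atomic<int64_t> attached_tables_metric{0};

bool isPredefinedDatabase(const std::string & db)
{
    static const std::set<std::string> predefined = {"system", "information_schema", "INFORMATION_SCHEMA"};
    return predefined.count(db) > 0;
}

void onAttach(const std::string & database, bool is_system_storage)
{
    if (!is_system_storage && !isPredefinedDatabase(database))
        ++attached_tables_metric;
}

void onDetach(const std::string & database, bool is_system_storage)
{
    if (!is_system_storage && !isPredefinedDatabase(database))
        --attached_tables_metric;
}

int main()
{
    onAttach("default", false);      // counted
    onAttach("system", true);        // skipped
    onDetach("default", false);
    std::cout << attached_tables_metric.load() << '\n';   // 0
}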
@ -874,7 +874,9 @@ void DiskObjectStorageTransaction::writeFileUsingBlobWritingFunction(
    /// Create metadata (see create_metadata_callback in DiskObjectStorageTransaction::writeFile()).
    if (mode == WriteMode::Rewrite)
    {
        if (!object_storage.isWriteOnce() && metadata_storage.exists(path))
        /// Otherwise we will produce lost blobs which nobody points to
        /// WriteOnce storages are not affected by the issue
        if (!object_storage.isPlain() && metadata_storage.exists(path))
            object_storage.removeObjectsIfExist(metadata_storage.getStorageObjects(path));

    metadata_transaction->createMetadataFile(path, std::move(object_key), object_size);
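The comment added above explains the motivation: rewriting a metadata file without first removing the objects it referenced leaves blobs in object storage that nothing points to. A toy sketch of that failure mode and the cleanup order (names are hypothetical):

// Illustrative sketch of the "lost blob" problem handled above: if a metadata
// file is rewritten without removing the objects it referenced, those objects
// remain in object storage with nothing pointing to them.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

std::map<std::string, std::vector<std::string>> metadata;  // local path -> blob keys
std::set<std::string> blob_store;                          // keys that exist in object storage

void writeRewrite(const std::string & path, const std::string & new_key)
{
    // Remove blobs referenced by the old metadata first, otherwise they leak.
    if (auto it = metadata.find(path); it != metadata.end())
        for (const auto & key : it->second)
            blob_store.erase(key);

    blob_store.insert(new_key);
    metadata[path] = {new_key};
}

int main()
{
    writeRewrite("data/part.bin", "blob-v1");
    writeRewrite("data/part.bin", "blob-v2");   // blob-v1 is deleted, not leaked
    std::cout << blob_store.size() << '\n';     // 1
}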
@ -58,7 +58,8 @@ TemporaryFileOnDisk::~TemporaryFileOnDisk()

    if (!disk->exists(relative_path))
    {
        LOG_WARNING(getLogger("TemporaryFileOnDisk"), "Temporary path '{}' does not exist in '{}'", relative_path, disk->getPath());
        if (show_warning_if_removed)
            LOG_WARNING(getLogger("TemporaryFileOnDisk"), "Temporary path '{}' does not exist in '{}'", relative_path, disk->getPath());
        return;
    }

@ -27,12 +27,19 @@ public:
    /// Return relative path (without disk)
    const String & getRelativePath() const { return relative_path; }

    /// Sets whether the destructor should show a warning if the temporary file has been already removed.
    /// By default a warning is shown.
    void setShowWarningIfRemoved(bool show_warning_if_removed_) { show_warning_if_removed = show_warning_if_removed_; }

private:
    DiskPtr disk;

    /// Relative path in disk to the temporary file or directory
    String relative_path;

    /// Whether the destructor should show a warning if the temporary file has been already removed.
    bool show_warning_if_removed = true;

    CurrentMetrics::Increment metric_increment;

    /// Specified if we know what the file is used for (sort/aggregate/join).
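The new setShowWarningIfRemoved() flag lets an owner that expects its temporary file to be removed by someone else silence the destructor's warning. A self-contained sketch of the same pattern, not the ClickHouse class:

// Sketch (illustrative): an RAII temporary-file holder whose destructor warns
// about an already-removed file unless the owner opted out.
#include <filesystem>
#include <fstream>
#include <iostream>

class TempFile
{
public:
    explicit TempFile(std::filesystem::path path_) : path(std::move(path_)) { std::ofstream{path}; }

    void setShowWarningIfRemoved(bool value) { show_warning_if_removed = value; }

    ~TempFile()
    {
        if (!std::filesystem::exists(path))
        {
            if (show_warning_if_removed)
                std::cerr << "Temporary path '" << path.string() << "' does not exist\n";
            return;
        }
        std::filesystem::remove(path);
    }

private:
    std::filesystem::path path;
    bool show_warning_if_removed = true;   // warn by default, as in the header above
};

int main()
{
    TempFile f{"./tmp_example_file"};
    f.setShowWarningIfRemoved(false);            // caller knows someone else may remove it
    std::filesystem::remove("./tmp_example_file");
}   // destructor stays silent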
@ -150,7 +150,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
    format_settings.json.try_infer_objects_as_tuples = settings.input_format_json_try_infer_named_tuples_from_objects;
    format_settings.json.throw_on_bad_escape_sequence = settings.input_format_json_throw_on_bad_escape_sequence;
    format_settings.json.ignore_unnecessary_fields = settings.input_format_json_ignore_unnecessary_fields;
    format_settings.json.ignore_key_case = settings.input_format_json_ignore_key_case;
    format_settings.json.case_insensitive_column_matching = settings.input_format_json_case_insensitive_column_matching;
    format_settings.null_as_default = settings.input_format_null_as_default;
    format_settings.force_null_for_omitted_fields = settings.input_format_force_null_for_omitted_fields;
    format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros;

@ -234,7 +234,7 @@ struct FormatSettings
        bool infer_incomplete_types_as_strings = true;
        bool throw_on_bad_escape_sequence = true;
        bool ignore_unnecessary_fields = true;
        bool ignore_key_case = false;
        bool case_insensitive_column_matching = false;
    } json{};

    struct
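The renamed setting controls whether JSON keys are matched to column names case-insensitively. A standalone sketch of what such matching looks like (toy function, illustrative names, not the ClickHouse format reader):

// Sketch: resolve an input JSON key against table columns, optionally ignoring case.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

std::string toLower(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::tolower(c); });
    return s;
}

std::optional<std::string> matchColumn(
    const std::vector<std::string> & columns, const std::string & key, bool case_insensitive_column_matching)
{
    for (const auto & column : columns)
    {
        if (column == key)
            return column;
        if (case_insensitive_column_matching && toLower(column) == toLower(key))
            return column;
    }
    return std::nullopt;
}

int main()
{
    std::vector<std::string> columns{"UserId", "Name"};
    std::cout << matchColumn(columns, "userid", /*case_insensitive_column_matching=*/true).value_or("<none>") << '\n';   // UserId
    std::cout << matchColumn(columns, "userid", /*case_insensitive_column_matching=*/false).value_or("<none>") << '\n';  // <none>
}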
@ -18,6 +18,7 @@ namespace ErrorCodes
    extern const int ILLEGAL_COLUMN;
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int BAD_ARGUMENTS;
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

struct Base58Encode

@ -135,7 +136,7 @@ public:
    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        if (arguments.size() != 1)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong number of arguments for function {}: 1 expected.", getName());
            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Wrong number of arguments for function {}: 1 expected.", getName());

        if (!isString(arguments[0].type))
            throw Exception(

@ -15,6 +15,7 @@ namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ILLEGAL_COLUMN;
    extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
}

class FunctionChar : public IFunction

@ -36,7 +37,7 @@ public:
    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        if (arguments.empty())
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
            throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
                "Number of arguments for function {} can't be {}, should be at least 1",
                getName(), arguments.size());

@ -59,19 +59,19 @@ public:

    bool useDefaultImplementationForConstants() const override { return true; }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const ColumnPtr column = arguments[0].column;
        if (const ColumnString * col = checkAndGetColumn<ColumnString>(column.get()))
        {
            auto col_res = ColumnString::create();
            Impl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets());
            Impl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets(), input_rows_count);
            return col_res;
        }
        else if (const ColumnFixedString * col_fixed = checkAndGetColumn<ColumnFixedString>(column.get()))
        {
            auto col_res = ColumnFixedString::create(col_fixed->getN());
            Impl::vectorFixed(col_fixed->getChars(), col_fixed->getN(), col_res->getChars());
            Impl::vectorFixed(col_fixed->getChars(), col_fixed->getN(), col_res->getChars(), input_rows_count);
            return col_res;
        }
        else
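The recurring change in these hunks is that executeImpl now forwards input_rows_count to the Impl helpers instead of letting them re-derive the row count from offsets.size(). A toy sketch of a string column (concatenated characters plus per-row end offsets; the terminating zero byte that explains the "- 1" in the real code is omitted here) and a vector() that takes the row count explicitly:

// Sketch of the pattern being applied above: the row count is passed in
// explicitly (input_rows_count) instead of being re-derived inside the helper.
#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct ToyStringColumn
{
    std::string chars;                 // concatenated row values
    std::vector<size_t> offsets;       // end offset of each row

    void insert(const std::string & s)
    {
        chars += s;
        offsets.push_back(chars.size());
    }
};

// The implementation receives the number of rows instead of calling offsets.size().
void upperVector(const ToyStringColumn & src, ToyStringColumn & dst, size_t input_rows_count)
{
    size_t prev_offset = 0;
    for (size_t i = 0; i < input_rows_count; ++i)
    {
        std::string row = src.chars.substr(prev_offset, src.offsets[i] - prev_offset);
        for (char & c : row)
            c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
        dst.insert(row);
        prev_offset = src.offsets[i];
    }
}

int main()
{
    ToyStringColumn src, dst;
    src.insert("hello");
    src.insert("world");
    upperVector(src, dst, src.offsets.size());   // caller supplies the row count
    std::cout << dst.chars << '\n';              // HELLOWORLD
}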
@ -84,7 +84,7 @@ public:
        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override
    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
    {
        Generator generator;
        generator.init(arguments, max_substrings_includes_remaining_string);

@ -107,18 +107,17 @@ public:
        const ColumnString::Chars & src_chars = col_str->getChars();
        const ColumnString::Offsets & src_offsets = col_str->getOffsets();

        res_offsets.reserve(src_offsets.size());
        res_offsets.reserve(input_rows_count);
        res_strings_offsets.reserve(src_offsets.size() * 5); /// Constant 5 - at random.
        res_strings_offsets.reserve(input_rows_count * 5); /// Constant 5 - at random.
        res_strings_chars.reserve(src_chars.size());

        Pos token_begin = nullptr;
        Pos token_end = nullptr;

        size_t size = src_offsets.size();
        ColumnString::Offset current_src_offset = 0;
        ColumnArray::Offset current_dst_offset = 0;
        ColumnString::Offset current_dst_strings_offset = 0;
        for (size_t i = 0; i < size; ++i)
        for (size_t i = 0; i < input_rows_count; ++i)
        {
            Pos pos = reinterpret_cast<Pos>(&src_chars[current_src_offset]);
            current_src_offset = src_offsets[i];

|
|||||||
{
|
{
|
||||||
NumberType value;
|
NumberType value;
|
||||||
|
|
||||||
tryGetNumericValueFromJSONElement<JSONParser, NumberType>(value, element, convert_bool_to_integer, error);
|
if (!tryGetNumericValueFromJSONElement<JSONParser, NumberType>(value, element, convert_bool_to_integer, error))
|
||||||
|
return false;
|
||||||
auto & col_vec = assert_cast<ColumnVector<NumberType> &>(dest);
|
auto & col_vec = assert_cast<ColumnVector<NumberType> &>(dest);
|
||||||
col_vec.insertValue(value);
|
col_vec.insertValue(value);
|
||||||
return true;
|
return true;
|
||||||
|
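The fix above checks the result of the numeric conversion before inserting, so a failed parse propagates instead of writing an indeterminate value into the column. A small standalone sketch of the same pattern using std::from_chars (illustrative, not the JSON code):

// Sketch of the bug-fix pattern above: propagate a failed conversion instead of
// inserting an uninitialized value into the result column.
#include <charconv>
#include <cstdint>
#include <iostream>
#include <string_view>
#include <vector>

bool tryParseInt(std::string_view s, int64_t & value)
{
    auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
    return ec == std::errc() && ptr == s.data() + s.size();
}

bool appendParsed(std::string_view s, std::vector<int64_t> & column)
{
    int64_t value;                       // left untouched on failure
    if (!tryParseInt(s, value))          // check the result *before* inserting
        return false;
    column.push_back(value);
    return true;
}

int main()
{
    std::vector<int64_t> column;
    std::cout << appendParsed("42", column) << ' ' << appendParsed("not-a-number", column) << '\n';  // 1 0
    std::cout << column.size() << '\n';  // 1
}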
@ -47,85 +47,54 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args)
    return true;
}

/// Replaces single low cardinality column in a function call by its dictionary
/// This can only happen after the arguments have been adapted in IFunctionOverloadResolver::getReturnType
/// as it's only possible if there is one low cardinality column and, optionally, const columns
ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
    ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count)
{
    /// We return the LC indexes so the LC can be reconstructed with the function result
    size_t num_rows = input_rows_count;
    ColumnPtr indexes;

    size_t number_low_cardinality_columns = 0;
    /// Find first LowCardinality column and replace it to nested dictionary.
    size_t last_low_cardinality = 0;
    for (auto & column : args)
    size_t number_const_columns = 0;
    size_t number_full_columns = 0;

    for (size_t i = 0; i < args.size(); i++)
    {
        auto const & arg = args[i];
        if (const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(column.column.get()))
        if (checkAndGetColumn<ColumnLowCardinality>(arg.column.get()))
        {
            number_low_cardinality_columns++;
            /// Single LowCardinality column is supported now.
            last_low_cardinality = i;
            if (indexes)
                throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function.");

            const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(column.type.get());

            if (!low_cardinality_type)
                throw Exception(ErrorCodes::LOGICAL_ERROR,
                    "Incompatible type for LowCardinality column: {}",
                    column.type->getName());

            if (can_be_executed_on_default_arguments)
            {
                /// Normal case, when function can be executed on values' default.
                column.column = low_cardinality_column->getDictionary().getNestedColumn();
                indexes = low_cardinality_column->getIndexesPtr();
            }
            else
            {
                /// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
                /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
                auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
                column.column = dict_encoded.dictionary;
                indexes = dict_encoded.indexes;
            }

            num_rows = column.column->size();
            column.type = low_cardinality_type->getDictionaryType();
        }
        else if (checkAndGetColumn<ColumnConst>(arg.column.get()))
            number_const_columns++;
        else
            number_full_columns++;
    }

    if (!number_low_cardinality_columns && !number_const_columns)
    /// Change size of constants.
        return nullptr;

    if (number_full_columns > 0 || number_low_cardinality_columns > 1)
    {
        /// This should not be possible but currently there are multiple tests in CI failing because of it
        /// TODO: Fix those cases, then enable this exception
#if 0
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. Const {}",
            number_low_cardinality_columns, number_full_columns, number_const_columns);
#else
        return nullptr;
#endif
    }
    else if (number_low_cardinality_columns == 1)
    {
        auto & lc_arg = args[last_low_cardinality];

        const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(lc_arg.type.get());
        if (!low_cardinality_type)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", lc_arg.type->getName());

        const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(lc_arg.column.get());
        chassert(low_cardinality_column);

        if (can_be_executed_on_default_arguments)
        {
            /// Normal case, when function can be executed on values' default.
            lc_arg.column = low_cardinality_column->getDictionary().getNestedColumn();
            indexes = low_cardinality_column->getIndexesPtr();
        }
        else
        {
            /// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
            /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
            auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
            lc_arg.column = dict_encoded.dictionary;
            indexes = dict_encoded.indexes;
        }

        /// The new column will have a different number of rows, normally less but occasionally it might be more (NULL)
        input_rows_count = lc_arg.column->size();
        lc_arg.type = low_cardinality_type->getDictionaryType();
    }

    /// Change size of constants
    for (auto & column : args)
    {
        if (const auto * column_const = checkAndGetColumn<ColumnConst>(column.column.get()))
        {
            column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), input_rows_count);
            column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), num_rows);
            column.type = recursiveRemoveLowCardinality(column.type);
        }
    }
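The function above exists because a LowCardinality column is a dictionary of distinct values plus per-row indexes; swapping the argument for its dictionary lets the function run once per distinct value, and the saved indexes re-expand the result back to row granularity. A toy sketch of that idea (simplified types, not ClickHouse's columns):

// Illustrative sketch: execute a function on the dictionary of a
// dictionary-encoded column, then rebuild the per-row result via the indexes.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct ToyLowCardinality
{
    std::vector<std::string> dictionary;  // unique values
    std::vector<size_t> indexes;          // one entry per row, pointing into dictionary
};

std::vector<size_t> lengthFunction(const std::vector<std::string> & values)
{
    std::vector<size_t> result;
    for (const auto & v : values)
        result.push_back(v.size());       // the "expensive" per-value work
    return result;
}

int main()
{
    ToyLowCardinality column{{"spb", "moscow"}, {0, 1, 1, 0, 1}};

    // Run the function on the dictionary only (2 values instead of 5 rows) ...
    std::vector<size_t> on_dictionary = lengthFunction(column.dictionary);

    // ... then expand the result back to row granularity through the indexes.
    for (size_t idx : column.indexes)
        std::cout << on_dictionary[idx] << ' ';   // 3 6 6 3 6
    std::cout << '\n';
}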
@ -301,8 +270,6 @@ ColumnPtr IExecutableFunction::executeWithoutSparseColumns(const ColumnsWithType
    bool can_be_executed_on_default_arguments = canBeExecutedOnDefaultArguments();

    const auto & dictionary_type = res_low_cardinality_type->getDictionaryType();
    /// The arguments should have been adapted in IFunctionOverloadResolver::getReturnType
    /// So there is only one low cardinality column (and optionally some const columns) and no full column
    ColumnPtr indexes = replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
        columns_without_low_cardinality, can_be_executed_on_default_arguments, input_rows_count);

@ -8,17 +8,19 @@ namespace DB
template <char not_case_lower_bound, char not_case_upper_bound>
struct LowerUpperImpl
{
    static void vector(const ColumnString::Chars & data,
    static void vector(
        const ColumnString::Chars & data,
        const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data,
        ColumnString::Offsets & res_offsets)
        ColumnString::Offsets & res_offsets,
        size_t /*input_rows_count*/)
    {
        res_data.resize_exact(data.size());
        res_offsets.assign(offsets);
        array(data.data(), data.data() + data.size(), res_data.data());
    }

    static void vectorFixed(const ColumnString::Chars & data, size_t /*n*/, ColumnString::Chars & res_data)
    static void vectorFixed(const ColumnString::Chars & data, size_t /*n*/, ColumnString::Chars & res_data, size_t /*input_rows_count*/)
    {
        res_data.resize_exact(data.size());
        array(data.data(), data.data() + data.size(), res_data.data());
@ -90,7 +90,8 @@ struct LowerUpperUTF8Impl
        const ColumnString::Chars & data,
        const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data,
        ColumnString::Offsets & res_offsets)
        ColumnString::Offsets & res_offsets,
        size_t input_rows_count)
    {
        if (data.empty())
            return;

@ -98,7 +99,7 @@ struct LowerUpperUTF8Impl
        bool all_ascii = isAllASCII(data.data(), data.size());
        if (all_ascii)
        {
            LowerUpperImpl<not_case_lower_bound, not_case_upper_bound>::vector(data, offsets, res_data, res_offsets);
            LowerUpperImpl<not_case_lower_bound, not_case_upper_bound>::vector(data, offsets, res_data, res_offsets, input_rows_count);
            return;
        }

@ -107,7 +108,7 @@ struct LowerUpperUTF8Impl
        array(data.data(), data.data() + data.size(), offsets, res_data.data());
    }

    static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Functions lowerUTF8 and upperUTF8 cannot work with FixedString argument");
    }
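The all_ascii branch above falls back to the cheap byte-wise implementation whenever the input contains no bytes at or above 0x80, since ASCII-only case conversion needs no UTF-8 awareness. A standalone sketch of that fast-path decision (illustrative, not the ClickHouse implementation):

// Sketch of the fast path: if every byte is ASCII, plain byte-wise case
// conversion is correct and the UTF-8 aware path can be skipped entirely.
#include <cctype>
#include <iostream>
#include <string>

bool isAllASCII(const std::string & s)
{
    for (unsigned char c : s)
        if (c >= 0x80)
            return false;
    return true;
}

std::string lowerASCII(std::string s)
{
    for (char & c : s)
        c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
    return s;
}

int main()
{
    std::string input = "Hello, ClickHouse!";
    if (isAllASCII(input))
        std::cout << lowerASCII(input) << '\n';   // cheap path
    else
        std::cout << "needs UTF-8 aware lowering\n";
}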
@ -62,12 +62,13 @@ using Pos = const char *;
template <typename Extractor>
struct ExtractSubstringImpl
{
    static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets,
    static void vector(
        ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets)
        const ColumnString::Chars & data, const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets,
        size_t input_rows_count)
    {
        size_t size = offsets.size();
        res_offsets.resize(input_rows_count);
        res_offsets.resize(size);
        res_data.reserve(input_rows_count * Extractor::getReserveLengthForElement());
        res_data.reserve(size * Extractor::getReserveLengthForElement());

        size_t prev_offset = 0;
        size_t res_offset = 0;

@ -76,7 +77,7 @@ struct ExtractSubstringImpl
        Pos start;
        size_t length;

        for (size_t i = 0; i < size; ++i)
        for (size_t i = 0; i < input_rows_count; ++i)
        {
            Extractor::execute(reinterpret_cast<const char *>(&data[prev_offset]), offsets[i] - prev_offset - 1, start, length);

@ -99,7 +100,7 @@ struct ExtractSubstringImpl
            res_data.assign(start, length);
        }

    static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)
    {
        throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by this function");
    }

@ -111,12 +112,13 @@ struct ExtractSubstringImpl
template <typename Extractor>
struct CutSubstringImpl
{
    static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets,
    static void vector(
        ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets)
        const ColumnString::Chars & data, const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets,
        size_t input_rows_count)
    {
        res_data.reserve(data.size());
        size_t size = offsets.size();
        res_offsets.resize(input_rows_count);
        res_offsets.resize(size);

        size_t prev_offset = 0;
        size_t res_offset = 0;

@ -125,7 +127,7 @@ struct CutSubstringImpl
        Pos start;
        size_t length;

        for (size_t i = 0; i < size; ++i)
        for (size_t i = 0; i < input_rows_count; ++i)
        {
            const char * current = reinterpret_cast<const char *>(&data[prev_offset]);
            Extractor::execute(current, offsets[i] - prev_offset - 1, start, length);

@ -154,7 +156,7 @@ struct CutSubstringImpl
            res_data.append(start + length, data.data() + data.size());
        }

    static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)
    {
        throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by this function");
    }
@ -1,8 +1,8 @@
#pragma once

#include <base/find_symbols.h>
#include "domain.h"
#include <Functions/URL/domain.h>
#include "tldLookup.h"
#include <Functions/URL/tldLookup.h>
#include <Common/TLDListsHolder.h> /// TLDType

namespace DB

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include "fragment.h"
#include <Functions/FunctionStringToString.h>
#include <Functions/URL/fragment.h>

namespace DB
{

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include "queryString.h"
#include <Functions/FunctionStringToString.h>
#include <Functions/URL/queryString.h>

namespace DB
{

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include "queryStringAndFragment.h"
#include <Functions/FunctionStringToString.h>
#include <Functions/URL/queryStringAndFragment.h>

namespace DB
{

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>
#include "ExtractFirstSignificantSubdomain.h"
#include <Functions/URL/ExtractFirstSignificantSubdomain.h>


namespace DB

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include "ExtractFirstSignificantSubdomain.h"
#include <Functions/URL/ExtractFirstSignificantSubdomain.h>
#include "FirstSignificantSubdomainCustomImpl.h"
#include <Functions/URL/FirstSignificantSubdomainCustomImpl.h>

namespace DB
{

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>
#include "protocol.h"
#include <Functions/URL/protocol.h>
#include <base/find_symbols.h>


@ -1,7 +1,7 @@
#include <base/hex.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>
#include <base/find_symbols.h>
#include <base/hex.h>


namespace DB

@ -121,8 +121,10 @@ enum URLCodeStrategy
template <URLCodeStrategy code_strategy, bool space_as_plus>
struct CodeURLComponentImpl
{
    static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets,
    static void vector(
        ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets)
        const ColumnString::Chars & data, const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets,
        size_t input_rows_count)
    {
        if (code_strategy == encode)
        {

@ -134,13 +136,12 @@ struct CodeURLComponentImpl
            res_data.resize(data.size());
        }

        size_t size = offsets.size();
        res_offsets.resize(input_rows_count);
        res_offsets.resize(size);

        size_t prev_offset = 0;
        size_t res_offset = 0;

        for (size_t i = 0; i < size; ++i)
        for (size_t i = 0; i < input_rows_count; ++i)
        {
            const char * src_data = reinterpret_cast<const char *>(&data[prev_offset]);
            size_t src_size = offsets[i] - prev_offset;

@ -165,7 +166,7 @@ struct CodeURLComponentImpl
            res_data.resize(res_offset);
        }

    [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)
    {
        throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by URL functions");
    }

@ -1,5 +1,4 @@
#include "domain.h"
#include <Functions/URL/domain.h>

#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>


@ -1,9 +1,10 @@
#pragma once

#include "protocol.h"
#include <base/find_symbols.h>
#include <cstring>
#include <Common/StringUtils.h>
#include <Functions/URL/protocol.h>
#include <base/find_symbols.h>

#include <cstring>

namespace DB
{

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>
#include "domain.h"
#include <Functions/URL/domain.h>

namespace DB
{

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>
#include "ExtractFirstSignificantSubdomain.h"
#include <Functions/URL/ExtractFirstSignificantSubdomain.h>


namespace DB

@ -1,6 +1,6 @@
#include <Functions/FunctionFactory.h>
#include "ExtractFirstSignificantSubdomain.h"
#include <Functions/URL/ExtractFirstSignificantSubdomain.h>
#include "FirstSignificantSubdomainCustomImpl.h"
#include <Functions/URL/FirstSignificantSubdomainCustomImpl.h>


namespace DB
Some files were not shown because too many files have changed in this diff.