Merge branch 'master' into parquet-fixed-binary
Commit: 23c12ac8ee

.github/workflows/release.yml (vendored)
@@ -12,38 +12,9 @@ jobs:
ReleasePublish:
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
- name: Deploy packages and assets
run: |
cat >> "$GITHUB_ENV" << 'EOF'
JFROG_API_KEY=${{ secrets.JFROG_ARTIFACTORY_API_KEY }}
TEMP_PATH=${{runner.temp}}/release_packages
REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
# Always use the most recent script version
ref: master
- name: Download packages and push to Artifactory
run: |
rm -rf "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY"
# Download and push packages to artifactory
python3 ./tests/ci/push_to_artifactory.py --release '${{ github.ref }}' \
  --commit '${{ github.sha }}' --artifactory-url '${{ secrets.JFROG_ARTIFACTORY_URL }}' --all
# Download macos binaries to ${{runner.temp}}/download_binary
python3 ./tests/ci/download_binary.py --version '${{ github.ref }}' \
  --commit '${{ github.sha }}' binary_darwin binary_darwin_aarch64
mv '${{runner.temp}}/download_binary/'clickhouse-* '${{runner.temp}}/push_to_artifactory'
- name: Upload packages to release assets
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: ${{runner.temp}}/push_to_artifactory/*
overwrite: true
tag: ${{ github.ref }}
file_glob: true
curl '${{ secrets.PACKAGES_RELEASE_URL }}/release/${{ github.ref }}?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' -d ''
############################################################################################
##################################### Docker images #######################################
############################################################################################
contrib/arrow (vendored)

@@ -1 +1 @@
Subproject commit 450a5638704386356f8e520080468fc9bc8bcaf8
Subproject commit d03245f801f798c63ee9a7d2b8914a9e5c5cd666
@@ -470,6 +470,9 @@ The `set` index can be used with all functions. Function subsets for other index
| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasTokenCaseInsensitive | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasTokenCaseInsensitiveOrNull | ✗ | ✗ | ✗ | ✔ | ✗ |

Functions with a constant argument that is less than ngram size can’t be used by `ngrambf_v1` for query optimization.
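To make the token-based rows above concrete, here is a minimal sketch (table and column names are illustrative, not from the changed docs) of a `tokenbf_v1` skip index queried with `hasToken`:

```sql
CREATE TABLE logs
(
    id UInt64,
    message String,
    -- bloom-filter based token index that can serve hasToken-style predicates
    INDEX message_tokens message TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;

SELECT count() FROM logs WHERE hasToken(message, 'error');
```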
@@ -22,8 +22,8 @@ functions in ClickHouse. The sample datasets include:
- The [Cell Towers dataset](../getting-started/example-datasets/cell-towers.md) imports a CSV into ClickHouse
- The [NYPD Complaint Data](../getting-started/example-datasets/nypd_complaint_data.md) demonstrates how to use data inference to simplify creating tables
- The ["What's on the Menu?" dataset](../getting-started/example-datasets/menus.md) has an example of denormalizing data
- The [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- The [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) shows how JSON data can be loaded
- The [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3
- [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3
- [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) shows how to generate random data if none of the above fit your needs.

View the **Tutorials and Datasets** menu for a complete list of sample datasets.

@@ -189,6 +189,7 @@ SELECT * FROM nestedt FORMAT TSV
- [input_format_tsv_use_best_effort_in_schema_inference](/docs/en/operations/settings/settings.md/#input_format_tsv_use_best_effort_in_schema_inference) - use some tweaks and heuristics to infer schema in TSV format. If disabled, all fields will be inferred as Strings. Default value - `true`.
- [output_format_tsv_crlf_end_of_line](/docs/en/operations/settings/settings.md/#output_format_tsv_crlf_end_of_line) - if it is set true, end of line in TSV output format will be `\r\n` instead of `\n`. Default value - `false`.
- [input_format_tsv_skip_first_lines](/docs/en/operations/settings/settings.md/#input_format_tsv_skip_first_lines) - skip specified number of lines at the beginning of data. Default value - `0`.
- [input_format_tsv_detect_header](/docs/en/operations/settings/settings.md/#input_format_tsv_detect_header) - automatically detect header with names and types in TSV format. Default value - `true`.

## TabSeparatedRaw {#tabseparatedraw}

@@ -462,6 +463,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
- [input_format_csv_arrays_as_nested_csv](/docs/en/operations/settings/settings.md/#input_format_csv_arrays_as_nested_csv) - when reading Array from CSV, expect that its elements were serialized in nested CSV and then put into string. Default value - `false`.
- [output_format_csv_crlf_end_of_line](/docs/en/operations/settings/settings.md/#output_format_csv_crlf_end_of_line) - if it is set to true, end of line in CSV output format will be `\r\n` instead of `\n`. Default value - `false`.
- [input_format_csv_skip_first_lines](/docs/en/operations/settings/settings.md/#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of data. Default value - `0`.
- [input_format_csv_detect_header](/docs/en/operations/settings/settings.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`.

## CSVWithNames {#csvwithnames}

@@ -489,6 +491,8 @@ the types from input data will be compared with the types of the corresponding c

Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](/docs/en/operations/settings/settings.md/#format_custom_escaping_rule) setting and delimiters from [format_custom_field_delimiter](/docs/en/operations/settings/settings.md/#format_custom_field_delimiter), [format_custom_row_before_delimiter](/docs/en/operations/settings/settings.md/#format_custom_row_before_delimiter), [format_custom_row_after_delimiter](/docs/en/operations/settings/settings.md/#format_custom_row_after_delimiter), [format_custom_row_between_delimiter](/docs/en/operations/settings/settings.md/#format_custom_row_between_delimiter), [format_custom_result_before_delimiter](/docs/en/operations/settings/settings.md/#format_custom_result_before_delimiter) and [format_custom_result_after_delimiter](/docs/en/operations/settings/settings.md/#format_custom_result_after_delimiter) settings, not from format strings.

If setting [input_format_custom_detect_header](/docs/en/operations/settings/settings.md/#input_format_custom_detect_header) is enabled, ClickHouse will automatically detect header with names and types if any.

There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [TemplateIgnoreSpaces](#templateignorespaces).

## CustomSeparatedWithNames {#customseparatedwithnames}

@@ -1714,7 +1718,7 @@ something_weird{problem="division by zero"} +Inf -3982045

## Protobuf {#protobuf}

Protobuf - is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.
Protobuf - is a [Protocol Buffers](https://protobuf.dev/) format.

This format requires an external format schema. The schema is cached between queries.
ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.
@@ -558,6 +558,8 @@ and if the value is not a number, ClickHouse treats it as a string.

If you don't want ClickHouse to try to determine complex types using some parsers and heuristics, you can disable setting `input_format_csv_use_best_effort_in_schema_inference`
and ClickHouse will treat all columns as Strings.

If setting `input_format_csv_detect_header` is enabled, ClickHouse will try to detect the header with column names (and maybe types) while inferring schema. This setting is enabled by default.

**Examples:**

Integers, Floats, Bools, Strings:

@@ -669,6 +671,61 @@ DESC format(CSV, '"[1,2,3]",42.42,Hello World!')
└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Examples of header auto-detection (when `input_format_csv_detect_header` is enabled):

Only names:
```sql
SELECT * FROM format(CSV,
$$"number","string","array"
42,"Hello","[1, 2, 3]"
43,"World","[4, 5, 6]"
$$)
```

```response
┌─number─┬─string─┬─array───┐
│ 42 │ Hello │ [1,2,3] │
│ 43 │ World │ [4,5,6] │
└────────┴────────┴─────────┘
```

Names and types:

```sql
DESC format(CSV,
$$"number","string","array"
"UInt32","String","Array(UInt16)"
42,"Hello","[1, 2, 3]"
43,"World","[4, 5, 6]"
$$)
```

```response
┌─name───┬─type──────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ number │ UInt32 │ │ │ │ │ │
│ string │ String │ │ │ │ │ │
│ array │ Array(UInt16) │ │ │ │ │ │
└────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Note that the header can be detected only if there is at least one column with a non-String type. If all columns have String type, the header is not detected:

```sql
SELECT * FROM format(CSV,
$$"first_column","second_column"
"Hello","World"
"World","Hello"
$$)
```

```response
┌─c1───────────┬─c2────────────┐
│ first_column │ second_column │
│ Hello │ World │
│ World │ Hello │
└──────────────┴───────────────┘
```

## TSV/TSKV {#tsv-tskv}

In TSV/TSKV formats ClickHouse extracts column value from the row according to tabular delimiters and then parses extracted value using
@@ -677,6 +734,7 @@ the recursive parser to determine the most appropriate type. If the type cannot

If you don't want ClickHouse to try to determine complex types using some parsers and heuristics, you can disable setting `input_format_tsv_use_best_effort_in_schema_inference`
and ClickHouse will treat all columns as Strings.

If setting `input_format_tsv_detect_header` is enabled, ClickHouse will try to detect the header with column names (and maybe types) while inferring schema. This setting is enabled by default.

**Examples:**

@@ -799,6 +857,61 @@ DESC format(TSV, '[1,2,3] 42.42 Hello World!')
└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Examples of header auto-detection (when `input_format_tsv_detect_header` is enabled):

Only names:
```sql
SELECT * FROM format(TSV,
$$number string array
42 Hello [1, 2, 3]
43 World [4, 5, 6]
$$);
```

```response
┌─number─┬─string─┬─array───┐
│ 42 │ Hello │ [1,2,3] │
│ 43 │ World │ [4,5,6] │
└────────┴────────┴─────────┘
```

Names and types:

```sql
DESC format(TSV,
$$number string array
UInt32 String Array(UInt16)
42 Hello [1, 2, 3]
43 World [4, 5, 6]
$$)
```

```response
┌─name───┬─type──────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ number │ UInt32 │ │ │ │ │ │
│ string │ String │ │ │ │ │ │
│ array │ Array(UInt16) │ │ │ │ │ │
└────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Note that the header can be detected only if there is at least one column with a non-String type. If all columns have String type, the header is not detected:

```sql
SELECT * FROM format(TSV,
$$first_column second_column
Hello World
World Hello
$$)
```

```response
┌─c1───────────┬─c2────────────┐
│ first_column │ second_column │
│ Hello │ World │
│ World │ Hello │
└──────────────┴───────────────┘
```

## Values {#values}

In Values format ClickHouse extracts column value from the row and then parses it using
@@ -911,6 +1024,8 @@ DESC format(TSV, '[1,2,3] 42.42 Hello World!')

In CustomSeparated format ClickHouse first extracts all column values from the row according to specified delimiters and then tries to infer
the data type for each value according to escaping rule.

If setting `input_format_custom_detect_header` is enabled, ClickHouse will try to detect the header with column names (and maybe types) while inferring schema. This setting is enabled by default.

**Example**

```sql
@@ -937,6 +1052,34 @@ $$)
└──────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Example of header auto-detection (when `input_format_custom_detect_header` is enabled):

```sql
SET format_custom_row_before_delimiter = '<row_before_delimiter>',
format_custom_row_after_delimiter = '<row_after_delimiter>\n',
format_custom_row_between_delimiter = '<row_between_delimiter>\n',
format_custom_result_before_delimiter = '<result_before_delimiter>\n',
format_custom_result_after_delimiter = '<result_after_delimiter>\n',
format_custom_field_delimiter = '<field_delimiter>',
format_custom_escaping_rule = 'Quoted'

DESC format(CustomSeparated, $$<result_before_delimiter>
<row_before_delimiter>'number'<field_delimiter>'string'<field_delimiter>'array'<row_after_delimiter>
<row_between_delimiter>
<row_before_delimiter>42.42<field_delimiter>'Some string 1'<field_delimiter>[1, NULL, 3]<row_after_delimiter>
<row_between_delimiter>
<row_before_delimiter>NULL<field_delimiter>'Some string 3'<field_delimiter>[1, 2, NULL]<row_after_delimiter>
<result_after_delimiter>
$$)
```

```response
┌─number─┬─string────────┬─array──────┐
│ 42.42 │ Some string 1 │ [1,NULL,3] │
│ ᴺᵁᴸᴸ │ Some string 3 │ [1,2,NULL] │
└────────┴───────────────┴────────────┘
```

## Template {#template}

In Template format ClickHouse first extracts all column values from the row according to the specified template and then tries to infer the
@@ -22,5 +22,6 @@ Additional cache types:
- [Dictionaries](../sql-reference/dictionaries/index.md) data cache.
- Schema inference cache.
- [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks.
- [(Experimental) Query result cache](query-result-cache.md).

To drop one of the caches, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md#drop-mark-cache) statements.
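For example, two of these caches can be dropped like this (a minimal sketch; see the SYSTEM statement reference for the full list of `DROP ... CACHE` statements):

```sql
SYSTEM DROP MARK CACHE;
SYSTEM DROP UNCOMPRESSED CACHE;
```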
docs/en/operations/query-result-cache.md (new file)
@@ -0,0 +1,99 @@
---
slug: /en/operations/query-result-cache
sidebar_position: 65
sidebar_label: Query Result Cache [experimental]
---

# Query Result Cache [experimental]

The query result cache allows SELECT queries to be computed just once and further executions of the same query to be served directly from the cache.
Depending on the type of the queries, this can dramatically reduce latency and resource consumption of the ClickHouse server.

## Background, Design and Limitations

Query result caches can generally be viewed as transactionally consistent or inconsistent.

- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the SELECT query changes
  or potentially changes. In ClickHouse, operations which change the data include inserts/updates/deletes in/of/from tables or collapsing
  merges. Transactionally consistent caching is especially suitable for OLTP databases, for example
  [MySQL](https://dev.mysql.com/doc/refman/5.6/en/query-cache.html) (which removed its query cache in version 8.0) and
  [Oracle](https://docs.oracle.com/database/121/TGDBA/tune_result_cache.htm).
- In transactionally inconsistent caches, slight inaccuracies in query results are accepted under the assumption that all cache entries are
  assigned a validity period after which they expire (e.g. 1 minute) and that the underlying data changes only a little during this period.
  This approach is overall more suitable for OLAP databases. As an example where transactionally inconsistent caching is sufficient,
  consider an hourly sales report in a reporting tool which is simultaneously accessed by multiple users. Sales data typically changes
  slowly enough that the database only needs to compute the report once (represented by the first SELECT query). Further queries can be
  served directly from the query result cache. In this example, a reasonable validity period could be 30 min.

Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
the same caching logic and configuration is often duplicated. With ClickHouse's query result cache, the caching logic moves to the server
side. This reduces maintenance effort and avoids redundancy.

:::warning
The query result cache is an experimental feature that should not be used in production. There are known cases (e.g. in distributed query
processing) where wrong results are returned.
:::

## Configuration Settings and Usage

Parameter [enable_experimental_query_result_cache](settings/settings.md#enable-experimental-query-result-cache) controls whether query
results are inserted into / retrieved from the cache for the current query or session. For example, the first execution of query

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true;
```

stores the query result in the query result cache. Subsequent executions of the same query (also with parameter
`enable_experimental_query_result_cache = true`) will read the computed result directly from the cache.

Sometimes, it is desirable to use the query result cache only passively, i.e. to allow reading from it but not writing into it (if the cached
result is not stored yet). Parameter [enable_experimental_query_result_cache_passive_usage](settings/settings.md#enable-experimental-query-result-cache-passive-usage)
can be used instead of `enable_experimental_query_result_cache` for that.
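For example, a sketch of purely passive usage (assuming an earlier run with `enable_experimental_query_result_cache = true` has already stored the result; the placeholder names mirror the example above):

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache_passive_usage = true;
```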
For maximum control, it is generally recommended to provide settings "enable_experimental_query_result_cache" or
"enable_experimental_query_result_cache_passive_usage" only with specific queries. It is also possible to enable caching at user or profile
level, but one should keep in mind that all SELECT queries may then return cached results, including monitoring or debugging queries to system
tables.

The query result cache can be cleared using statement `SYSTEM DROP QUERY RESULT CACHE`. The content of the query result cache is displayed
in system table `SYSTEM.QUERY_RESULT_CACHE`. The numbers of query result cache hits and misses are shown as events "QueryResultCacheHits" and
"QueryResultCacheMisses" in system table `SYSTEM.EVENTS`. Both counters are only updated for SELECT queries which run with settings
"enable_experimental_query_result_cache = true" or "enable_experimental_query_result_cache_passive_usage = true". Other queries do not
affect the cache miss counter.
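A quick way to check the hit/miss counters is to query the events table directly (a sketch; the event names are the ones listed above, and system table names are conventionally written in lower case):

``` sql
SELECT event, value
FROM system.events
WHERE event IN ('QueryResultCacheHits', 'QueryResultCacheMisses');
```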
The query result cache exists once per ClickHouse server process. However, cached results are by default not shared between users. This can
be changed (see below) but doing so is not recommended for security reasons.

Query results are referenced in the query result cache by the [Abstract Syntax Tree (AST)](https://en.wikipedia.org/wiki/Abstract_syntax_tree)
of their query. This means that caching is agnostic to upper/lowercase, for example `SELECT 1` and `select 1` are treated as the same query.
To make the matching more natural, all query-level settings related to the query result cache are removed from the AST.

If the query was aborted due to an exception or user cancellation, no entry is written into the query result cache.

The size of the query result cache, the maximum number of cache entries and the maximum size of cache entries (in bytes and in records) can
be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-result-cache).

To define the minimum amount of time a query must run for its result to be cached, you can use setting
[query_result_cache_min_query_duration](settings/settings.md#query-result-cache-min-query-duration). For example, the result of query

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_duration = 5000;
```

is only cached if the query runs longer than 5 seconds. It is also possible to specify how many times a query needs to run before its result is
cached - for that, use setting [query_result_cache_min_query_runs](settings/settings.md#query-result-cache-min-query-runs).

Entries in the query result cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds, but a
different value can be specified at session, profile or query level using setting [query_result_cache_ttl](settings/settings.md#query-result-cache-ttl).

Also, results of queries with non-deterministic functions such as `rand()` and `now()` are not cached. This can be overruled using
setting [query_result_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-result-cache-store-results-of-queries-with-nondeterministic-functions).

Finally, entries in the query result cache are not shared between users for security reasons. For example, user A must not be able to bypass a
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
be marked accessible by other users (i.e. shared) by supplying setting
[query_result_cache_share_between_users](settings/settings.md#query-result-cache-share-between-users).
@@ -1270,6 +1270,32 @@ If the table does not exist, ClickHouse will create it. If the structure of the
</query_log>
```

## query_result_cache {#server_configuration_parameters_query-result-cache}

[Query result cache](../query-result-cache.md) configuration.

The following settings are available:

- `size`: The maximum cache size in bytes. 0 means the query result cache is disabled. Default value: `1073741824` (1 GiB).
- `max_entries`: The maximum number of SELECT query results stored in the cache. Default value: `1024`.
- `max_entry_size`: The maximum size in bytes a SELECT query result may have in order to be saved in the cache. Default value: `1048576` (1 MiB).
- `max_entry_records`: The maximum number of records a SELECT query result may have in order to be saved in the cache. Default value: `30000000` (30 million).

:::warning
Data for the query result cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `size` or disable the query result cache altogether.
:::

**Example**

```xml
<query_result_cache>
    <size>1073741824</size>
    <max_entries>1024</max_entries>
    <max_entry_size>1048576</max_entry_size>
    <max_entry_records>30000000</max_entry_records>
</query_result_cache>
```

## query_thread_log {#server_configuration_parameters-query_thread_log}

Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#settings-log-query-threads) setting.
@@ -1300,6 +1300,81 @@ Possible values:

Default value: `3`.

## enable_experimental_query_result_cache {#enable-experimental-query-result-cache}

If turned on, results of SELECT queries are stored in and (if available) retrieved from the [query result cache](../query-result-cache.md).

Possible values:

- 0 - Disabled
- 1 - Enabled

Default value: `0`.

## enable_experimental_query_result_cache_passive_usage {#enable-experimental-query-result-cache-passive-usage}

If turned on, results of SELECT queries are (if available) retrieved from the [query result cache](../query-result-cache.md).

Possible values:

- 0 - Disabled
- 1 - Enabled

Default value: `0`.

## query_result_cache_store_results_of_queries_with_nondeterministic_functions {#query-result-cache-store-results-of-queries-with-nondeterministic-functions}

If turned on, results of SELECT queries with non-deterministic functions (e.g. `rand()`, `now()`) can be cached in the [query result cache](../query-result-cache.md).

Possible values:

- 0 - Disabled
- 1 - Enabled

Default value: `0`.
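For example, a sketch of caching a query that calls `now()` (illustrative only; by default such results are not cached):

``` sql
SELECT now(), count()
FROM table
SETTINGS enable_experimental_query_result_cache = true,
         query_result_cache_store_results_of_queries_with_nondeterministic_functions = true;
```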
## query_result_cache_min_query_runs {#query-result-cache-min-query-runs}

Minimum number of times a SELECT query must run before its result is stored in the [query result cache](../query-result-cache.md).

Possible values:

- 0 or any positive integer.

Default value: `0`.
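For example, a sketch (using the placeholder query from the query result cache page) where the result is stored only once the same query has run at least three times:

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_runs = 3;
```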
## query_result_cache_min_query_duration {#query-result-cache-min-query-duration}

Minimum duration in milliseconds a query needs to run for its result to be stored in the [query result cache](../query-result-cache.md).

Possible values:

- 0 or any positive integer.

Default value: `0`.

## query_result_cache_ttl {#query-result-cache-ttl}

Entries in the [query result cache](../query-result-cache.md) become stale after this many seconds.

Possible values:

- 0 or any positive integer.

Default value: `60`.
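For example, a sketch that keeps a cached result valid for five minutes instead of the default 60 seconds:

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true, query_result_cache_ttl = 300;
```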
## query_result_cache_share_between_users {#query-result-cache-share-between-users}

If turned on, the results of SELECT queries cached in the [query result cache](../query-result-cache.md) can be read by other users.
It is not recommended to enable this setting for security reasons.

Possible values:

- 0 - Disabled
- 1 - Enabled

Default value: `0`.
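For example, a sketch of writing a cache entry that other users may read (only do this after weighing the security implications mentioned above):

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true, query_result_cache_share_between_users = true;
```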
## insert_quorum {#settings-insert_quorum}

Enables the quorum writes.

@@ -4323,6 +4398,12 @@ The number of lines to skip at the beginning of data in TSV input format.

Default value: `0`.

### input_format_tsv_detect_header {#input_format_tsv_detect_header}

Automatically detect header with names and types in TSV format.

Enabled by default.

### output_format_tsv_crlf_end_of_line {#output_format_tsv_crlf_end_of_line}

Use DOS/Windows-style line separator (CRLF) in TSV instead of Unix style (LF).

@@ -4483,6 +4564,12 @@ The number of lines to skip at the beginning of data in CSV input format.

Default value: `0`.

### input_format_csv_detect_header {#input_format_csv_detect_header}

Automatically detect header with names and types in CSV format.

Enabled by default.

### format_csv_null_representation {#format_csv_null_representation}

Defines the representation of `NULL` for [CSV](../../interfaces/formats.md/#csv) output and input formats. User can set any string as a value, for example, `My NULL`.

@@ -5057,6 +5144,12 @@ Sets the character that is interpreted as a suffix after the result set for [Cus

Default value: `''`.

### input_format_custom_detect_header {#input_format_custom_detect_header}

Automatically detect header with names and types in CustomSeparated format.

Enabled by default.

## Regexp format settings {#regexp-format-settings}

### format_regexp_escaping_rule {#format_regexp_escaping_rule}
@@ -72,3 +72,10 @@ If procfs is supported and enabled on the system, ClickHouse server collects the
- `OSWriteChars`
- `OSReadBytes`
- `OSWriteBytes`

## Related content

- Blog: [System Tables and a window into the internals of ClickHouse](https://clickhouse.com/blog/clickhouse-debugging-issues-with-system-tables)
- Blog: [Essential monitoring queries - part 1 - INSERT queries](https://clickhouse.com/blog/monitoring-troubleshooting-insert-queries-clickhouse)
- Blog: [Essential monitoring queries - part 2 - SELECT queries](https://clickhouse.com/blog/monitoring-troubleshooting-select-queries-clickhouse)

@@ -54,7 +54,9 @@ Functions:

- [toLowCardinality](../../sql-reference/functions/type-conversion-functions.md#tolowcardinality)

## See Also
## Related content

- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/)
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf)
- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
@@ -1582,3 +1582,8 @@ Result:
│ 2020-01-01 │
└────────────────────────────────────┘
```

## Related content

- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

@@ -588,3 +588,6 @@ Result:
│ aeca2A │
└───────────────────────────────────────┘
```

## Related content
- Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse)

@@ -115,3 +115,7 @@ Returns the exclusive upper bound of the corresponding hopping window.
hopEnd(bounds_tuple);
hopEnd(time_attr, hop_interval, window_interval [, timezone]);
```

## Related content

- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

@@ -1402,6 +1402,8 @@ The output value is a timestamp in UTC, not in the timezone of `DateTime64`.

```sql
toUnixTimestamp64Milli(value)
toUnixTimestamp64Micro(value)
toUnixTimestamp64Nano(value)
```

**Arguments**

@@ -1455,7 +1457,9 @@ Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and

**Syntax**

``` sql
fromUnixTimestamp64Milli(value [, ti])
fromUnixTimestamp64Milli(value [, timezone])
fromUnixTimestamp64Micro(value [, timezone])
fromUnixTimestamp64Nano(value [, timezone])
```

**Arguments**

@@ -502,3 +502,9 @@ Result:
│ t1 │ The temporary table │
└──────┴─────────────────────┘
```

## Related content

- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

@@ -350,3 +350,7 @@ The window view is useful in the following scenarios:

* **Monitoring**: Aggregate and calculate the metrics logs by time, and output the results to a target table. The dashboard can use the target table as a source table.
* **Analyzing**: Automatically aggregate and preprocess data in the time window. This can be useful when analyzing a large number of logs. The preprocessing eliminates repeated calculations in multiple queries and reduces query latency.

## Related Content

- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

@@ -299,3 +299,8 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;

## Implementation Details

The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) clause in a query, technically they can be performed in any order, unless result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer.

## Related content

- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
@@ -23,7 +23,9 @@ FROM table2
```
The condition could be any expression based on your requirements.

**Examples**
## Examples

Here is a simple example that returns the numbers 1 to 10 that are _not_ a part of the numbers 3 to 8:

Query:

@@ -33,7 +35,7 @@ SELECT number FROM numbers(1,10) EXCEPT SELECT number FROM numbers(3,6);

Result:

``` text
```response
┌─number─┐
│ 1 │
│ 2 │
@@ -42,28 +44,109 @@ Result:
└────────┘
```

Query:
`EXCEPT` and `INTERSECT` can often be used interchangeably with different Boolean logic, and they are both useful if you have two tables that share a common column (or columns). For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:

``` sql
CREATE TABLE t1(one String, two String, three String) ENGINE=Memory();
CREATE TABLE t2(four String, five String, six String) ENGINE=Memory();
```sql
CREATE TABLE crypto_prices
(
    trade_date Date,
    crypto_name String,
    volume Float32,
    price Float32,
    market_cap Float32,
    change_1_day Float32
)
ENGINE = MergeTree
PRIMARY KEY (crypto_name, trade_date);

INSERT INTO t1 VALUES ('q', 'm', 'b'), ('s', 'd', 'f'), ('l', 'p', 'o'), ('s', 'd', 'f'), ('s', 'd', 'f'), ('k', 't', 'd'), ('l', 'p', 'o');
INSERT INTO t2 VALUES ('q', 'm', 'b'), ('b', 'd', 'k'), ('s', 'y', 't'), ('s', 'd', 'f'), ('m', 'f', 'o'), ('k', 'k', 'd');
INSERT INTO crypto_prices
SELECT *
FROM s3(
    'https://learn-clickhouse.s3.us-east-2.amazonaws.com/crypto_prices.csv',
    'CSVWithNames'
);

SELECT * FROM t1 EXCEPT SELECT * FROM t2;
SELECT * FROM crypto_prices
WHERE crypto_name = 'Bitcoin'
ORDER BY trade_date DESC
LIMIT 10;
```

```response
┌─trade_date─┬─crypto_name─┬──────volume─┬────price─┬───market_cap─┬──change_1_day─┐
│ 2020-11-02 │ Bitcoin │ 30771456000 │ 13550.49 │ 251119860000 │ -0.013585099 │
│ 2020-11-01 │ Bitcoin │ 24453857000 │ 13737.11 │ 254569760000 │ -0.0031840964 │
│ 2020-10-31 │ Bitcoin │ 30306464000 │ 13780.99 │ 255372070000 │ 0.017308505 │
│ 2020-10-30 │ Bitcoin │ 30581486000 │ 13546.52 │ 251018150000 │ 0.008084608 │
│ 2020-10-29 │ Bitcoin │ 56499500000 │ 13437.88 │ 248995320000 │ 0.012552661 │
│ 2020-10-28 │ Bitcoin │ 35867320000 │ 13271.29 │ 245899820000 │ -0.02804481 │
│ 2020-10-27 │ Bitcoin │ 33749879000 │ 13654.22 │ 252985950000 │ 0.04427984 │
│ 2020-10-26 │ Bitcoin │ 29461459000 │ 13075.25 │ 242251000000 │ 0.0033826586 │
│ 2020-10-25 │ Bitcoin │ 24406921000 │ 13031.17 │ 241425220000 │ -0.0058658565 │
│ 2020-10-24 │ Bitcoin │ 24542319000 │ 13108.06 │ 242839880000 │ 0.013650347 │
└────────────┴─────────────┴─────────────┴──────────┴──────────────┴───────────────┘
```

Now suppose we have a table named `holdings` that contains a list of cryptocurrencies that we own, along with the number of coins:

```sql
CREATE TABLE holdings
(
    crypto_name String,
    quantity UInt64
)
ENGINE = MergeTree
PRIMARY KEY (crypto_name);

INSERT INTO holdings VALUES
    ('Bitcoin', 1000),
    ('Bitcoin', 200),
    ('Ethereum', 250),
    ('Ethereum', 5000),
    ('DOGEFI', 10),
    ('Bitcoin Diamond', 5000);
```

We can use `EXCEPT` to answer a question like **"Which coins that we own have never traded below $10?"**:

```sql
SELECT crypto_name FROM holdings
EXCEPT
SELECT crypto_name FROM crypto_prices
WHERE price < 10;
```

Result:

``` text
┌─one─┬─two─┬─three─┐
│ l │ p │ o │
│ k │ t │ d │
│ l │ p │ o │
└─────┴─────┴───────┘
```response
┌─crypto_name─┐
│ Bitcoin │
│ Bitcoin │
└─────────────┘
```

This means of the four cryptocurrencies we own, only Bitcoin has never dropped below $10 (based on the limited data we have here in this example).

## EXCEPT DISTINCT

Notice in the previous query we had multiple Bitcoin holdings in the result. You can add `DISTINCT` to `EXCEPT` to eliminate duplicate rows from the result:

```sql
SELECT crypto_name FROM holdings
EXCEPT DISTINCT
SELECT crypto_name FROM crypto_prices
WHERE price < 10;
```

Result:

```response
┌─crypto_name─┐
│ Bitcoin │
└─────────────┘
```

**See Also**

- [UNION](union.md#union-clause)
@@ -24,17 +24,17 @@ FROM table2
```
The condition could be any expression based on your requirements.

**Examples**
## Examples

Query:
Here is a simple example that intersects the numbers 1 to 10 with the numbers 3 to 8:

``` sql
```sql
SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,6);
```

Result:

``` text
```response
┌─number─┐
│ 3 │
│ 4 │
@@ -45,29 +45,112 @@ Result:
└────────┘
```

Query:
`INTERSECT` is useful if you have two tables that share a common column (or columns). You can intersect the results of two queries, as long as the results contain the same columns. For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:

``` sql
CREATE TABLE t1(one String, two String, three String) ENGINE=Memory();
CREATE TABLE t2(four String, five String, six String) ENGINE=Memory();
```sql
CREATE TABLE crypto_prices
(
    trade_date Date,
    crypto_name String,
    volume Float32,
    price Float32,
    market_cap Float32,
    change_1_day Float32
)
ENGINE = MergeTree
PRIMARY KEY (crypto_name, trade_date);

INSERT INTO t1 VALUES ('q', 'm', 'b'), ('s', 'd', 'f'), ('l', 'p', 'o'), ('s', 'd', 'f'), ('s', 'd', 'f'), ('k', 't', 'd'), ('l', 'p', 'o');
INSERT INTO t2 VALUES ('q', 'm', 'b'), ('b', 'd', 'k'), ('s', 'y', 't'), ('s', 'd', 'f'), ('m', 'f', 'o'), ('k', 'k', 'd');
INSERT INTO crypto_prices
SELECT *
FROM s3(
    'https://learn-clickhouse.s3.us-east-2.amazonaws.com/crypto_prices.csv',
    'CSVWithNames'
);

SELECT * FROM t1 INTERSECT SELECT * FROM t2;
SELECT * FROM crypto_prices
WHERE crypto_name = 'Bitcoin'
ORDER BY trade_date DESC
LIMIT 10;
```

```response
┌─trade_date─┬─crypto_name─┬──────volume─┬────price─┬───market_cap─┬──change_1_day─┐
│ 2020-11-02 │ Bitcoin │ 30771456000 │ 13550.49 │ 251119860000 │ -0.013585099 │
│ 2020-11-01 │ Bitcoin │ 24453857000 │ 13737.11 │ 254569760000 │ -0.0031840964 │
│ 2020-10-31 │ Bitcoin │ 30306464000 │ 13780.99 │ 255372070000 │ 0.017308505 │
│ 2020-10-30 │ Bitcoin │ 30581486000 │ 13546.52 │ 251018150000 │ 0.008084608 │
│ 2020-10-29 │ Bitcoin │ 56499500000 │ 13437.88 │ 248995320000 │ 0.012552661 │
│ 2020-10-28 │ Bitcoin │ 35867320000 │ 13271.29 │ 245899820000 │ -0.02804481 │
│ 2020-10-27 │ Bitcoin │ 33749879000 │ 13654.22 │ 252985950000 │ 0.04427984 │
│ 2020-10-26 │ Bitcoin │ 29461459000 │ 13075.25 │ 242251000000 │ 0.0033826586 │
│ 2020-10-25 │ Bitcoin │ 24406921000 │ 13031.17 │ 241425220000 │ -0.0058658565 │
│ 2020-10-24 │ Bitcoin │ 24542319000 │ 13108.06 │ 242839880000 │ 0.013650347 │
└────────────┴─────────────┴─────────────┴──────────┴──────────────┴───────────────┘
```

Now suppose we have a table named `holdings` that contains a list of cryptocurrencies that we own, along with the number of coins:

```sql
CREATE TABLE holdings
(
    crypto_name String,
    quantity UInt64
)
ENGINE = MergeTree
PRIMARY KEY (crypto_name);

INSERT INTO holdings VALUES
    ('Bitcoin', 1000),
    ('Bitcoin', 200),
    ('Ethereum', 250),
    ('Ethereum', 5000),
    ('DOGEFI', 10),
    ('Bitcoin Diamond', 5000);
```

We can use `INTERSECT` to answer questions like **"Which coins that we own have traded at a price greater than $100?"**:

```sql
SELECT crypto_name FROM holdings
INTERSECT
SELECT crypto_name FROM crypto_prices
WHERE price > 100
```

Result:

``` text
┌─one─┬─two─┬─three─┐
│ q │ m │ b │
│ s │ d │ f │
│ s │ d │ f │
│ s │ d │ f │
└─────┴─────┴───────┘
```response
┌─crypto_name─┐
│ Bitcoin │
│ Bitcoin │
│ Ethereum │
│ Ethereum │
└─────────────┘
```

This means at some point in time, Bitcoin and Ethereum traded above $100, and DOGEFI and Bitcoin Diamond have never traded above $100 (at least using the data we have here in this example).

## INTERSECT DISTINCT

Notice in the previous query we had multiple Bitcoin and Ethereum holdings that traded above $100. It might be nice to remove duplicate rows (since they only repeat what we already know). You can add `DISTINCT` to `INTERSECT` to eliminate duplicate rows from the result:

```sql
SELECT crypto_name FROM holdings
INTERSECT DISTINCT
SELECT crypto_name FROM crypto_prices
WHERE price > 100;
```

Result:

```response
┌─crypto_name─┐
│ Bitcoin │
│ Ethereum │
└─────────────┘
```

**See Also**

- [UNION](union.md#union-clause)
@@ -543,3 +543,7 @@ Result:
│ 7 │ original │ 7 │
└─────┴──────────┴───────┘
```

## Related content

- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

@@ -72,7 +72,7 @@ For more convenient (automatic) cache management, see disable_internal_dns_cache

## DROP MARK CACHE

Resets the mark cache. Used in development of ClickHouse and performance tests.
Resets the mark cache.

## DROP REPLICA

@@ -94,13 +94,18 @@ The fourth one is useful to remove metadata of dead replica when all other repli

## DROP UNCOMPRESSED CACHE

Reset the uncompressed data cache. Used in development of ClickHouse and performance tests.
For manage uncompressed data cache parameters use following server level settings [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) and query/user/profile level settings [use_uncompressed_cache](../../operations/settings/settings.md#setting-use_uncompressed_cache)
Reset the uncompressed data cache.
The uncompressed data cache is enabled/disabled with the query/user/profile-level setting [use_uncompressed_cache](../../operations/settings/settings.md#setting-use_uncompressed_cache).
Its size can be configured using the server-level setting [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size).

## DROP COMPILED EXPRESSION CACHE

Reset the compiled expression cache. Used in development of ClickHouse and performance tests.
Compiled expression cache used when query/user/profile enable option [compile-expressions](../../operations/settings/settings.md#compile-expressions)
Reset the compiled expression cache.
The compiled expression cache is enabled/disabled with the query/user/profile-level setting [compile_expressions](../../operations/settings/settings.md#compile-expressions).

## DROP QUERY RESULT CACHE

Resets the [query result cache](../../operations/query-result-cache.md).
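For example, a minimal use of the statement described above, clearing the experimental cache after the underlying data has changed:

```sql
SYSTEM DROP QUERY RESULT CACHE;
```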
## FLUSH LOGS

@@ -51,4 +51,7 @@ SELECT * FROM random;
│ [] │ 68091.8197 │ ('2037-10-02 12:44:23.368','039ecab7-81c2-45ee-208c-844e5c6c5652') │
│ [8,-83,0,-22,65,9,-30,28,64] │ -186233.4909 │ ('2062-01-11 00:06:04.124','69563ea1-5ad1-f870-16d8-67061da0df25') │
└──────────────────────────────┴──────────────┴────────────────────────────────────────────────────────────────────┘
```
```

## Related content
- Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse)

@@ -131,3 +131,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)

- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

## Related content
- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)

@@ -590,5 +590,6 @@ ORDER BY

## Related Content

- [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
- Blog: [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
- Blog: [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)
@@ -968,7 +968,7 @@ void Client::processOptions(const OptionsDescription & options_description,
if (external_tables.back().file == "-")
    ++number_of_external_tables_with_stdin_source;
if (number_of_external_tables_with_stdin_source > 1)
    throw Exception("Two or more external tables has stdin (-) set as --file field", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Two or more external tables has stdin (-) set as --file field");
}
catch (const Exception & e)
{
@@ -1021,7 +1021,7 @@ void Client::processOptions(const OptionsDescription & options_description,
}

if (options.count("config-file") && options.count("config"))
    throw Exception("Two or more configuration files referenced in arguments", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Two or more configuration files referenced in arguments");

if (options.count("config"))
    config().setString("config-file", options["config"].as<std::string>());
@@ -1212,14 +1212,14 @@ void Client::readArguments(
/// param_name value
++arg_num;
if (arg_num >= argc)
    throw Exception("Parameter requires value", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter requires value");
arg = argv[arg_num];
query_parameters.emplace(String(param_continuation), String(arg));
}
else
{
if (equal_pos == 0)
    throw Exception("Parameter name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter name cannot be empty");

/// param_name=value
query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1));
@@ -1233,7 +1233,7 @@ void Client::readArguments(
{
++arg_num;
if (arg_num >= argc)
    throw Exception("Host argument requires value", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host argument requires value");
arg = argv[arg_num];
host_arg = "--host=";
host_arg.append(arg);
@@ -1265,7 +1265,7 @@ void Client::readArguments(
port_arg.push_back('=');
++arg_num;
if (arg_num >= argc)
    throw Exception("Port argument requires value", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Port argument requires value");
arg = argv[arg_num];
port_arg.append(arg);
}

@@ -46,7 +46,7 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]);

if (size_compressed > DBMS_MAX_COMPRESSED_SIZE)
    throw DB::Exception("Too large size_compressed. Most likely corrupted data.", DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED);
    throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data.");

UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]);

@@ -113,10 +113,10 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
codecs = options["codec"].as<std::vector<std::string>>();

if ((use_lz4hc || use_zstd || use_deflate_qpl || use_none) && !codecs.empty())
    throw Exception("Wrong options, codec flags like --zstd and --codec options are mutually exclusive", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive");

if (!codecs.empty() && options.count("level"))
    throw Exception("Wrong options, --level is not compatible with --codec list", ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list");

std::string method_family = "LZ4";

@@ -77,7 +77,7 @@ decltype(auto) ClusterCopier::retry(T && func, UInt64 max_tries)
std::exception_ptr exception;

if (max_tries == 0)
    throw Exception("Cannot perform zero retries", ErrorCodes::LOGICAL_ERROR);
    throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot perform zero retries");

for (UInt64 try_number = 1; try_number <= max_tries; ++try_number)
{
@@ -123,7 +123,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
}
catch (Exception & e)
{
    throw Exception("Partition " + partition_text_quoted + " has incorrect format. " + e.displayText(), ErrorCodes::BAD_ARGUMENTS);
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Partition {} has incorrect format. {}", partition_text_quoted, e.displayText());
}
};

@@ -325,8 +325,8 @@ void ClusterCopier::process(const ConnectionTimeouts & timeouts)

if (!table_is_done)
{
    throw Exception("Too many tries to process table " + task_table.table_id + ". Abort remaining execution",
        ErrorCodes::UNFINISHED);
    throw Exception(ErrorCodes::UNFINISHED, "Too many tries to process table {}. Abort remaining execution",
        task_table.table_id);
}
}
}
@@ -666,7 +666,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
}

if (inject_fault)
    throw Exception("Copy fault injection is activated", ErrorCodes::UNFINISHED);
    throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated");
}

/// Create node to signal that we finished moving
@@ -753,7 +753,7 @@ std::shared_ptr<ASTCreateQuery> rewriteCreateQueryStorage(const ASTPtr & create_
auto res = std::make_shared<ASTCreateQuery>(create);

if (create.storage == nullptr || new_storage_ast == nullptr)
    throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR);
    throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage is not specified");

res->setDatabase(new_table.first);
res->setTable(new_table.second);
@@ -775,7 +775,7 @@ bool ClusterCopier::tryDropPartitionPiece(
const CleanStateClock & clean_state_clock)
{
if (is_safe_mode)
    throw Exception("DROP PARTITION is prohibited in safe mode", ErrorCodes::NOT_IMPLEMENTED);
    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DROP PARTITION is prohibited in safe mode");

TaskTable & task_table = task_partition.task_shard.task_table;
ShardPartitionPiece & partition_piece = task_partition.pieces[current_piece_number];
@@ -944,7 +944,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
for (const String & partition_name : task_table.ordered_partition_names)
{
if (!task_table.cluster_partitions.contains(partition_name))
    throw Exception("There are no expected partition " + partition_name + ". It is a bug", ErrorCodes::LOGICAL_ERROR);
    throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no expected partition {}. It is a bug", partition_name);

ClusterPartition & cluster_partition = task_table.cluster_partitions[partition_name];

@@ -1006,7 +1006,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
/// Previously when we discovered that shard does not contain current partition, we skipped it.
/// At this moment partition have to be present.
if (it_shard_partition == shard->partition_tasks.end())
    throw Exception("There are no such partition in a shard. This is a bug.", ErrorCodes::LOGICAL_ERROR);
    throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no such partition in a shard. This is a bug.");
auto & partition = it_shard_partition->second;

expected_shards.emplace_back(shard);
@@ -1587,7 +1587,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
auto cancel_check = [&] ()
{
if (zookeeper->expired())
    throw Exception("ZooKeeper session is expired, cancel INSERT SELECT", ErrorCodes::UNFINISHED);
    throw Exception(ErrorCodes::UNFINISHED, "ZooKeeper session is expired, cancel INSERT SELECT");

if (!future_is_dirty_checker.valid())
    future_is_dirty_checker = zookeeper->asyncExists(piece_is_dirty_flag_path);
@@ -1603,7 +1603,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
LogicalClock dirt_discovery_epoch (status.stat.mzxid);
if (dirt_discovery_epoch == clean_state_clock.discovery_zxid)
    return false;
throw Exception("Partition is dirty, cancel INSERT SELECT", ErrorCodes::UNFINISHED);
throw Exception(ErrorCodes::UNFINISHED, "Partition is dirty, cancel INSERT SELECT");
}
}

@@ -1646,7 +1646,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
future_is_dirty_checker.get();

if (inject_fault)
    throw Exception("Copy fault injection is activated", ErrorCodes::UNFINISHED);
    throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated");
}
catch (...)
{

@@ -90,9 +90,7 @@ ASTPtr extractPartitionKey(const ASTPtr & storage_ast)
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
throw Exception(
|
||||
"Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
if (isExtendedDefinitionStorage(storage_ast))
|
||||
@ -109,14 +107,13 @@ ASTPtr extractPartitionKey(const ASTPtr & storage_ast)
|
||||
size_t min_args = is_replicated ? 3 : 1;
|
||||
|
||||
if (!engine.arguments)
|
||||
throw Exception("Expected arguments in " + storage_str, ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected arguments in {}", storage_str);
|
||||
|
||||
ASTPtr arguments_ast = engine.arguments->clone();
|
||||
ASTs & arguments = arguments_ast->children;
|
||||
|
||||
if (arguments.size() < min_args)
|
||||
throw Exception("Expected at least " + toString(min_args) + " arguments in " + storage_str,
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected at least {} arguments in {}", min_args, storage_str);
|
||||
|
||||
ASTPtr & month_arg = is_replicated ? arguments[2] : arguments[1];
|
||||
return makeASTFunction("toYYYYMM", month_arg->clone());
|
||||
@ -132,14 +129,12 @@ ASTPtr extractPrimaryKey(const ASTPtr & storage_ast)
|
||||
|
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
throw Exception("Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
if (!isExtendedDefinitionStorage(storage_ast))
|
||||
{
|
||||
throw Exception("Is not extended deginition storage " + storage_str + " Will be fixed later.",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str);
|
||||
}
|
||||
|
||||
if (storage.primary_key)
|
||||
@ -158,20 +153,18 @@ ASTPtr extractOrderBy(const ASTPtr & storage_ast)
|
||||
|
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
throw Exception("Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
if (!isExtendedDefinitionStorage(storage_ast))
|
||||
{
|
||||
throw Exception("Is not extended deginition storage " + storage_str + " Will be fixed later.",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str);
|
||||
}
|
||||
|
||||
if (storage.order_by)
|
||||
return storage.order_by->clone();
|
||||
|
||||
throw Exception("ORDER BY cannot be empty", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "ORDER BY cannot be empty");
|
||||
}
|
||||
|
||||
/// Wraps only identifiers with backticks.
|
||||
@ -191,7 +184,7 @@ std::string wrapIdentifiersWithBackticks(const ASTPtr & root)
|
||||
return boost::algorithm::join(function_arguments, ", ");
|
||||
}
|
||||
|
||||
throw Exception("Primary key could be represented only as columns or functions from columns.", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key could be represented only as columns or functions from columns.");
|
||||
}
|
||||
|
||||
|
||||
@ -210,9 +203,9 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
|
||||
size_t sorting_key_size = sorting_key_expr_list->children.size();
|
||||
|
||||
if (primary_key_size > sorting_key_size)
|
||||
throw Exception("Primary key must be a prefix of the sorting key, but its length: "
|
||||
+ toString(primary_key_size) + " is greater than the sorting key length: " + toString(sorting_key_size),
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key must be a prefix of the sorting key, but its length: "
|
||||
"{} is greater than the sorting key length: {}",
|
||||
primary_key_size, sorting_key_size);
|
||||
|
||||
Names primary_key_columns;
|
||||
NameSet primary_key_columns_set;
|
||||
@ -228,12 +221,12 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
|
||||
{
|
||||
String pk_column = primary_key_expr_list->children[i]->getColumnName();
|
||||
if (pk_column != sorting_key_column)
|
||||
throw Exception("Primary key must be a prefix of the sorting key, but the column in the position "
|
||||
+ toString(i) + " is " + sorting_key_column +", not " + pk_column,
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Primary key must be a prefix of the sorting key, "
|
||||
"but the column in the position {} is {}, not {}", i, sorting_key_column, pk_column);
|
||||
|
||||
if (!primary_key_columns_set.emplace(pk_column).second)
|
||||
throw Exception("Primary key contains duplicate columns", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key contains duplicate columns");
|
||||
|
||||
primary_key_columns.push_back(wrapIdentifiersWithBackticks(primary_key_expr_list->children[i]));
|
||||
}
|
||||
@ -250,9 +243,7 @@ bool isReplicatedTableEngine(const ASTPtr & storage_ast)
|
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
String storage_str = queryToString(storage_ast);
|
||||
throw Exception(
|
||||
"Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
return startsWith(engine.name, "Replicated");
|
||||
|
@ -119,7 +119,7 @@ struct TaskStateWithOwner
|
||||
rb >> state >> "\n" >> escape >> res.owner;
|
||||
|
||||
if (state >= static_cast<int>(TaskState::Unknown))
|
||||
throw Exception("Unknown state " + data, ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown state {}", data);
|
||||
|
||||
res.state = static_cast<TaskState>(state);
|
||||
return res;
|
||||
|
@ -19,7 +19,7 @@ void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config
|
||||
|
||||
clusters_prefix = prefix + "remote_servers";
|
||||
if (!config.has(clusters_prefix))
|
||||
throw Exception("You should specify list of clusters in " + clusters_prefix, ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "You should specify list of clusters in {}", clusters_prefix);
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys tables_keys;
|
||||
config.keys(prefix + "tables", tables_keys);
|
||||
|
@ -102,7 +102,7 @@ TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfigurati
|
||||
for (const String &key : keys)
|
||||
{
|
||||
if (!startsWith(key, "partition"))
|
||||
throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
|
||||
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown key {} in {}", key, enabled_partitions_prefix);
|
||||
|
||||
enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." + key));
|
||||
}
|
||||
@ -213,8 +213,7 @@ ClusterPartition & TaskTable::getClusterPartition(const String & partition_name)
|
||||
{
|
||||
auto it = cluster_partitions.find(partition_name);
|
||||
if (it == cluster_partitions.end())
|
||||
throw Exception("There are no cluster partition " + partition_name + " in " + table_id,
|
||||
ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no cluster partition {} in {}", partition_name, table_id);
|
||||
return it->second;
|
||||
}
|
||||
|
||||
|
@ -44,7 +44,7 @@ public:
|
||||
if (command_arguments.size() != 2)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name_from = config.getString("diskFrom", config.getString("disk", "default"));
|
||||
|
@ -33,7 +33,7 @@ public:
|
||||
if (command_arguments.size() != 2)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -40,7 +40,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
if (!command_arguments.empty())
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
for (const auto & [disk_name, _] : global_context->getDisksMap())
|
||||
|
@ -41,7 +41,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
if (command_arguments.size() != 2)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -43,7 +43,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -44,7 +44,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -119,7 +119,7 @@ void DisksApp::init(std::vector<String> & common_arguments)
|
||||
{
|
||||
std::cerr << "Unknown command name: " << command_name << "\n";
|
||||
printHelpMessage(options_description);
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
processOptions();
|
||||
|
@ -1160,7 +1160,7 @@ void processLog(const Options & options)
|
||||
/// Will run multiple processes in parallel
|
||||
size_t num_threads = options.threads;
|
||||
if (num_threads == 0)
|
||||
throw Exception("num-threads cannot be zero", ErrorCodes::INCORRECT_DATA);
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "num-threads cannot be zero");
|
||||
|
||||
std::vector<std::unique_ptr<ShellCommand>> show_commands(num_threads);
|
||||
for (size_t i = 0; i < num_commits && i < num_threads; ++i)
|
||||
|
@ -484,8 +484,7 @@ try
|
||||
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), true), server_pool, socket));
|
||||
#else
|
||||
UNUSED(port);
|
||||
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
|
||||
#endif
|
||||
});
|
||||
|
||||
|
@ -169,7 +169,7 @@ std::vector<PODArray<char>> placeStringColumns(const ColumnRawPtrs & columns, si
|
||||
else if (const auto * column_fixed_string = typeid_cast<const ColumnFixedString *>(column))
|
||||
data.push_back(placeFixedStringColumn(*column_fixed_string, buffer + i, size));
|
||||
else
|
||||
throw Exception("Cannot place string column.", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot place string column.");
|
||||
}
|
||||
|
||||
return data;
|
||||
|
@ -32,7 +32,7 @@ ExternalDictionaryLibraryHandler::ExternalDictionaryLibraryHandler(
|
||||
if (lib_new)
|
||||
lib_data = lib_new(&settings_holder->strings, ExternalDictionaryLibraryAPI::log);
|
||||
else
|
||||
throw Exception("Method extDict_libNew failed", ErrorCodes::EXTERNAL_LIBRARY_ERROR);
|
||||
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "Method extDict_libNew failed");
|
||||
}
|
||||
|
||||
|
||||
@ -173,22 +173,21 @@ Block ExternalDictionaryLibraryHandler::loadKeys(const Columns & key_columns)
Block ExternalDictionaryLibraryHandler::dataToBlock(ExternalDictionaryLibraryAPI::RawClickHouseLibraryTable data)
{
if (!data)
throw Exception("LibraryDictionarySource: No data returned", ErrorCodes::EXTERNAL_LIBRARY_ERROR);
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "LibraryDictionarySource: No data returned");
const auto * columns_received = static_cast<const ExternalDictionaryLibraryAPI::Table *>(data);
if (columns_received->error_code)
throw Exception(
"LibraryDictionarySource: Returned error: " + std::to_string(columns_received->error_code) + " " + (columns_received->error_string ? columns_received->error_string : ""),
ErrorCodes::EXTERNAL_LIBRARY_ERROR);
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "LibraryDictionarySource: Returned error: {} {}",
std::to_string(columns_received->error_code), (columns_received->error_string ? columns_received->error_string : ""));
MutableColumns columns = sample_block.cloneEmptyColumns();
for (size_t col_n = 0; col_n < columns_received->size; ++col_n)
{
if (columns.size() != columns_received->data[col_n].size)
throw Exception(
"LibraryDictionarySource: Returned unexpected number of columns: " + std::to_string(columns_received->data[col_n].size) + ", must be " + std::to_string(columns.size()),
ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "LibraryDictionarySource: "
"Returned unexpected number of columns: {}, must be {}",
columns_received->data[col_n].size, columns.size());
for (size_t row_n = 0; row_n < columns_received->data[col_n].size; ++row_n)
{
@ -359,7 +359,7 @@ void LocalServer::setupUsers()
if (users_config)
global_context->setUsersConfig(users_config);
else
throw Exception("Can't load config for users", ErrorCodes::CANNOT_LOAD_CONFIG);
throw Exception(ErrorCodes::CANNOT_LOAD_CONFIG, "Can't load config for users");
}
void LocalServer::connect()
@ -489,7 +489,7 @@ void LocalServer::processConfig()
if (is_interactive && !delayed_interactive)
{
if (config().has("query") && config().has("queries-file"))
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specify either `query` or `queries-file` option");
if (config().has("multiquery"))
is_multiquery = true;
@ -880,7 +880,7 @@ public:
}
if (!it)
throw Exception("Logical error in markov model", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error in markov model");
size_t offset_from_begin_of_string = pos - data;
size_t determinator_sliding_window_size = params.determinator_sliding_window_size;
@ -1139,7 +1139,7 @@ public:
if (const auto * type = typeid_cast<const DataTypeNullable *>(&data_type))
return std::make_unique<NullableModel>(get(*type->getNestedType(), seed, markov_model_params));
throw Exception("Unsupported data type", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported data type");
}
};
@ -1384,7 +1384,7 @@ try
UInt8 version = 0;
readBinary(version, model_in);
if (version != 0)
throw Exception("Unknown version of the model file", ErrorCodes::UNKNOWN_FORMAT_VERSION);
throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unknown version of the model file");
readBinary(source_rows, model_in);
@ -1392,14 +1392,14 @@ try
size_t header_size = 0;
readBinary(header_size, model_in);
if (header_size != data_types.size())
throw Exception("The saved model was created for different number of columns", ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS);
throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "The saved model was created for different number of columns");
for (size_t i = 0; i < header_size; ++i)
{
String type;
readBinary(type, model_in);
if (type != data_types[i])
throw Exception("The saved model was created for different types of columns", ErrorCodes::TYPE_MISMATCH);
throw Exception(ErrorCodes::TYPE_MISMATCH, "The saved model was created for different types of columns");
}
obfuscator.deserialize(model_in);
@ -181,7 +181,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
}
if (columns.empty())
throw Exception("Columns definition was not returned", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Columns definition was not returned");
WriteBufferFromHTTPServerResponse out(
response,
@ -163,7 +163,7 @@ void ODBCSource::insertValue(
break;
}
default:
throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE);
throw Exception(ErrorCodes::UNKNOWN_TYPE, "Unsupported value type");
}
}
@ -151,7 +151,7 @@ public:
auto connection_available = pool->tryBorrowObject(connection, []() { return nullptr; }, ODBC_POOL_WAIT_TIMEOUT);
if (!connection_available)
throw Exception("Unable to fetch connection within the timeout", ErrorCodes::NO_FREE_CONNECTION);
throw Exception(ErrorCodes::NO_FREE_CONNECTION, "Unable to fetch connection within the timeout");
try
{
@ -44,7 +44,8 @@ IdentifierQuotingStyle getQuotingStyle(nanodbc::ConnectionHolderPtr connection)
else if (identifier_quote[0] == '"')
return IdentifierQuotingStyle::DoubleQuotes;
else
throw Exception("Can not map quote identifier '" + identifier_quote + "' to IdentifierQuotingStyle value", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
}
}
@ -38,10 +38,10 @@ std::string validateODBCConnectionString(const std::string & connection_string)
static constexpr size_t MAX_CONNECTION_STRING_SIZE = 1000;
if (connection_string.empty())
throw Exception("ODBC connection string cannot be empty", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string cannot be empty");
if (connection_string.size() >= MAX_CONNECTION_STRING_SIZE)
throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string is too long");
const char * pos = connection_string.data();
const char * end = pos + connection_string.size();
@ -51,7 +51,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
while (pos < end && isWhitespaceASCII(*pos))
{
if (*pos != ' ')
throw Exception("ODBC connection string parameter contains unusual whitespace character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter contains unusual whitespace character");
++pos;
}
};
@ -63,7 +63,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (pos < end && isValidIdentifierBegin(*pos))
++pos;
else
throw Exception("ODBC connection string parameter name doesn't begin with valid identifier character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING,
"ODBC connection string parameter name doesn't begin with valid identifier character");
/// Additionally allow dash and dot symbols in names.
/// Strictly speaking, the name with that characters should be escaped.
@ -83,7 +84,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
{
signed char c = *pos;
if (c < 32 || strchr("[]{}(),;?*=!@'\"", c) != nullptr)
throw Exception("ODBC connection string parameter value is unescaped and contains illegal character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING,
"ODBC connection string parameter value is unescaped and contains illegal character");
++pos;
}
@ -97,7 +99,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (pos < end && *pos == '{')
++pos;
else
throw Exception("ODBC connection string parameter value doesn't begin with opening curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter value doesn't begin with opening curly brace");
while (pos < end)
{
@ -109,13 +111,13 @@ std::string validateODBCConnectionString(const std::string & connection_string)
}
if (*pos == 0)
throw Exception("ODBC connection string parameter value contains ASCII NUL character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter value contains ASCII NUL character");
res += *pos;
++pos;
}
throw Exception("ODBC connection string parameter is escaped but there is no closing curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter is escaped but there is no closing curly brace");
};
auto read_value = [&]
@ -139,25 +141,25 @@ std::string validateODBCConnectionString(const std::string & connection_string)
Poco::toUpperInPlace(name);
if (name == "FILEDSN" || name == "SAVEFILE" || name == "DRIVER")
throw Exception("ODBC connection string has forbidden parameter", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string has forbidden parameter");
if (pos >= end)
throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter doesn't have value");
if (*pos == '=')
++pos;
else
throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter doesn't have value");
skip_whitespaces();
std::string value = read_value();
skip_whitespaces();
if (name.size() > MAX_ELEMENT_SIZE || value.size() > MAX_ELEMENT_SIZE)
throw Exception("ODBC connection string has too long keyword or value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string has too long keyword or value");
if (!parameters.emplace(name, value).second)
throw Exception("Duplicate parameter found in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "Duplicate parameter found in ODBC connection string");
if (pos >= end)
break;
@ -165,7 +167,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (*pos == ';')
++pos;
else
throw Exception("Unexpected character found after parameter value in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "Unexpected character found after parameter value in ODBC connection string");
}
/// Reconstruct the connection string.
@ -173,12 +175,12 @@ std::string validateODBCConnectionString(const std::string & connection_string)
auto it = parameters.find("DSN");
if (parameters.end() == it)
throw Exception("DSN parameter is mandatory for ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "DSN parameter is mandatory for ODBC connection string");
std::string dsn = it->second;
if (dsn.empty())
throw Exception("DSN parameter cannot be empty in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "DSN parameter cannot be empty in ODBC connection string");
parameters.erase(it);
@ -241,7 +243,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
write_element(elem.first, elem.second);
if (reconstructed_connection_string.size() >= MAX_CONNECTION_STRING_SIZE)
throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string is too long");
return reconstructed_connection_string;
}
@ -257,7 +257,7 @@ static std::string getCanonicalPath(std::string && path)
{
Poco::trimInPlace(path);
if (path.empty())
throw Exception("path configuration parameter is empty", ErrorCodes::INVALID_CONFIG_PARAMETER);
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "path configuration parameter is empty");
if (path.back() != '/')
path += '/';
return std::move(path);
@ -1116,7 +1116,7 @@ try
#endif
if (config().has("interserver_http_port") && config().has("interserver_https_port"))
throw Exception("Both http and https interserver ports are specified", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
throw Exception(ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "Both http and https interserver ports are specified");
static const auto interserver_tags =
{
@ -1141,7 +1141,7 @@ try
int port = parse<int>(port_str);
if (port < 0 || port > 0xFFFF)
throw Exception("Out of range '" + String(port_tag) + "': " + toString(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Out of range '{}': {}", String(port_tag), port);
global_context->setInterserverIOAddress(this_host, port);
global_context->setInterserverScheme(scheme);
@ -1419,8 +1419,7 @@ try
global_context->getSettingsRef().send_timeout.totalSeconds(), true), server_pool, socket));
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
}
@ -1465,7 +1464,7 @@ try
size_t max_cache_size = static_cast<size_t>(memory_amount * cache_size_to_ram_max_ratio);
/// Size of cache for uncompressed blocks. Zero means disabled.
String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", "");
String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", "SLRU");
LOG_INFO(log, "Uncompressed cache policy name {}", uncompressed_cache_policy);
size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0);
if (uncompressed_cache_size > max_cache_size)
@ -1491,7 +1490,7 @@ try
/// Size of cache for marks (index of MergeTree family of tables).
size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
String mark_cache_policy = config().getString("mark_cache_policy", "");
String mark_cache_policy = config().getString("mark_cache_policy", "SLRU");
if (!mark_cache_size)
LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
if (mark_cache_size > max_cache_size)
@ -1517,6 +1516,15 @@ try
if (mmap_cache_size)
global_context->setMMappedFileCache(mmap_cache_size);
/// A cache for query results.
size_t query_result_cache_size = config().getUInt64("query_result_cache.size", 1_GiB);
if (query_result_cache_size)
global_context->setQueryResultCache(
query_result_cache_size,
config().getUInt64("query_result_cache.max_entries", 1024),
config().getUInt64("query_result_cache.max_entry_size", 1_MiB),
config().getUInt64("query_result_cache.max_entry_records", 30'000'000));
#if USE_EMBEDDED_COMPILER
/// 128 MB
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
@ -1740,14 +1748,15 @@ try
std::lock_guard lock(servers_lock);
createServers(config(), listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers);
if (servers.empty())
throw Exception(
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
"to configuration file.)");
}
if (servers.empty())
throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
"to configuration file.)");
#if USE_SSL
CertificateReloader::instance().tryLoad(config());
@ -1807,7 +1816,7 @@ try
String ddl_zookeeper_path = config().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/");
int pool_size = config().getInt("distributed_ddl.pool_size", 1);
if (pool_size < 1)
throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "distributed_ddl.pool_size should be greater then 0");
global_context->setDDLWorker(std::make_unique<DDLWorker>(pool_size, ddl_zookeeper_path, global_context, &config(),
"distributed_ddl", "DDLWorker",
&CurrentMetrics::MaxDDLEntryID, &CurrentMetrics::MaxPushedDDLEntryID));
@ -1936,8 +1945,7 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
#if USE_SSL
return TCPServerConnectionFactory::Ptr(new TLSHandlerFactory(*this, conf_name));
#else
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
if (type == "proxy1")
@ -2104,8 +2112,7 @@ void Server::createServers(
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
#else
UNUSED(port);
throw Exception{"HTTPS protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "HTTPS protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
@ -2167,8 +2174,7 @@ void Server::createServers(
new Poco::Net::TCPServerParams));
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
@ -2273,8 +2279,7 @@ void Server::createServers(
http_params));
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
}
@ -1453,6 +1453,14 @@
</rocksdb>
-->
<!-- Configuration for the query result cache -->
<!-- <query_result_cache> -->
<!-- <size>1073741824</size> -->
<!-- <max_entries>1024</max_entries> -->
<!-- <max_entry_size>1048576</max_entry_size> -->
<!-- <max_entry_records>30000000</max_entry_records> -->
<!-- </query_result_cache> -->
<!-- Uncomment if enable merge tree metadata cache -->
<!--merge_tree_metadata_cache>
<lru_cache_size>268435456</lru_cache_size>
@ -65,7 +65,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
}
if (gid == 0 && getgid() != 0)
throw Exception("Group has id 0, but dropping privileges to gid 0 does not make sense", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group has id 0, but dropping privileges to gid 0 does not make sense");
if (0 != setgid(gid))
throwFromErrno(fmt::format("Cannot do 'setgid' to user ({})", arg_gid), ErrorCodes::SYSTEM_ERROR);
@ -90,7 +90,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
}
if (uid == 0 && getuid() != 0)
throw Exception("User has id 0, but dropping privileges to uid 0 does not make sense", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User has id 0, but dropping privileges to uid 0 does not make sense");
if (0 != setuid(uid))
throwFromErrno(fmt::format("Cannot do 'setuid' to user ({})", arg_uid), ErrorCodes::SYSTEM_ERROR);
@ -289,7 +289,7 @@ namespace
}
default:
throw Exception("Unknown type: " + toString(entity_type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", toString(entity_type));
}
}
return res;
@ -126,10 +126,10 @@ public:
std::lock_guard lock{mutex};
if (!registered_prefixes.empty())
{
throw Exception(
"Setting " + String{setting_name} + " is neither a builtin setting nor started with the prefix '"
+ boost::algorithm::join(registered_prefixes, "' or '") + "' registered for user-defined settings",
ErrorCodes::UNKNOWN_SETTING);
throw Exception(ErrorCodes::UNKNOWN_SETTING,
"Setting {} is neither a builtin setting nor started with the prefix '{}"
"' registered for user-defined settings",
String{setting_name}, boost::algorithm::join(registered_prefixes, "' or '"));
}
else
BaseSettingsHelpers::throwSettingNotFound(setting_name);
@ -450,7 +450,7 @@ void AccessControl::addStoragesFromUserDirectoriesConfig(
addReplicatedStorage(name, zookeeper_path, get_zookeeper_function, allow_backup);
}
else
throw Exception("Unknown storage type '" + type + "' at " + prefix + " in config", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown storage type '{}' at {} in config", type, prefix);
}
}
@ -80,53 +80,53 @@ AccessEntityPtr deserializeAccessEntityImpl(const String & definition)
if (auto * create_user_query = query->as<ASTCreateUserQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = user = std::make_unique<User>();
InterpreterCreateUserQuery::updateUserFromQuery(*user, *create_user_query, /* allow_no_password = */ true, /* allow_plaintext_password = */ true);
}
else if (auto * create_role_query = query->as<ASTCreateRoleQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = role = std::make_unique<Role>();
InterpreterCreateRoleQuery::updateRoleFromQuery(*role, *create_role_query);
}
else if (auto * create_policy_query = query->as<ASTCreateRowPolicyQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = policy = std::make_unique<RowPolicy>();
InterpreterCreateRowPolicyQuery::updateRowPolicyFromQuery(*policy, *create_policy_query);
}
else if (auto * create_quota_query = query->as<ASTCreateQuotaQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = quota = std::make_unique<Quota>();
InterpreterCreateQuotaQuery::updateQuotaFromQuery(*quota, *create_quota_query);
}
else if (auto * create_profile_query = query->as<ASTCreateSettingsProfileQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = profile = std::make_unique<SettingsProfile>();
InterpreterCreateSettingsProfileQuery::updateSettingsProfileFromQuery(*profile, *create_profile_query);
}
else if (auto * grant_query = query->as<ASTGrantQuery>())
{
if (!user && !role)
throw Exception("A user or role should be attached before grant", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "A user or role should be attached before grant");
if (user)
InterpreterGrantQuery::updateUserFromQuery(*user, *grant_query);
else
InterpreterGrantQuery::updateRoleFromQuery(*role, *grant_query);
}
else
throw Exception("No interpreter found for query " + query->getID(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "No interpreter found for query {}", query->getID());
}
if (!res)
throw Exception("No access entities attached", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "No access entities attached");
return res;
}
@ -797,7 +797,7 @@ template <bool with_grant_option>
void AccessRights::grantImpl(const AccessRightsElement & element)
{
if (element.is_partial_revoke)
throw Exception("A partial revoke should be revoked, not granted", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "A partial revoke should be revoked, not granted");
if constexpr (with_grant_option)
{
grantImplHelper<true>(element);
@ -176,7 +176,7 @@ bool Authentication::areCredentialsValid(const Credentials & credentials, const
if ([[maybe_unused]] const auto * always_allow_credentials = typeid_cast<const AlwaysAllowCredentials *>(&credentials))
return true;
throw Exception("areCredentialsValid(): authentication type " + toString(auth_data.getType()) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "areCredentialsValid(): authentication type {} not supported", toString(auth_data.getType()));
}
}
@ -83,7 +83,7 @@ const AccessEntityTypeInfo & AccessEntityTypeInfo::get(AccessEntityType type_)
}
case AccessEntityType::MAX: break;
}
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", static_cast<size_t>(type_));
}
AccessEntityType AccessEntityTypeInfo::parseType(const String & name_)
@ -44,7 +44,7 @@ namespace
boost::to_upper(uppercased_keyword);
it = keyword_to_flags_map.find(uppercased_keyword);
if (it == keyword_to_flags_map.end())
throw Exception("Unknown access type: " + String(keyword), ErrorCodes::UNKNOWN_ACCESS_TYPE);
throw Exception(ErrorCodes::UNKNOWN_ACCESS_TYPE, "Unknown access type: {}", String(keyword));
}
return it->second;
}
@ -179,7 +179,7 @@ namespace
else
{
if (nodes.contains(keyword))
throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "{} declared twice", keyword);
node = std::make_unique<Node>(keyword, node_type);
nodes[node->keyword] = node.get();
}
@ -225,9 +225,9 @@ namespace
# undef MAKE_ACCESS_FLAGS_NODE
if (!owned_nodes.contains("NONE"))
throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "'NONE' not declared");
if (!owned_nodes.contains("ALL"))
throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "'ALL' not declared");
all_node = std::move(owned_nodes["ALL"]);
none_node = std::move(owned_nodes["NONE"]);
@ -238,9 +238,9 @@ namespace
{
const auto & unused_node = *(owned_nodes.begin()->second);
if (unused_node.node_type == UNKNOWN)
throw Exception("Parent group '" + unused_node.keyword + "' not found", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Parent group '{}' not found", unused_node.keyword);
else
throw Exception("Access type '" + unused_node.keyword + "' should have parent group", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Access type '{}' should have parent group", unused_node.keyword);
}
}
@ -142,6 +142,7 @@ enum class AccessType
M(SYSTEM_DROP_MARK_CACHE, "SYSTEM DROP MARK, DROP MARK CACHE, DROP MARKS", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_UNCOMPRESSED_CACHE, "SYSTEM DROP UNCOMPRESSED, DROP UNCOMPRESSED CACHE, DROP UNCOMPRESSED", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_MMAP_CACHE, "SYSTEM DROP MMAP, DROP MMAP CACHE, DROP MMAP", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_QUERY_RESULT_CACHE, "SYSTEM DROP QUERY RESULT, DROP QUERY RESULT CACHE, DROP QUERY RESULT", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_FILESYSTEM_CACHE, "SYSTEM DROP FILESYSTEM CACHE, DROP FILESYSTEM CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_SCHEMA_CACHE, "SYSTEM DROP SCHEMA CACHE, DROP SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
@ -67,7 +67,7 @@ const AuthenticationTypeInfo & AuthenticationTypeInfo::get(AuthenticationType ty
case AuthenticationType::MAX:
break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown authentication type: {}", static_cast<int>(type_));
}
@ -119,19 +119,19 @@ void AuthenticationData::setPassword(const String & password_)
case AuthenticationType::LDAP:
case AuthenticationType::KERBEROS:
case AuthenticationType::SSL_CERTIFICATE:
throw Exception("Cannot specify password for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify password for authentication type {}", toString(type));
case AuthenticationType::MAX:
break;
}
throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setPassword(): authentication type {} not supported", toString(type));
}
String AuthenticationData::getPassword() const
{
if (type != AuthenticationType::PLAINTEXT_PASSWORD)
throw Exception("Cannot decode the password", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot decode the password");
return String(password_hash.data(), password_hash.data() + password_hash.size());
}
@ -157,7 +157,7 @@ void AuthenticationData::setPasswordHashHex(const String & hash)
String AuthenticationData::getPasswordHashHex() const
{
if (type == AuthenticationType::LDAP || type == AuthenticationType::KERBEROS || type == AuthenticationType::SSL_CERTIFICATE)
throw Exception("Cannot get password hex hash for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get password hex hash for authentication type {}", toString(type));
String hex;
hex.resize(password_hash.size() * 2);
@ -179,10 +179,9 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::SHA256_PASSWORD:
{
if (hash.size() != 32)
throw Exception(
"Password hash for the 'SHA256_PASSWORD' authentication type has length " + std::to_string(hash.size())
+ " but must be exactly 32 bytes.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Password hash for the 'SHA256_PASSWORD' authentication type has length {} "
"but must be exactly 32 bytes.", hash.size());
password_hash = hash;
return;
}
@ -190,10 +189,9 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
{
if (hash.size() != 20)
throw Exception(
"Password hash for the 'DOUBLE_SHA1_PASSWORD' authentication type has length " + std::to_string(hash.size())
+ " but must be exactly 20 bytes.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Password hash for the 'DOUBLE_SHA1_PASSWORD' authentication type has length {} "
"but must be exactly 20 bytes.", hash.size());
password_hash = hash;
return;
}
@ -202,18 +200,18 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::LDAP:
case AuthenticationType::KERBEROS:
case AuthenticationType::SSL_CERTIFICATE:
throw Exception("Cannot specify password binary hash for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify password binary hash for authentication type {}", toString(type));
case AuthenticationType::MAX:
break;
}
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setPasswordHashBinary(): authentication type {} not supported", toString(type));
}
void AuthenticationData::setSalt(String salt_)
{
if (type != AuthenticationType::SHA256_PASSWORD)
throw Exception("setSalt(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setSalt(): authentication type {} not supported", toString(type));
salt = std::move(salt_);
}
@ -225,7 +223,7 @@ String AuthenticationData::getSalt() const
void AuthenticationData::setSSLCertificateCommonNames(boost::container::flat_set<String> common_names_)
{
if (common_names_.empty())
throw Exception("The 'SSL CERTIFICATE' authentication type requires a non-empty list of common names.", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The 'SSL CERTIFICATE' authentication type requires a non-empty list of common names.");
ssl_certificate_common_names = std::move(common_names_);
}
@ -113,7 +113,7 @@ const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type)
}
case QuotaType::MAX: break;
}
throw Exception("Unexpected quota type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota type: {}", static_cast<int>(type));
}
String toString(QuotaKeyType type)
@ -187,7 +187,7 @@ const QuotaKeyTypeInfo & QuotaKeyTypeInfo::get(QuotaKeyType type)
}
case QuotaKeyType::MAX: break;
}
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota key type: {}", static_cast<int>(type));
}
}
@ -75,7 +75,7 @@ const RowPolicyFilterTypeInfo & RowPolicyFilterTypeInfo::get(RowPolicyFilterType
#endif
case RowPolicyFilterType::MAX: break;
}
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", static_cast<size_t>(type_));
}
}
@ -470,7 +470,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
/// If the current user has been dropped we always throw an exception (even if `throw_if_denied` is false)
/// because dropping of the current user is considered as a situation which is exceptional enough to stop
/// query execution.
throw Exception(getUserName() + ": User has been dropped", ErrorCodes::UNKNOWN_USER);
throw Exception(ErrorCodes::UNKNOWN_USER, "{}: User has been dropped", getUserName());
}
if (is_full_access)
@ -790,7 +790,7 @@ void ContextAccess::checkGranteeIsAllowed(const UUID & grantee_id, const IAccess
auto current_user = getUser();
if (!current_user->grantees.match(grantee_id))
throw Exception(grantee.formatTypeWithName() + " is not allowed as grantee", ErrorCodes::ACCESS_DENIED);
throw Exception(ErrorCodes::ACCESS_DENIED, "{} is not allowed as grantee", grantee.formatTypeWithName());
}
void ContextAccess::checkGranteesAreAllowed(const std::vector<UUID> & grantee_ids) const
@ -29,7 +29,7 @@ bool Credentials::isReady() const
void Credentials::throwNotReady()
{
throw Exception("Credentials are not ready", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Credentials are not ready");
}
AlwaysAllowCredentials::AlwaysAllowCredentials()
@ -172,7 +172,8 @@ DiskAccessStorage::DiskAccessStorage(const String & storage_name_, const String
std::filesystem::create_directories(directory_path, create_dir_error_code);
if (!std::filesystem::exists(directory_path) || !std::filesystem::is_directory(directory_path) || create_dir_error_code)
throw Exception("Couldn't create directory " + directory_path + " reason: '" + create_dir_error_code.message() + "'", ErrorCodes::DIRECTORY_DOESNT_EXIST);
throw Exception(ErrorCodes::DIRECTORY_DOESNT_EXIST, "Couldn't create directory {} reason: '{}'",
directory_path, create_dir_error_code.message());
bool should_rebuild_lists = std::filesystem::exists(getNeedRebuildListsMarkFilePath(directory_path));
if (!should_rebuild_lists)
@ -722,7 +723,7 @@ void DiskAccessStorage::deleteAccessEntityOnDisk(const UUID & id) const
{
auto file_path = getEntityFilePath(directory_path, id);
if (!std::filesystem::remove(file_path))
throw Exception("Couldn't delete " + file_path, ErrorCodes::FILE_DOESNT_EXIST);
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Couldn't delete {}", file_path);
}
@ -29,11 +29,11 @@ struct EnabledQuota::Impl
|
||||
std::chrono::system_clock::time_point end_of_interval)
|
||||
{
|
||||
const auto & type_info = QuotaTypeInfo::get(quota_type);
|
||||
throw Exception(
|
||||
"Quota for user " + backQuote(user_name) + " for " + to_string(duration) + " has been exceeded: "
|
||||
+ type_info.valueToStringWithName(used) + "/" + type_info.valueToString(max) + ". "
|
||||
+ "Interval will end at " + to_string(end_of_interval) + ". " + "Name of quota template: " + backQuote(quota_name),
|
||||
ErrorCodes::QUOTA_EXCEEDED);
|
||||
throw Exception(ErrorCodes::QUOTA_EXCEEDED, "Quota for user {} for {} has been exceeded: {}/{}. "
|
||||
"Interval will end at {}. Name of quota template: {}",
|
||||
backQuote(user_name), to_string(duration),
|
||||
type_info.valueToStringWithName(used),
|
||||
type_info.valueToString(max), to_string(end_of_interval), backQuote(quota_name));
|
||||
}
|
||||
|
||||
|
||||
|
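Two details of the rewritten quota message above are worth calling out. First, the long concatenation collapses into one format string, and the trailing arguments must be listed in placeholder order (user name, duration, used value, maximum, end of interval, quota name). Second, that format string is split across several source lines; this works because adjacent C++ string literals are merged into a single literal at compile time, which is also how the multi-line messages in the LDAP and Kerberos hunks further down are assembled. A minimal, self-contained check of the literal-merging rule, using the format string copied from the quota hunk above:

#include <cassert>
#include <string>

int main()
{
    // Adjacent string literals are concatenated by the compiler into one literal,
    // so a format string may be wrapped over several lines without changing its value.
    const std::string message =
        "Quota for user {} for {} has been exceeded: {}/{}. "
        "Interval will end at {}. Name of quota template: {}";
    assert(message.find(". Interval") != std::string::npos);
    return 0;
}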
@ -47,15 +47,16 @@ void parseLDAPSearchParams(LDAPClient::SearchParams & params, const Poco::Util::
else if (scope == "subtree") params.scope = LDAPClient::SearchParams::Scope::SUBTREE;
else if (scope == "children") params.scope = LDAPClient::SearchParams::Scope::CHILDREN;
else
throw Exception("Invalid value for 'scope' field of LDAP search parameters in '" + prefix +
"' section, must be one of 'base', 'one_level', 'subtree', or 'children'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Invalid value for 'scope' field of LDAP search parameters "
"in '{}' section, must be one of 'base', 'one_level', 'subtree', or 'children'", prefix);
}
}

void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConfiguration & config, const String & name)
{
if (name.empty())
throw Exception("LDAP server name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty");

const String ldap_server_config = "ldap_servers." + name;

@ -77,17 +78,17 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
const bool has_search_limit = config.has(ldap_server_config + ".search_limit");

if (!has_host)
throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Missing 'host' entry");

params.host = config.getString(ldap_server_config + ".host");

if (params.host.empty())
throw Exception("Empty 'host' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty 'host' entry");

if (has_bind_dn)
{
if (has_auth_dn_prefix || has_auth_dn_suffix)
throw Exception("Deprecated 'auth_dn_prefix' and 'auth_dn_suffix' entries cannot be used with 'bind_dn' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Deprecated 'auth_dn_prefix' and 'auth_dn_suffix' entries cannot be used with 'bind_dn' entry");

params.bind_dn = config.getString(ldap_server_config + ".bind_dn");
}

@ -141,7 +142,9 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
else if (tls_minimum_protocol_version_lc_str == "tls1.2")
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048
else
throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Bad value for 'tls_minimum_protocol_version' entry, allowed values are: "
"'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'");
}

if (has_tls_require_cert)

@ -158,7 +161,9 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
else if (tls_require_cert_lc_str == "demand")
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048
else
throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Bad value for 'tls_require_cert' entry, allowed values are: "
"'never', 'allow', 'try', 'demand'");
}

if (has_tls_cert_file)

@ -180,7 +185,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
{
UInt32 port = config.getUInt(ldap_server_config + ".port");
if (port > 65535)
throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad value for 'port' entry");

params.port = port;
}

@ -212,13 +217,13 @@ void parseKerberosParams(GSSAcceptorContext::Params & params, const Poco::Util::
}

if (reealm_key_count > 0 && principal_keys_count > 0)
throw Exception("Realm and principal name cannot be specified simultaneously", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Realm and principal name cannot be specified simultaneously");

if (reealm_key_count > 1)
throw Exception("Multiple realm sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple realm sections are not allowed");

if (principal_keys_count > 1)
throw Exception("Multiple principal sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple principal sections are not allowed");

params.realm = config.getString("kerberos.realm", "");
params.principal = config.getString("kerberos.principal", "");

@ -274,10 +279,10 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
}

if (ldap_servers_key_count > 1)
throw Exception("Multiple ldap_servers sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple ldap_servers sections are not allowed");

if (kerberos_keys_count > 1)
throw Exception("Multiple kerberos sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple kerberos sections are not allowed");

Poco::Util::AbstractConfiguration::Keys ldap_server_names;
config.keys("ldap_servers", ldap_server_names);

@ -291,7 +296,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
ldap_server_name.resize(bracket_pos);

if (ldap_client_params_blueprint.contains(ldap_server_name))
throw Exception("Multiple LDAP servers with the same name are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple LDAP servers with the same name are not allowed");

LDAPClient::Params ldap_client_params_tmp;
parseLDAPServer(ldap_client_params_tmp, config, ldap_server_name);

@ -346,7 +351,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B
// Retrieve the server parameters.
const auto pit = ldap_client_params_blueprint.find(server);
if (pit == ldap_client_params_blueprint.end())
throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server '{}' is not configured", server);

params = pit->second;
params->user = credentials.getUserName();

@ -461,7 +466,7 @@ bool ExternalAuthenticators::checkKerberosCredentials(const String & realm, cons
std::scoped_lock lock(mutex);

if (!kerberos_params.has_value())
throw Exception("Kerberos is not enabled", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Kerberos is not enabled");

if (!credentials.isReady())
return false;

@ -480,7 +485,7 @@ GSSAcceptorContext::Params ExternalAuthenticators::getKerberosParams() const
std::scoped_lock lock(mutex);

if (!kerberos_params.has_value())
throw Exception("Kerberos is not enabled", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Kerberos is not enabled");

return kerberos_params.value();
}

@ -265,16 +265,16 @@ void GSSAcceptorContext::initHandles()
if (!params.keytab.empty())
{
if (!std::filesystem::exists(params.keytab))
throw Exception("Keytab file not found", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Keytab file not found");

if (krb5_gss_register_acceptor_identity(params.keytab.c_str()))
throw Exception("Failed to register keytab file", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Failed to register keytab file");
}

if (!params.principal.empty())
{
if (!params.realm.empty())
throw Exception("Realm and principal name cannot be specified simultaneously", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Realm and principal name cannot be specified simultaneously");

gss_buffer_desc acceptor_name_buf;
acceptor_name_buf.length = params.principal.size();

@ -305,7 +305,7 @@ void GSSAcceptorContext::initHandles()
if (GSS_ERROR(major_status))
{
const auto messages = extractStatusMessages(major_status, minor_status, GSS_C_NO_OID);
throw Exception("gss_import_name() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_import_name() failed{}", (messages.empty() ? "" : ": " + messages));
}

minor_status = 0;

@ -323,7 +323,7 @@ void GSSAcceptorContext::initHandles()
if (GSS_ERROR(major_status))
{
const auto messages = extractStatusMessages(major_status, minor_status, GSS_C_NO_OID);
throw Exception("gss_acquire_cred() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_acquire_cred() failed{}", (messages.empty() ? "" : ": " + messages));
}
}
}

@ -387,21 +387,26 @@ String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger
if (major_status == GSS_S_COMPLETE)
{
if (!params.mechanism.empty() && !equalMechanisms(params.mechanism, mech_type))
throw Exception("gss_accept_sec_context() succeeded, but: the authentication mechanism is not what was expected", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR,
"gss_accept_sec_context() succeeded, but: "
"the authentication mechanism is not what was expected");

if (flags & GSS_C_ANON_FLAG)
throw Exception("gss_accept_sec_context() succeeded, but: the initiator does not wish to be authenticated", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator does not wish to be authenticated");

std::tie(user_name, realm) = extractNameAndRealm(initiator_name);

if (user_name.empty())
throw Exception("gss_accept_sec_context() succeeded, but: the initiator name cannot be extracted", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator name cannot be extracted");

if (realm.empty())
throw Exception("gss_accept_sec_context() succeeded, but: the initiator realm cannot be extracted", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator realm cannot be extracted");

if (!params.realm.empty() && params.realm != realm)
throw Exception("gss_accept_sec_context() succeeded, but: the initiator realm is not what was expected (expected: " + params.realm + ", actual: " + realm + ")", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR,
"gss_accept_sec_context() succeeded, but: "
"the initiator realm is not what was expected (expected: {}, actual: {})",
params.realm, realm);

output_token = bufferToString(output_token_buf);

@ -420,7 +425,7 @@ String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger
else
{
const auto messages = extractStatusMessages(major_status, minor_status, mech_type);
throw Exception("gss_accept_sec_context() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() failed{}", (messages.empty() ? "" : ": " + messages));
}
}
catch (...)

@ -452,7 +457,7 @@ void GSSAcceptorContext::initHandles()

String GSSAcceptorContext::processToken(const String &, Poco::Logger *)
{
throw Exception("ClickHouse was built without GSS-API/Kerberos support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without GSS-API/Kerberos support");
}

#endif // USE_KRB5
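A recurring idiom in the GSS-API hunks above: the format string ends in "{}" and the argument is either empty or a string beginning with ": ", so the detail is appended only when it exists. A standalone illustration of that pattern, not taken from ClickHouse:

#include <iostream>
#include <string>

// Mirrors the shape of:
//     throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_acquire_cred() failed{}", (messages.empty() ? "" : ": " + messages));
static std::string withOptionalDetail(const std::string & base, const std::string & messages)
{
    return base + (messages.empty() ? "" : ": " + messages);
}

int main()
{
    std::cout << withOptionalDetail("gss_acquire_cred() failed", "") << '\n';
    std::cout << withOptionalDetail("gss_acquire_cred() failed", "credentials cache not found") << '\n';
    return 0;
}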
@ -630,79 +630,70 @@ Poco::Logger * IAccessStorage::getLogger() const

void IAccessStorage::throwNotFound(const UUID & id) const
{
throw Exception(outputID(id) + " not found in " + getStorageName(), ErrorCodes::ACCESS_ENTITY_NOT_FOUND);
throw Exception(ErrorCodes::ACCESS_ENTITY_NOT_FOUND, "{} not found in {}", outputID(id), getStorageName());
}

void IAccessStorage::throwNotFound(AccessEntityType type, const String & name) const
{
int error_code = AccessEntityTypeInfo::get(type).not_found_error_code;
throw Exception("There is no " + formatEntityTypeWithName(type, name) + " in " + getStorageName(), error_code);
throw Exception(error_code, "There is no {} in {}", formatEntityTypeWithName(type, name), getStorageName());
}

void IAccessStorage::throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type)
{
throw Exception(
outputID(id) + ": " + formatEntityTypeWithName(type, name) + " expected to be of type " + toString(required_type),
ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: {} expected to be of type {}", outputID(id),
formatEntityTypeWithName(type, name), toString(required_type));
}

void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, AccessEntityType type, const String & name, AccessEntityType existing_type, const String & existing_name) const
{
throw Exception(
formatEntityTypeWithName(type, name) + ": cannot insert because the " + outputID(id) + " is already used by "
+ formatEntityTypeWithName(existing_type, existing_name) + " in " + getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: "
"cannot insert because the {} is already used by {} in {}", formatEntityTypeWithName(type, name),
outputID(id), formatEntityTypeWithName(existing_type, existing_name), getStorageName());
}

void IAccessStorage::throwNameCollisionCannotInsert(AccessEntityType type, const String & name) const
{
throw Exception(
formatEntityTypeWithName(type, name) + ": cannot insert because " + formatEntityTypeWithName(type, name) + " already exists in "
+ getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot insert because {} already exists in {}",
formatEntityTypeWithName(type, name), formatEntityTypeWithName(type, name), getStorageName());
}

void IAccessStorage::throwNameCollisionCannotRename(AccessEntityType type, const String & old_name, const String & new_name) const
{
throw Exception(
formatEntityTypeWithName(type, old_name) + ": cannot rename to " + backQuote(new_name) + " because "
+ formatEntityTypeWithName(type, new_name) + " already exists in " + getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot rename to {} because {} already exists in {}",
formatEntityTypeWithName(type, old_name), backQuote(new_name), formatEntityTypeWithName(type, new_name), getStorageName());
}

void IAccessStorage::throwReadonlyCannotInsert(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot insert " + formatEntityTypeWithName(type, name) + " to " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot insert {} to {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}

void IAccessStorage::throwReadonlyCannotUpdate(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot update " + formatEntityTypeWithName(type, name) + " in " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot update {} in {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}

void IAccessStorage::throwReadonlyCannotRemove(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot remove " + formatEntityTypeWithName(type, name) + " from " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot remove {} from {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}

void IAccessStorage::throwAddressNotAllowed(const Poco::Net::IPAddress & address)
{
throw Exception("Connections from " + address.toString() + " are not allowed", ErrorCodes::IP_ADDRESS_NOT_ALLOWED);
throw Exception(ErrorCodes::IP_ADDRESS_NOT_ALLOWED, "Connections from {} are not allowed", address.toString());
}

void IAccessStorage::throwAuthenticationTypeNotAllowed(AuthenticationType auth_type)

@ -715,7 +706,7 @@ void IAccessStorage::throwAuthenticationTypeNotAllowed(AuthenticationType auth_t

void IAccessStorage::throwInvalidCredentials()
{
throw Exception("Invalid credentials", ErrorCodes::WRONG_PASSWORD);
throw Exception(ErrorCodes::WRONG_PASSWORD, "Invalid credentials");
}

void IAccessStorage::throwBackupNotAllowed() const
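The IAccessStorage helpers above also show two things the rewritten constructor allows: the error code may be computed at runtime (throwNotFound(type, name) passes the code looked up from AccessEntityTypeInfo), and a single format string may carry several placeholders, as in the four-argument throwIDCollisionCannotInsert message. When collapsing a concatenation by hand, every "{}" must be matched by exactly one trailing argument; a tiny standalone counter, not part of this commit, makes that easy to sanity-check:

#include <cassert>
#include <string>

// Count "{}" placeholders in a format string.
static size_t countPlaceholders(const std::string & fmt)
{
    size_t count = 0;
    for (size_t pos = fmt.find("{}"); pos != std::string::npos; pos = fmt.find("{}", pos + 2))
        ++count;
    return count;
}

int main()
{
    // The rewritten throwIDCollisionCannotInsert() above passes four arguments for four placeholders.
    assert(countPlaceholders("{}: cannot insert because the {} is already used by {} in {}") == 4);
    return 0;
}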
@ -71,7 +71,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
const char *deftype = nullptr;

if (!std::filesystem::exists(keytab_file))
throw Exception("Keytab file does not exist", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Keytab file does not exist");

ret = krb5_init_context(&k5.ctx);
if (ret)

@ -81,7 +81,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
{
ret = krb5_cc_resolve(k5.ctx, cache_name.c_str(), &k5.out_cc);
if (ret)
throw Exception("Error in resolving cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving cache{}", fmtError(ret));
LOG_TRACE(log,"Resolved cache");
}
else

@ -89,7 +89,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Resolve the default cache and get its type and default principal (if it is initialized).
ret = krb5_cc_default(k5.ctx, &defcache);
if (ret)
throw Exception("Error while getting default cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while getting default cache{}", fmtError(ret));
LOG_TRACE(log,"Resolved default cache");
deftype = krb5_cc_get_type(k5.ctx, defcache);
if (krb5_cc_get_principal(k5.ctx, defcache, &defcache_princ) != 0)

@ -99,7 +99,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use the specified principal name.
ret = krb5_parse_name_flags(k5.ctx, principal.c_str(), 0, &k5.me);
if (ret)
throw Exception("Error when parsing principal name " + principal + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when parsing principal name {}", principal + fmtError(ret));

// Cache related commands
if (k5.out_cc == nullptr && krb5_cc_support_switch(k5.ctx, deftype))

@ -107,7 +107,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use an existing cache for the client principal if we can.
ret = krb5_cc_cache_match(k5.ctx, k5.me, &k5.out_cc);
if (ret && ret != KRB5_CC_NOTFOUND)
throw Exception("Error while searching for cache for " + principal + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while searching for cache for {}", principal + fmtError(ret));
if (0 == ret)
{
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));

@ -118,7 +118,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Create a new cache to avoid overwriting the initialized default cache.
ret = krb5_cc_new_unique(k5.ctx, deftype, nullptr, &k5.out_cc);
if (ret)
throw Exception("Error while generating new cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while generating new cache{}", fmtError(ret));
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));
k5.switch_to_cache = 1;
}

@ -134,24 +134,24 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co

ret = krb5_unparse_name(k5.ctx, k5.me, &k5.name);
if (ret)
throw Exception("Error when unparsing name" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when unparsing name{}", fmtError(ret));
LOG_TRACE(log,"Using principal: {}", k5.name);

// Allocate a new initial credential options structure.
ret = krb5_get_init_creds_opt_alloc(k5.ctx, &options);
if (ret)
throw Exception("Error in options allocation" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in options allocation{}", fmtError(ret));

// Resolve keytab
ret = krb5_kt_resolve(k5.ctx, keytab_file.c_str(), &keytab);
if (ret)
throw Exception("Error in resolving keytab "+keytab_file + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving keytab {}{}", keytab_file, fmtError(ret));
LOG_TRACE(log,"Using keytab: {}", keytab_file);

// Set an output credential cache in initial credential options.
ret = krb5_get_init_creds_opt_set_out_ccache(k5.ctx, options, k5.out_cc);
if (ret)
throw Exception("Error in setting output credential cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in setting output credential cache{}", fmtError(ret));

// Action: init or renew
LOG_TRACE(log,"Trying to renew credentials");

@ -165,7 +165,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Request KDC for an initial credentials using keytab.
ret = krb5_get_init_creds_keytab(k5.ctx, &my_creds, k5.me, keytab, 0, nullptr, options);
if (ret)
throw Exception("Error in getting initial credentials" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in getting initial credentials{}", fmtError(ret));
else
LOG_TRACE(log,"Got initial credentials");
}

@ -175,7 +175,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Initialize a credential cache. Destroy any existing contents of cache and initialize it for the default principal.
ret = krb5_cc_initialize(k5.ctx, k5.out_cc, k5.me);
if (ret)
throw Exception("Error when initializing cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when initializing cache{}", fmtError(ret));
LOG_TRACE(log,"Initialized cache");
// Store credentials in a credential cache.
ret = krb5_cc_store_cred(k5.ctx, k5.out_cc, &my_creds);

@ -189,7 +189,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Make a credential cache the primary cache for its collection.
ret = krb5_cc_switch(k5.ctx, k5.out_cc);
if (ret)
throw Exception("Error while switching to new cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while switching to new cache{}", fmtError(ret));
}

LOG_TRACE(log,"Authenticated to Kerberos v5");

@ -53,11 +53,11 @@ void LDAPAccessStorage::setConfiguration(const Poco::Util::AbstractConfiguration
const bool has_role_mapping = config.has(prefix_str + "role_mapping");

if (!has_server)
throw Exception("Missing 'server' field for LDAP user directory", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Missing 'server' field for LDAP user directory");

const auto ldap_server_name_cfg = config.getString(prefix_str + "server");
if (ldap_server_name_cfg.empty())
throw Exception("Empty 'server' field for LDAP user directory", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty 'server' field for LDAP user directory");

std::set<String> common_roles_cfg;
if (has_roles)

@ -321,7 +321,7 @@ std::set<String> LDAPAccessStorage::mapExternalRolesNoLock(const LDAPClient::Sea
std::set<String> role_names;

if (external_roles.size() != role_search_params.size())
throw Exception("Unable to map external roles", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unable to map external roles");

for (std::size_t i = 0; i < external_roles.size(); ++i)
{

@ -226,13 +226,13 @@ bool LDAPClient::openConnection()

auto * uri = ldap_url_desc2str(&url);
if (!uri)
throw Exception("ldap_url_desc2str() failed", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "ldap_url_desc2str() failed");

SCOPE_EXIT({ ldap_memfree(uri); });

handleError(ldap_initialize(&handle, uri));
if (!handle)
throw Exception("ldap_initialize() failed", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "ldap_initialize() failed");
}

{

@ -371,10 +371,10 @@ bool LDAPClient::openConnection()
const auto user_dn_search_results = search(*params.user_dn_detection);

if (user_dn_search_results.empty())
throw Exception("Failed to detect user DN: empty search results", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Failed to detect user DN: empty search results");

if (user_dn_search_results.size() > 1)
throw Exception("Failed to detect user DN: more than one entry in the search results", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Failed to detect user DN: more than one entry in the search results");

final_user_dn = *user_dn_search_results.begin();
}

@ -383,7 +383,7 @@ bool LDAPClient::openConnection()
}

default:
throw Exception("Unknown SASL mechanism", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Unknown SASL mechanism");
}
}

@ -576,7 +576,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
}

case -1:
throw Exception("Failed to process LDAP search message", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Failed to process LDAP search message");
}
}

@ -586,10 +586,10 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search_params, SearchResultsList * role_search_results)
{
if (params.user.empty())
throw Exception("LDAP authentication of a user with empty name is not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP authentication of a user with empty name is not allowed");

if (!role_search_params != !role_search_results)
throw Exception("Cannot return LDAP search results", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot return LDAP search results");

// Silently reject authentication attempt if the password is empty as if it didn't match.
if (params.password.empty())

@ -628,12 +628,12 @@ bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search

void LDAPClient::handleError(const int, String)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}

bool LDAPClient::openConnection()
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}

void LDAPClient::closeConnection() noexcept

@ -642,12 +642,12 @@ void LDAPClient::closeConnection() noexcept

LDAPClient::SearchResults LDAPClient::search(const SearchParams &)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}

bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList *, SearchResultsList *)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}

#endif // USE_LDAP

@ -316,10 +316,8 @@ bool MultipleAccessStorage::updateImpl(const UUID & id, const UpdateFunc & updat
break;
if (storage->find(new_entity->getType(), new_entity->getName()))
{
throw Exception(
old_entity->formatTypeWithName() + ": cannot rename to " + backQuote(new_entity->getName()) + " because "
+ new_entity->formatTypeWithName() + " already exists in " + storage->getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot rename to {} because {} already exists in {}",
old_entity->formatTypeWithName(), backQuote(new_entity->getName()), new_entity->formatTypeWithName(), storage->getStorageName());
}
}
}

@ -55,9 +55,8 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
{
if (!params.client_key.empty())
return params.client_key;
throw Exception(
"Quota " + quota->getName() + " (for user " + params.user_name + ") requires a client supplied key.",
ErrorCodes::QUOTA_REQUIRES_CLIENT_KEY);
throw Exception(ErrorCodes::QUOTA_REQUIRES_CLIENT_KEY, "Quota {} (for user {}) requires a client supplied key.",
quota->getName(), params.user_name);
}
case QuotaKeyType::CLIENT_KEY_OR_USER_NAME:
{

@ -73,7 +72,7 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
}
case QuotaKeyType::MAX: break;
}
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(quota->key_type)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota key type: {}", static_cast<int>(quota->key_type));
}

@ -51,7 +51,7 @@ ReplicatedAccessStorage::ReplicatedAccessStorage(
, backup_allowed(allow_backup_)
{
if (zookeeper_path.empty())
throw Exception("ZooKeeper path must be non-empty", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "ZooKeeper path must be non-empty");

if (zookeeper_path.back() == '/')
zookeeper_path.resize(zookeeper_path.size() - 1);

@ -458,7 +458,7 @@ zkutil::ZooKeeperPtr ReplicatedAccessStorage::getZooKeeperNoLock()
{
auto zookeeper = get_zookeeper();
if (!zookeeper)
throw Exception("Can't have Replicated access without ZooKeeper", ErrorCodes::NO_ZOOKEEPER);
throw Exception(ErrorCodes::NO_ZOOKEEPER, "Can't have Replicated access without ZooKeeper");

/// It's possible that we connected to different [Zoo]Keeper instance
/// so we may read a bit stale state.

@ -254,7 +254,7 @@ bool RolesOrUsersSet::match(const UUID & user_id, const boost::container::flat_s
std::vector<UUID> RolesOrUsersSet::getMatchingIDs() const
{
if (all)
throw Exception("getAllMatchingIDs() can't get ALL ids without access_control", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "getAllMatchingIDs() can't get ALL ids without access_control");
std::vector<UUID> res;
boost::range::set_difference(ids, except_ids, std::back_inserter(res));
return res;

@ -45,7 +45,7 @@ void RowPolicy::setFullName(const RowPolicyName & full_name_)

void RowPolicy::setName(const String &)
{
throw Exception("RowPolicy::setName() is not implemented", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RowPolicy::setName() is not implemented");
}

@ -124,7 +124,7 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
if (new_value != old_value)
{
if (old_value == SettingConstraintWritability::CONST)
throw Exception("Setting " + element.setting_name + " should not be changed", ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", element.setting_name);
}
}
}

@ -274,7 +274,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
if (constraint.writability == SettingConstraintWritability::CONST)
{
if (reaction == THROW_ON_VIOLATION)
throw Exception("Setting " + setting_name + " should not be changed", ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", setting_name);
else
return false;
}

@ -285,7 +285,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
if (!min_value.isNull() && !max_value.isNull() && less_or_cannot_compare(max_value, min_value))
{
if (reaction == THROW_ON_VIOLATION)
throw Exception("Setting " + setting_name + " should not be changed", ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", setting_name);
else
return false;
}

@ -294,9 +294,8 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
{
if (reaction == THROW_ON_VIOLATION)
{
throw Exception(
"Setting " + setting_name + " shouldn't be less than " + applyVisitor(FieldVisitorToString(), min_value),
ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be less than {}",
setting_name, applyVisitor(FieldVisitorToString(), min_value));
}
else
change.value = min_value;

@ -306,9 +305,8 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
{
if (reaction == THROW_ON_VIOLATION)
{
throw Exception(
"Setting " + setting_name + " shouldn't be greater than " + applyVisitor(FieldVisitorToString(), max_value),
ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be greater than {}",
setting_name, applyVisitor(FieldVisitorToString(), max_value));
}
else
change.value = max_value;

@ -58,7 +58,9 @@ void SettingsProfileElement::init(const ASTSettingsProfileElement & ast, const A
access_control->checkSettingNameIsAllowed(setting_name);
/// Check if a CHANGEABLE_IN_READONLY is allowed.
if (ast.writability == SettingConstraintWritability::CHANGEABLE_IN_READONLY && !access_control->doesSettingsConstraintsReplacePrevious())
throw Exception("CHANGEABLE_IN_READONLY for " + setting_name + " is not allowed unless settings_constraints_replace_previous is enabled", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
"CHANGEABLE_IN_READONLY for {} "
"is not allowed unless settings_constraints_replace_previous is enabled", setting_name);
}

value = ast.value;

@ -94,7 +94,7 @@ void SettingsProfilesCache::setDefaultProfileName(const String & default_profile

auto it = profiles_by_name.find(default_profile_name);
if (it == profiles_by_name.end())
throw Exception("Settings profile " + backQuote(default_profile_name) + " not found", ErrorCodes::THERE_IS_NO_PROFILE);
throw Exception(ErrorCodes::THERE_IS_NO_PROFILE, "Settings profile {} not found", backQuote(default_profile_name));

default_profile_id = it->second;
}

@ -67,11 +67,15 @@ namespace
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex + has_ldap + has_kerberos + has_certificates;

if (num_password_fields > 1)
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'ssl_certificates' are used to specify authentication info for user " + user_name + ". Must be only one of them.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "More than one field of 'password', 'password_sha256_hex', "
"'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'ssl_certificates' "
"are used to specify authentication info for user {}. "
"Must be only one of them.", user_name);

if (num_password_fields < 1)
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' or 'ssl_certificates' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Either 'password' or 'password_sha256_hex' "
"or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' "
"or 'ssl_certificates' must be specified for user {}.", user_name);

if (has_password_plaintext)
{

@ -92,11 +96,11 @@ namespace
{
bool has_ldap_server = config.has(user_config + ".ldap.server");
if (!has_ldap_server)
throw Exception("Missing mandatory 'server' in 'ldap', with LDAP server name, for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Missing mandatory 'server' in 'ldap', with LDAP server name, for user {}.", user_name);

const auto ldap_server_name = config.getString(user_config + ".ldap.server");
if (ldap_server_name.empty())
throw Exception("LDAP server name cannot be empty for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty for user {}.", user_name);

user->auth_data = AuthenticationData{AuthenticationType::LDAP};
user->auth_data.setLDAPServerName(ldap_server_name);

@ -124,7 +128,7 @@ namespace
common_names.insert(std::move(value));
}
else
throw Exception("Unknown certificate pattern type: " + key, ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown certificate pattern type: {}", key);
}
user->auth_data.setSSLCertificateCommonNames(std::move(common_names));
}

@ -167,7 +171,7 @@ namespace
else if (key.starts_with("host"))
user->allowed_client_hosts.addName(value);
else
throw Exception("Unknown address pattern type: " + key, ErrorCodes::UNKNOWN_ADDRESS_PATTERN_TYPE);
throw Exception(ErrorCodes::UNKNOWN_ADDRESS_PATTERN_TYPE, "Unknown address pattern type: {}", key);
}
}

@ -466,13 +470,15 @@ namespace
if (access_control.doesSettingsConstraintsReplacePrevious())
profile_element.writability = SettingConstraintWritability::CHANGEABLE_IN_READONLY;
else
throw Exception("Setting changeable_in_readonly for " + setting_name + " is not allowed unless settings_constraints_replace_previous is enabled", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Setting changeable_in_readonly for {} is not allowed "
"unless settings_constraints_replace_previous is enabled", setting_name);
}
else
throw Exception("Setting " + constraint_type + " value for " + setting_name + " isn't supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Setting {} value for {} isn't supported", constraint_type, setting_name);
}
if (writability_count > 1)
throw Exception("Not more than one constraint writability specifier (const/readonly/changeable_in_readonly) is allowed for " + setting_name, ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not more than one constraint writability specifier "
"(const/readonly/changeable_in_readonly) is allowed for {}", setting_name);

profile_elements.push_back(std::move(profile_element));
}
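The users.xml validation above counts how many mutually exclusive authentication fields are present and requires exactly one of them. A standalone approximation of that check (the field list is taken from the diff, the rest is a stand-in rather than ClickHouse's API):

#include <initializer_list>
#include <stdexcept>

// Require that exactly one of a set of mutually exclusive options is present.
static void requireExactlyOne(std::initializer_list<bool> fields_present)
{
    int count = 0;
    for (bool present : fields_present)
        count += present ? 1 : 0;
    if (count > 1)
        throw std::runtime_error("More than one authentication field is set; must be only one of them");
    if (count < 1)
        throw std::runtime_error("No authentication field is set; exactly one must be specified");
}

// Usage, mirroring the flags in the hunk above:
//     requireExactlyOne({has_no_password, has_password_plaintext, has_password_sha256_hex,
//                        has_password_double_sha1_hex, has_ldap, has_kerberos, has_certificates});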
@ -68,7 +68,7 @@ public:
void create(AggregateDataPtr __restrict place) const override
{
if (std::uniform_real_distribution<>(0.0, 1.0)(thread_local_rng) <= throw_probability)
throw Exception("Aggregate function " + getName() + " has thrown exception successfully", ErrorCodes::AGGREGATE_FUNCTION_THROW);
throw Exception(ErrorCodes::AGGREGATE_FUNCTION_THROW, "Aggregate function {} has thrown exception successfully", getName());

new (place) Data;
}

@ -116,7 +116,7 @@ void registerAggregateFunctionAggThrow(AggregateFunctionFactory & factory)
if (parameters.size() == 1)
throw_probability = parameters[0].safeGet<Float64>();
else if (parameters.size() > 1)
throw Exception("Aggregate function " + name + " cannot have more than one parameter", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} cannot have more than one parameter", name);

return std::make_shared<AggregateFunctionThrow>(argument_types, parameters, throw_probability);
});

@ -78,7 +78,7 @@ public:
{
auto f_stat = data(place).getFStatistic();
if (std::isinf(f_stat) || isNaN(f_stat) || f_stat < 0)
throw Exception("F statistic is not defined or infinite for these arguments", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "F statistic is not defined or infinite for these arguments");

auto p_value = data(place).getPValue(f_stat);

@ -52,8 +52,9 @@ public:
, serialization_val(type_val->getDefaultSerialization())
{
if (!type_val->isComparable())
throw Exception("Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
+ " because the values of that data type are not comparable", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of "
"aggregate function {} because the values of that data type are not comparable",
type_val->getName(), getName());
}

String getName() const override

@ -26,7 +26,7 @@ public:
DataTypes transformArguments(const DataTypes & arguments) const override
{
if (arguments.empty())
throw Exception("-Array aggregate functions require at least one argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "-Array aggregate functions require at least one argument");

DataTypes nested_arguments;
for (const auto & type : arguments)

@ -34,8 +34,8 @@ public:
if (const DataTypeArray * array = typeid_cast<const DataTypeArray *>(type.get()))
nested_arguments.push_back(array->getNestedType());
else
throw Exception("Illegal type " + type->getName() + " of argument"
" for aggregate function with " + getName() + " suffix. Must be array.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument"
" for aggregate function with {} suffix. Must be array.", type->getName(), getName());
}

return nested_arguments;

@ -36,7 +36,7 @@ public:
assert(parameters == nested_func->getParameters());
for (const auto & type : arguments)
if (!isArray(type))
throw Exception("All arguments for aggregate function " + getName() + " must be arrays", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "All arguments for aggregate function {} must be arrays", getName());
}

String getName() const override

@ -129,7 +129,7 @@ public:
const IColumn::Offsets & ith_offsets = ith_column.getOffsets();

if (ith_offsets[row_num] != end || (row_num != 0 && ith_offsets[row_num - 1] != begin))
throw Exception("Arrays passed to " + getName() + " aggregate function have different sizes", ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH);
throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH, "Arrays passed to {} aggregate function have different sizes", getName());
}

for (size_t i = begin; i < end; ++i)

@ -29,8 +29,8 @@ AggregateFunctionPtr createAggregateFunctionAvg(const std::string & name, const
const DataTypePtr& data_type = argument_types[0];

if (!allowType(data_type))
throw Exception("Illegal type " + data_type->getName() + " of argument for aggregate function " + name,
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}",
data_type->getName(), name);

AggregateFunctionPtr res;

@ -71,11 +71,9 @@ createAggregateFunctionAvgWeighted(const std::string & name, const DataTypes & a
const auto data_type_weight = static_cast<const DataTypePtr>(argument_types[1]);

if (!allowTypes(data_type, data_type_weight))
throw Exception(
"Types " + data_type->getName() +
" and " + data_type_weight->getName() +
" are non-conforming as arguments for aggregate function " + name,
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Types {} and {} are non-conforming as arguments for aggregate function {}",
data_type->getName(), data_type_weight->getName(), name);

AggregateFunctionPtr ptr;

@ -23,14 +23,15 @@ AggregateFunctionPtr createAggregateFunctionBitwise(const std::string & name, co
assertUnary(name, argument_types);

if (!argument_types[0]->canBeUsedInBitOperations())
throw Exception("The type " + argument_types[0]->getName() + " of argument for aggregate function " + name
+ " is illegal, because it cannot be used in bitwise operations",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The type {} of argument for aggregate function {} "
"is illegal, because it cannot be used in bitwise operations",
argument_types[0]->getName(), name);

AggregateFunctionPtr res(createWithUnsignedIntegerType<AggregateFunctionBitwise, Data>(*argument_types[0], argument_types[0]));

if (!res)
throw Exception("Illegal type " + argument_types[0]->getName() + " of argument for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument for aggregate function {}", argument_types[0]->getName(), name);

return res;
}

@ -21,8 +21,9 @@ AggregateFunctionPtr createAggregateFunctionRate(const std::string & name, const
assertBinary(name, argument_types);

if (argument_types.size() < 2)
throw Exception("Aggregate function " + name + " requires at least two arguments",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Aggregate function {} requires at least two arguments",
name);

return std::make_shared<AggregateFunctionBoundingRatio>(argument_types);
}

@ -118,7 +118,9 @@ public:
const auto * y_arg = arguments.at(1).get();

if (!x_arg->isValueRepresentedByNumber() || !y_arg->isValueRepresentedByNumber())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal types of arguments of aggregate function {}, must have number representation.", getName());
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Illegal types of arguments of aggregate function {}, must have number representation.",
getName());
}

bool allocatesMemoryInArena() const override { return false; }

@ -163,16 +163,14 @@ AggregateFunctionPtr createAggregateFunctionCategoricalIV(
assertNoParameters(name, params);

if (arguments.size() < 2)
throw Exception(
"Aggregate function " + name + " requires two or more arguments",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires two or more arguments",
name);

for (const auto & argument : arguments)
{
if (!WhichDataType(argument).isUInt8())
throw Exception(
"All the arguments of aggregate function " + name + " should be UInt8",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "All the arguments of aggregate function {} should be UInt8",
name);
}

return std::make_shared<AggregateFunctionCategoricalIV>(arguments, params);

@ -219,7 +219,7 @@ public:
: IAggregateFunctionDataHelper<AggregateFunctionCountData, AggregateFunctionCountNotNullUnary>({argument}, params, createResultType())
{
if (!argument->isNullable())
throw Exception("Logical error: not Nullable data type passed to AggregateFunctionCountNotNullUnary", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: not Nullable data type passed to AggregateFunctionCountNotNullUnary");
}

String getName() const override { return "count"; }

@ -27,8 +27,8 @@ AggregateFunctionPtr createAggregateFunctionDeltaSum(
assertNoParameters(name, params);

if (arguments.size() != 1)
throw Exception("Incorrect number of arguments for aggregate function " + name,
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Incorrect number of arguments for aggregate function {}", name);

const DataTypePtr & data_type = arguments[0];

@ -36,8 +36,8 @@ AggregateFunctionPtr createAggregateFunctionDeltaSum(
return AggregateFunctionPtr(createWithNumericType<AggregationFunctionDeltaSum>(
*data_type, arguments, params));
else
throw Exception("Illegal type " + arguments[0]->getName() + " of argument for aggregate function " + name,
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}",
arguments[0]->getName(), name);
}
}
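The AggregateFunction* hunks above all have the same shape: the factory or constructor validates the argument list and throws with the rewritten, code-first constructor. As a rough standalone approximation of such a registration-time check (the names and the exception type are placeholders, not ClickHouse's API):

#include <stdexcept>
#include <string>
#include <vector>

// Validate the argument list of a hypothetical single-argument aggregate function
// at registration time, the way the creator functions above do.
static void checkSingleNumericArgument(const std::vector<std::string> & argument_types)
{
    if (argument_types.size() != 1)
        throw std::invalid_argument("Incorrect number of arguments for aggregate function");
    if (argument_types.front() != "UInt64" && argument_types.front() != "Float64")
        throw std::invalid_argument("Illegal type of argument for aggregate function");
}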
Some files were not shown because too many files have changed in this diff.