Merge remote-tracking branch 'origin/master' into block-non-float-gorilla-v2

This commit is contained in:
Robert Schulze 2023-01-24 10:14:06 +00:00
commit aaf7653108
No known key found for this signature in database
GPG Key ID: 26703B55FB13728A
1350 changed files with 12002 additions and 8440 deletions

contrib/poco vendored

@ -1 +1 @@
Subproject commit 0ab9bba7ccad3c8dacce04a35cb3b78218547ab4
Subproject commit 4b1c8dd9913d2a16db62df0e509fa598da5c8219

View File

@ -18,13 +18,25 @@ repo_dir=ch
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-15_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function git_clone_with_retry
{
for _ in 1 2 3 4; do
if git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$1" 2>&1 | ts '%Y-%m-%d %H:%M:%S';then
return 0
else
sleep 0.5
fi
done
return 1
}
function clone
{
# For local runs, start directly from the "fuzz" stage.
rm -rf "$repo_dir" ||:
mkdir "$repo_dir" ||:
git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$repo_dir" 2>&1 | ts '%Y-%m-%d %H:%M:%S'
git_clone_with_retry "$repo_dir"
(
cd "$repo_dir"
if [ "$PR_TO_TEST" != "0" ]; then

View File

@ -50,7 +50,7 @@ Action required for every item -- these are errors that must be fixed.
A query is supposed to run longer than 0.1 second. If your query runs faster, increase the amount of processed data to bring the run time above this threshold. You can use a bigger table (e.g. `hits_100m` instead of `hits_10m`), increase a `LIMIT`, make a query single-threaded, and so on. Queries that are too fast suffer from poor stability and precision.
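For illustration, a hypothetical tweak along these lines (the filter column and predicate are only placeholders; `hits_10m` and `hits_100m` are the benchmark tables mentioned above) can bring a too-fast query above the threshold:
```sql
-- Too fast: finishes well under 0.1 s, so its timings are dominated by noise.
SELECT count() FROM hits_10m WHERE URL LIKE '%metrika%';

-- More stable: a bigger table and a single thread push the run time above 0.1 s.
SELECT count() FROM hits_100m WHERE URL LIKE '%metrika%'
SETTINGS max_threads = 1;
```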
#### Partial Queries
#### Backward-incompatible Queries
Action required for the cells marked in red.
Shows the queries we are unable to run on an old server -- probably because they contain a new function. You should see this table when you add a new function and a performance test for it. Check that the run time and variance are acceptable (run time between 0.1 and 1 seconds, variance below 10%). If not, they will be highlighted in red.

View File

@ -399,7 +399,7 @@ clickhouse-local --query "
create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
'test text, query_index int, query_id text, version UInt8, time float');
-- Separately process 'partial' queries which we could only run on the new server
-- Separately process backward-incompatible ('partial') queries which we could only run on the new server
-- because they use new functions. We can't make normal stats for them, but still
-- have to show some stats so that the PR author can tweak them.
create view partial_queries as select test, query_index
@ -650,7 +650,7 @@ create view partial_query_times as select * from
'test text, query_index int, time_stddev float, time_median double')
;
-- Report for partial queries that we could only run on the new server (e.g.
-- Report for backward-incompatible ('partial') queries that we could only run on the new server (e.g.
-- queries with new functions added in the tested PR).
create table partial_queries_report engine File(TSV, 'report/partial-queries-report.tsv')
settings output_format_decimal_trailing_zeros = 1
@ -829,7 +829,7 @@ create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
-- Guess the number of query runs used for this test. The number is required to
-- calculate and check the average query run time in the report.
-- We have to be careful, because we will encounter:
-- 1) partial queries which run only on one server
-- 1) backward-incompatible ('partial') queries which run only on one server
-- 3) some errors that make query run for a different number of times on a
-- particular server.
--

View File

@ -30,7 +30,7 @@ faster_queries = 0
slower_queries = 0
unstable_queries = 0
very_unstable_queries = 0
unstable_partial_queries = 0
unstable_backward_incompatible_queries = 0
# max seconds to run one query by itself, not counting preparation
allowed_single_run_time = 2
@ -378,13 +378,13 @@ if args.report == "main":
]
)
def add_partial():
def add_backward_incompatible():
rows = tsvRows("report/partial-queries-report.tsv")
if not rows:
return
global unstable_partial_queries, slow_average_tests, tables
text = tableStart("Partial Queries")
global unstable_backward_incompatible_queries, slow_average_tests, tables
text = tableStart("Backward-incompatible queries")
columns = ["Median time, s", "Relative time variance", "Test", "#", "Query"]
text += tableHeader(columns)
attrs = ["" for c in columns]
@ -392,7 +392,7 @@ if args.report == "main":
anchor = f"{currentTableAnchor()}.{row[2]}.{row[3]}"
if float(row[1]) > 0.10:
attrs[1] = f'style="background: {color_bad}"'
unstable_partial_queries += 1
unstable_backward_incompatible_queries += 1
errors_explained.append(
[
f"<a href=\"#{anchor}\">The query no. {row[3]} of test '{row[2]}' has excessive variance of run time. Keep it below 10%</a>"
@ -414,7 +414,7 @@ if args.report == "main":
text += tableEnd()
tables.append(text)
add_partial()
add_backward_incompatible()
def add_changes():
rows = tsvRows("report/changed-perf.tsv")
@ -630,8 +630,8 @@ if args.report == "main":
status = "failure"
message_array.append(str(slower_queries) + " slower")
if unstable_partial_queries:
very_unstable_queries += unstable_partial_queries
if unstable_backward_incompatible_queries:
very_unstable_queries += unstable_backward_incompatible_queries
status = "failure"
# Don't show mildly unstable queries, only the very unstable ones we

View File

@ -128,9 +128,10 @@ function run_tests()
if [[ "${HIGH_LEVEL_COVERAGE}" = "YES" ]]; then
ADDITIONAL_OPTIONS+=('--report-coverage')
ADDITIONAL_OPTIONS+=('--report-logs-stats')
fi
ADDITIONAL_OPTIONS+=('--report-logs-stats')
set +e
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \

View File

@ -40,8 +40,8 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1,
INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2,
INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1],
INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2],
...
PROJECTION projection_name_1 (SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]),
PROJECTION projection_name_2 (SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY])
@ -359,13 +359,15 @@ ClickHouse uses this logic not only for days of the month sequences, but for any
The index declaration is in the columns section of the `CREATE` query.
``` sql
INDEX index_name expr TYPE type(...) GRANULARITY granularity_value
INDEX index_name expr TYPE type(...) [GRANULARITY granularity_value]
```
For tables from the `*MergeTree` family, data skipping indices can be specified.
These indices aggregate some information about the specified expression on blocks, which consist of `granularity_value` granules (the size of the granule is specified using the `index_granularity` setting in the table engine). Then these aggregates are used in `SELECT` queries for reducing the amount of data to read from the disk by skipping big blocks of data where the `WHERE` condition cannot be satisfied.
The `GRANULARITY` clause can be omitted; the default value of `granularity_value` is 1.
**Example**
``` sql

View File

@ -22,8 +22,8 @@ functions in ClickHouse. The sample datasets include:
- The [Cell Towers dataset](../getting-started/example-datasets/cell-towers.md) imports a CSV into ClickHouse
- The [NYPD Complaint Data](../getting-started/example-datasets/nypd_complaint_data.md) demonstrates how to use data inference to simplify creating tables
- The ["What's on the Menu?" dataset](../getting-started/example-datasets/menus.md) has an example of denormalizing data
- The [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- The [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) shows how JSON data can be loaded
- The [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3
- [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3
- [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) shows how to generate random data if none of the above fit your needs.
View the **Tutorials and Datasets** menu for a complete list of sample datasets.

View File

@ -1714,7 +1714,7 @@ something_weird{problem="division by zero"} +Inf -3982045
## Protobuf {#protobuf}
Protobuf - is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.
Protobuf is a [Protocol Buffers](https://protobuf.dev/) format.
This format requires an external format schema. The schema is cached between queries.
ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.

View File

@ -22,5 +22,6 @@ Additional cache types:
- [Dictionaries](../sql-reference/dictionaries/index.md) data cache.
- Schema inference cache.
- [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks.
- [(Experimental) Query result cache](query-result-cache.md).
To drop one of the caches, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md#drop-mark-cache) statements.

View File

@ -0,0 +1,99 @@
---
slug: /en/operations/query-result-cache
sidebar_position: 65
sidebar_label: Query Result Cache [experimental]
---
# Query Result Cache [experimental]
The query result cache allows computing SELECT queries just once and serving further executions of the same query directly from the cache.
Depending on the type of queries, this can dramatically reduce the latency and resource consumption of the ClickHouse server.
## Background, Design and Limitations
Query result caches can generally be viewed as transactionally consistent or inconsistent.
- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the SELECT query changes
or potentially changes. In ClickHouse, operations which change the data include inserts/updates/deletes in/of/from tables or collapsing
merges. Transactionally consistent caching is especially suitable for OLTP databases, for example
[MySQL](https://dev.mysql.com/doc/refman/5.6/en/query-cache.html) (which removed its query result cache in v8.0) and
[Oracle](https://docs.oracle.com/database/121/TGDBA/tune_result_cache.htm).
- In transactionally inconsistent caches, slight inaccuracies in query results are accepted under the assumption that all cache entries are
assigned a validity period after which they expire (e.g. 1 minute) and that the underlying data changes only slightly during this period.
This approach is overall more suitable for OLAP databases. As an example where transactionally inconsistent caching is sufficient,
consider an hourly sales report in a reporting tool which is simultaneously accessed by multiple users. Sales data changes typically
slowly enough that the database only needs to compute the report once (represented by the first SELECT query). Further queries can be
served directly from the query result cache. In this example, a reasonable validity period could be 30 min.
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
the same caching logic and configuration is often duplicated. With ClickHouse's query result cache, the caching logic moves to the server
side. This reduces maintenance effort and avoids redundancy.
:::warning
The query result cache is an experimental feature that should not be used in production. There are known cases (e.g. in distributed query
processing) where wrong results are returned.
:::
## Configuration Settings and Usage
Parameter [enable_experimental_query_result_cache](settings/settings.md#enable-experimental-query-result-cache) controls whether query
results are inserted into / retrieved from the cache for the current query or session. For example, the first execution of query
``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true;
```
stores the query result into the query result cache. Subsequent executions of the same query (also with parameter
`enable_experimental_query_result_cache = true`) will read the computed result directly from the cache.
Sometimes, it is desirable to use the query result cache only passively, i.e. to allow reading from it but not writing into it (if the result
is not stored in the cache yet). For that, parameter [enable_experimental_query_result_cache_passive_usage](settings/settings.md#enable-experimental-query-result-cache-passive-usage)
can be used instead of `enable_experimental_query_result_cache`.
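As a sketch, building on the hypothetical query above, passive usage might look like this:
```sql
-- First execution: compute the result and store it in the query result cache.
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true;

-- Later executions: read from the cache if an entry exists, but never write to it.
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache_passive_usage = true;
```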
For maximum control, it is generally recommended to provide the settings `enable_experimental_query_result_cache` or
`enable_experimental_query_result_cache_passive_usage` only with specific queries. It is also possible to enable caching at user or profile
level, but one should keep in mind that all SELECT queries may then return cached results, including monitoring or debugging queries to system
tables.
The query result cache can be cleared using the statement `SYSTEM DROP QUERY RESULT CACHE`. The content of the query result cache is displayed
in the system table `system.query_result_cache`. The number of query result cache hits and misses is shown as the events "QueryResultCacheHits" and
"QueryResultCacheMisses" in the system table `system.events`. Both counters are only updated for SELECT queries which run with settings
`enable_experimental_query_result_cache = true` or `enable_experimental_query_result_cache_passive_usage = true`. Other queries do not
affect the cache miss counter.
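For illustration, a minimal sketch of inspecting and clearing the cache (system table names are written in lowercase here):
```sql
-- Show the currently cached entries.
SELECT * FROM system.query_result_cache;

-- Show the hit/miss counters.
SELECT event, value
FROM system.events
WHERE event IN ('QueryResultCacheHits', 'QueryResultCacheMisses');

-- Remove all entries from the query result cache.
SYSTEM DROP QUERY RESULT CACHE;
```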
The query result cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can
be changed (see below) but doing so is not recommended for security reasons.
Query results are referenced in the query result cache by the [Abstract Syntax Tree (AST)](https://en.wikipedia.org/wiki/Abstract_syntax_tree)
of their query. This means that caching is agnostic to upper/lowercase, for example `SELECT 1` and `select 1` are treated as the same query.
To make the matching more natural, all query-level settings related to the query result cache are removed from the AST.
If the query was aborted due to an exception or user cancellation, no entry is written into the query result cache.
The size of the query result cache, the maximum number of cache entries and the maximum size of cache entries (in bytes and in records) can
be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-result-cache).
To define the minimum duration a query must run for its result to be cached, use setting
[query_result_cache_min_query_duration](settings/settings.md#query-result-cache-min-query-duration). For example, the result of query
``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_duration = 5000;
```
is only cached if the query runs longer than 5 seconds. It is also possible to specify how many times a query must run before its result is
cached; for that, use setting [query_result_cache_min_query_runs](settings/settings.md#query-result-cache-min-query-runs).
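A sketch combining both thresholds with the hypothetical query from above:
```sql
-- The result is stored only after the query has already run twice
-- and only if an execution takes at least 5 seconds.
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true,
         query_result_cache_min_query_duration = 5000,
         query_result_cache_min_query_runs = 2;
```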
Entries in the query result cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a
different value can be specified at session, profile or query level using setting [query_result_cache_ttl](settings/settings.md#query-result-cache-ttl).
Also, results of queries with non-deterministic functions such as `rand()` and `now()` are not cached. This can be overridden using
setting [query_result_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-result-cache-store-results-of-queries-with-nondeterministic-functions).
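For example (the table name is a placeholder):
```sql
-- Not cached by default because now() is non-deterministic.
SELECT now(), count() FROM table
SETTINGS enable_experimental_query_result_cache = true;

-- Opt in to caching such results anyway.
SELECT now(), count() FROM table
SETTINGS enable_experimental_query_result_cache = true,
         query_result_cache_store_results_of_queries_with_nondeterministic_functions = true;
```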
Finally, entries in the query result cache are not shared between users for security reasons. For example, user A must not be able to bypass a
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
be marked accessible by other users (i.e. shared) by supplying setting
[query_result_cache_share_between_users](settings/settings.md#query-result-cache-share-between-users).

View File

@ -1270,6 +1270,32 @@ If the table does not exist, ClickHouse will create it. If the structure of the
</query_log>
```
## query_result_cache {#server_configuration_parameters_query-result-cache}
[Query result cache](../query-result-cache.md) configuration.
The following settings are available:
- `size`: The maximum cache size in bytes. 0 means the query result cache is disabled. Default value: `1073741824` (1 GiB).
- `max_entries`: The maximum number of SELECT query results stored in the cache. Default value: `1024`.
- `max_entry_size`: The maximum size in bytes a SELECT query result may have in order to be saved in the cache. Default value: `1048576` (1 MiB).
- `max_entry_records`: The maximum number of records a SELECT query result may have in order to be saved in the cache. Default value: `30000000` (30 million).
:::warning
Data for the query result cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `size` or disable the query result cache altogether.
:::
**Example**
```xml
<query_result_cache>
<size>1073741824</size>
<max_entries>1024</max_entries>
<max_entry_size>1048576</max_entry_size>
<max_entry_records>30000000</max_entry_records>
</query_result_cache>
```
## query_thread_log {#server_configuration_parameters-query_thread_log}
Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#settings-log-query-threads) setting.

View File

@ -176,6 +176,59 @@ Similar to [replicated_deduplication_window](#replicated-deduplication-window),
The time is relative to the time of the most recent record, not to the wall time. If it's the only record it will be stored forever.
## replicated_deduplication_window_for_async_inserts {#replicated-deduplication-window-for-async-inserts}
The number of most recently async inserted blocks for which ClickHouse Keeper stores hash sums to check for duplicates.
Possible values:
- Any positive integer.
- 0 (disable deduplication for async_inserts)
Default value: 10000.
The [Async Insert](./settings.md#async-insert) command will be cached in one or more blocks (parts). For [insert deduplication](../../engines/table-engines/mergetree-family/replication.md), when writing into replicated tables, ClickHouse writes the hash sums of each insert into ClickHouse Keeper. Hash sums are stored only for the most recent `replicated_deduplication_window_for_async_inserts` blocks. The oldest hash sums are removed from ClickHouse Keeper.
A large value of `replicated_deduplication_window_for_async_inserts` slows down async inserts because more entries need to be compared.
The hash sum is calculated from the composition of the field names and types and the data of the insert (stream of bytes).
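As an illustration, the setting is applied per table; the database, table, schema and ZooKeeper path below are placeholders:
```sql
CREATE TABLE test.events
(
    id UInt64,
    payload String
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/events', '{replica}')
ORDER BY id
SETTINGS replicated_deduplication_window_for_async_inserts = 10000;
```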
## replicated_deduplication_window_seconds_for_async_inserts {#replicated-deduplication-window-seconds-for-async_inserts}
The number of seconds after which the hash sums of the async inserts are removed from ClickHouse Keeper.
Possible values:
- Any positive integer.
Default value: 604800 (1 week).
Similar to [replicated_deduplication_window_for_async_inserts](#replicated-deduplication-window-for-async-inserts), `replicated_deduplication_window_seconds_for_async_inserts` specifies how long to store hash sums of blocks for async insert deduplication. Hash sums older than `replicated_deduplication_window_seconds_for_async_inserts` are removed from ClickHouse Keeper, even if there are fewer of them than `replicated_deduplication_window_for_async_inserts`.
The time is relative to the time of the most recent record, not to the wall time. If it's the only record it will be stored forever.
## use_async_block_ids_cache {#use-async-block-ids-cache}
If true, we cache the hash sums of the async inserts.
Possible values:
- true, false
Default value: false.
A block bearing multiple async inserts will generate multiple hash sums. When some of the inserts are duplicated, Keeper returns only one duplicated hash sum per RPC, which causes unnecessary RPC retries. This cache watches the hash sums path in Keeper. When updates are observed in Keeper, the cache refreshes as soon as possible, so that duplicated inserts can be filtered in memory.
## async_block_ids_cache_min_update_interval_ms
The minimum interval (in milliseconds) between updates of the cache enabled by `use_async_block_ids_cache`.
Possible values:
- Any positive integer.
Default value: 100.
Normally, the cache enabled by `use_async_block_ids_cache` updates as soon as there are changes in the watched Keeper path. However, the updates might be too frequent and become a heavy burden. This minimum interval prevents the cache from updating too often. Note that if this value is set too high, blocks with duplicated inserts will have a longer retry time.
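A sketch of enabling the cache and relaxing its update interval on a hypothetical replicated table:
```sql
CREATE TABLE test.async_events
(
    id UInt64,
    payload String
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/async_events', '{replica}')
ORDER BY id
SETTINGS use_async_block_ids_cache = 1,
         async_block_ids_cache_min_update_interval_ms = 200;
```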
## max_replicated_logs_to_keep
How many records may be in the ClickHouse Keeper log if there is an inactive replica. An inactive replica becomes lost when this number is exceeded.
@ -745,4 +798,4 @@ You can see which parts of `s` were stored using the sparse serialization:
│ id │ Default │
│ s │ Sparse │
└────────┴────────────────────┘
```
```

View File

@ -408,51 +408,51 @@ Several algorithms can be specified, and an available one would be chosen for a
Possible values:
### `default`
- default
This is the equivalent of `hash` or `direct`, if possible (same as `direct,hash`)
### `grace_hash`
- grace_hash
[Grace hash join](https://en.wikipedia.org/wiki/Hash_join#Grace_hash_join) is used. Grace hash is an algorithm option that provides performant complex joins while limiting memory use.
The first phase of a grace join reads the right table and splits it into N buckets depending on the hash value of key columns (initially, N is `grace_hash_join_initial_buckets`). This is done in a way to ensure that each bucket can be processed independently. Rows from the first bucket are added to an in-memory hash table while the others are saved to disk. If the hash table grows beyond the memory limit (e.g., as set by [`max_bytes_in_join`](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)), the number of buckets is increased and the assigned bucket is recalculated for each row. Any rows which don't belong to the current bucket are flushed and reassigned.
### `hash`
- hash
[Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used. The most generic implementation that supports all combinations of kind and strictness and multiple join keys that are combined with `OR` in the `JOIN ON` section.
### `parallel_hash`
- parallel_hash
A variation of `hash` join that splits the data into buckets and builds several hashtables instead of one concurrently to speed up this process.
When using the `hash` algorithm, the right part of `JOIN` is uploaded into RAM.
### `partial_merge`
- partial_merge
A variation of the [sort-merge algorithm](https://en.wikipedia.org/wiki/Sort-merge_join), where only the right table is fully sorted.
The `RIGHT JOIN` and `FULL JOIN` are supported only with `ALL` strictness (`SEMI`, `ANTI`, `ANY`, and `ASOF` are not supported).
When using the `partial_merge` algorithm, ClickHouse sorts the data and dumps it to the disk. The `partial_merge` algorithm in ClickHouse differs slightly from the classic realization. First, ClickHouse sorts the right table by joining keys in blocks and creates a min-max index for sorted blocks. Then it sorts parts of the left table by the `join key` and joins them over the right table. The min-max index is also used to skip unneeded right table blocks.
### `direct`
- direct
This algorithm can be applied when the storage for the right table supports key-value requests.
The `direct` algorithm performs a lookup in the right table using rows from the left table as keys. It's supported only by special storage such as [Dictionary](../../engines/table-engines/special/dictionary.md/#dictionary) or [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md) and only the `LEFT` and `INNER` JOINs.
### `auto`
- auto
When set to `auto`, `hash` join is tried first, and the algorithm is switched on the fly to another algorithm if the memory limit is violated.
### `full_sorting_merge`
- full_sorting_merge
[Sort-merge algorithm](https://en.wikipedia.org/wiki/Sort-merge_join) with full sorting of the joined tables before joining.
### `prefer_partial_merge`
- prefer_partial_merge
ClickHouse always tries to use `partial_merge` join if possible, otherwise, it uses `hash`. *Deprecated*, same as `partial_merge,hash`.
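As a sketch, one of the algorithms above (or a comma-separated fallback list) can be chosen per session or per query; the tables and columns below are placeholders:
```sql
-- Session level: prefer grace_hash and let ClickHouse fall back to hash when needed.
SET join_algorithm = 'grace_hash,hash';

-- Query level: override the algorithm just for this statement.
SELECT l.id, r.value
FROM left_table AS l
INNER JOIN right_table AS r ON l.id = r.id
SETTINGS join_algorithm = 'parallel_hash';
```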
## join_any_take_last_row {#settings-join_any_take_last_row}
@ -1300,6 +1300,81 @@ Possible values:
Default value: `3`.
## enable_experimental_query_result_cache {#enable-experimental-query-result-cache}
If turned on, results of SELECT queries are stored in and (if available) retrieved from the [query result cache](../query-result-cache.md).
Possible values:
- 0 - Disabled
- 1 - Enabled
Default value: `0`.
## enable_experimental_query_result_cache_passive_usage {#enable-experimental-query-result-cache-passive-usage}
If turned on, results of SELECT queries are (if available) retrieved from the [query result cache](../query-result-cache.md).
Possible values:
- 0 - Disabled
- 1 - Enabled
Default value: `0`.
## query_result_cache_store_results_of_queries_with_nondeterministic_functions {#query-result-cache-store-results-of-queries-with-nondeterministic-functions}
If turned on, then results of SELECT queries with non-deterministic functions (e.g. `rand()`, `now()`) can be cached in the [query result cache](../query-result-cache.md).
Possible values:
- 0 - Disabled
- 1 - Enabled
Default value: `0`.
## query_result_cache_min_query_runs {#query-result-cache-min-query-runs}
Minimum number of times a SELECT query must run before its result is stored in the [query result cache](../query-result-cache.md).
Possible values:
- Non-negative integer.
Default value: `0`.
## query_result_cache_min_query_duration {#query-result-cache-min-query-duration}
Minimum duration in milliseconds a query needs to run for its result to be stored in the [query result cache](../query-result-cache.md).
Possible values:
- Non-negative integer.
Default value: `0`.
## query_result_cache_ttl {#query-result-cache-ttl}
After this time in seconds entries in the [query result cache](../query-result-cache.md) become stale.
Possible values:
- Non-negative integer.
Default value: `60`.
## query_result_cache_share_between_users {#query-result-cache-share-between-users}
If turned on, the result of SELECT queries cached in the [query result cache](../query-result-cache.md) can be read by other users.
It is not recommended to enable this setting due to security reasons.
Possible values:
- 0 - Disabled
- 1 - Enabled
Default value: `0`.
## insert_quorum {#settings-insert_quorum}
Enables the quorum writes.
@ -1394,6 +1469,22 @@ By default, blocks inserted into replicated tables by the `INSERT` statement are
For replicated tables, by default, only the 100 most recent blocks for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md/#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)).
For non-replicated tables, see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window).
## async_insert_deduplicate {#settings-async-insert-deduplicate}
Enables or disables insert deduplication of `ASYNC INSERT` (for Replicated\* tables).
Possible values:
- 0 — Disabled.
- 1 — Enabled.
Default value: 1.
By default, async inserts made into replicated tables by `INSERT` statements with [async_insert](#async-insert) enabled are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)).
For the replicated tables, by default, only 10000 of the most recent inserts for each partition are deduplicated (see [replicated_deduplication_window_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-async-inserts), [replicated_deduplication_window_seconds_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-seconds-async-inserts)).
We recommend enabling the [async_block_ids_cache](merge-tree-settings.md/#use-async-block-ids-cache) to increase the efficiency of deduplication.
This feature does not work for non-replicated tables.
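A sketch of a deduplicated async insert into a hypothetical replicated table; retrying the identical statement within the deduplication window is dropped as a duplicate:
```sql
INSERT INTO test.events (id, payload)
SETTINGS async_insert = 1, wait_for_async_insert = 1, async_insert_deduplicate = 1
VALUES (1, 'first attempt');
```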
## deduplicate_blocks_in_dependent_materialized_views {#settings-deduplicate-blocks-in-dependent-materialized-views}
Enables or disables the deduplication check for materialized views that receive data from Replicated\* tables.

View File

@ -72,3 +72,10 @@ If procfs is supported and enabled on the system, ClickHouse server collects the
- `OSWriteChars`
- `OSReadBytes`
- `OSWriteBytes`
## Related content
- Blog: [System Tables and a window into the internals of ClickHouse](https://clickhouse.com/blog/clickhouse-debugging-issues-with-system-tables)
- Blog: [Essential monitoring queries - part 1 - INSERT queries](https://clickhouse.com/blog/monitoring-troubleshooting-insert-queries-clickhouse)
- Blog: [Essential monitoring queries - part 2 - SELECT queries](https://clickhouse.com/blog/monitoring-troubleshooting-select-queries-clickhouse)

View File

@ -54,7 +54,9 @@ Functions:
- [toLowCardinality](../../sql-reference/functions/type-conversion-functions.md#tolowcardinality)
## See Also
## Related content
- [Reducing ClickHouse Storage Cost with the Low Cardinality Type - Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
- [Reducing ClickHouse Storage Cost with the Low Cardinality Type - Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/)
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf)
- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -1582,3 +1582,8 @@ Result:
│ 2020-01-01 │
└────────────────────────────────────┘
```
## Related content
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -588,3 +588,6 @@ Result:
│ aeca2A │
└───────────────────────────────────────┘
```
## Related content
- Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse)

View File

@ -115,3 +115,7 @@ Returns the exclusive upper bound of the corresponding hopping window.
hopEnd(bounds_tuple);
hopEnd(time_attr, hop_interval, window_interval [, timezone]);
```
## Related content
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -1402,6 +1402,8 @@ The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
```sql
toUnixTimestamp64Milli(value)
toUnixTimestamp64Micro(value)
toUnixTimestamp64Nano(value)
```
**Arguments**
@ -1455,7 +1457,9 @@ Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and
**Syntax**
``` sql
fromUnixTimestamp64Milli(value [, ti])
fromUnixTimestamp64Milli(value [, timezone])
fromUnixTimestamp64Micro(value [, timezone])
fromUnixTimestamp64Nano(value [, timezone])
```
**Arguments**

View File

@ -158,8 +158,6 @@ For examples of columns TTL modifying, see [Column TTL](/docs/en/engines/table-e
If the `IF EXISTS` clause is specified, the query won't return an error if the column does not exist.
The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#alter_add-column) description.
When changing the type, values are converted as if the [toType](/docs/en/sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query does not do anything complex, and is completed almost instantly.
Example:
@ -170,6 +168,40 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String)
Changing the column type is the only complex action: it changes the contents of files with data. For large tables, this may take a long time.
The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#alter_add-column) description, but column type is mandatory in this case.
Example:
```sql
CREATE TABLE users (
c1 Int16,
c2 String
) ENGINE = MergeTree
ORDER BY c1;
DESCRIBE users;
┌─name─┬─type───┬
│ c1   │ Int16  │
│ c2   │ String │
└──────┴────────┴

ALTER TABLE users MODIFY COLUMN c2 String FIRST;
DESCRIBE users;
┌─name─┬─type───┬
│ c2   │ String │
│ c1   │ Int16  │
└──────┴────────┴

ALTER TABLE users ALTER COLUMN c2 TYPE String AFTER c1;
DESCRIBE users;
┌─name─┬─type───┬
│ c1   │ Int16  │
│ c2   │ String │
└──────┴────────┴
```
The `ALTER` query is atomic. For MergeTree tables it is also lock-free.
The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously.

View File

@ -504,3 +504,9 @@ Result:
│ t1 │ The temporary table │
└──────┴─────────────────────┘
```
## Related content
- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -350,3 +350,7 @@ The window view is useful in the following scenarios:
* **Monitoring**: Aggregate and calculate the metrics logs by time, and output the results to a target table. The dashboard can use the target table as a source table.
* **Analyzing**: Automatically aggregate and preprocess data in the time window. This can be useful when analyzing a large number of logs. The preprocessing eliminates repeated calculations in multiple queries and reduces query latency.
## Related Content
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -299,3 +299,8 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
## Implementation Details
The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) clause in a query, technically they can be performed in any order, unless result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer.
## Related content
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -543,3 +543,7 @@ Result:
│ 7 │ original │ 7 │
└─────┴──────────┴───────┘
```
## Related content
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -72,7 +72,7 @@ For more convenient (automatic) cache management, see disable_internal_dns_cache
## DROP MARK CACHE
Resets the mark cache. Used in development of ClickHouse and performance tests.
Resets the mark cache.
## DROP REPLICA
@ -94,13 +94,18 @@ The fourth one is useful to remove metadata of dead replica when all other repli
## DROP UNCOMPRESSED CACHE
Reset the uncompressed data cache. Used in development of ClickHouse and performance tests.
For manage uncompressed data cache parameters use following server level settings [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) and query/user/profile level settings [use_uncompressed_cache](../../operations/settings/settings.md#setting-use_uncompressed_cache)
Reset the uncompressed data cache.
The uncompressed data cache is enabled/disabled with the query/user/profile-level setting [use_uncompressed_cache](../../operations/settings/settings.md#setting-use_uncompressed_cache).
Its size can be configured using the server-level setting [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size).
## DROP COMPILED EXPRESSION CACHE
Reset the compiled expression cache. Used in development of ClickHouse and performance tests.
Compiled expression cache used when query/user/profile enable option [compile-expressions](../../operations/settings/settings.md#compile-expressions)
Reset the compiled expression cache.
The compiled expression cache is enabled/disabled with the query/user/profile-level setting [compile_expressions](../../operations/settings/settings.md#compile-expressions).
## DROP QUERY RESULT CACHE
Resets the [query result cache](../../operations/query-result-cache.md).
## FLUSH LOGS

View File

@ -51,4 +51,7 @@ SELECT * FROM random;
│ [] │ 68091.8197 │ ('2037-10-02 12:44:23.368','039ecab7-81c2-45ee-208c-844e5c6c5652') │
│ [8,-83,0,-22,65,9,-30,28,64] │ -186233.4909 │ ('2062-01-11 00:06:04.124','69563ea1-5ad1-f870-16d8-67061da0df25') │
└──────────────────────────────┴──────────────┴────────────────────────────────────────────────────────────────────┘
```
```
## Related content
- Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse)

View File

@ -131,3 +131,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)
- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
## Related content
- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)

View File

@ -590,5 +590,6 @@ ORDER BY
## Related Content
- [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
- Blog: [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
- Blog: [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)

View File

@ -1,97 +1 @@
## Generating ClickHouse documentation {#how-clickhouse-documentation-is-generated}
ClickHouse documentation is built using [Docusaurus](https://docusaurus.io).
## Check the look of your documentation changes {#how-to-check-if-the-documentation-will-look-fine}
There are a few options that are all useful depending on how large or complex your edits are.
### Use the GitHub web interface to edit
Every page in the docs has an **Edit this page** link that opens the page in the GitHub editor. GitHub has Markdown support with a preview feature. The details of GitHub Markdown and the documentation Markdown are a bit different but generally this is close enough, and the person merging your PR will build the docs and check them.
### Install a Markdown editor or plugin for your IDE {#install-markdown-editor-or-plugin-for-your-ide}
Usually, these plugins provide a preview of how the markdown will render, and they catch basic errors like unclosed tags very early.
## Build the docs locally {#use-build-py}
You can build the docs locally. It takes a few minutes to set up, but once you have done it the first time, the process is very simple.
### Clone the repos
The documentation is in two repos, clone both of them:
- [ClickHouse/ClickHouse](https://github.com/ClickHouse/ClickHouse)
- [ClickHouse/ClickHouse-docs](https://github.com/ClickHouse/clickhouse-docs)
### Install Node.js
The documentation is built with Docusaurus, which requires Node.js. We recommend version 16. Install [Node.js](https://nodejs.org/en/download/).
### Copy files into place
Docusaurus expects all of the markdown files to be located in the directory tree `clickhouse-docs/docs/`. This is not the way our repos are set up, so some copying of files is needed to build the docs:
```bash
# from the parent directory of both the ClickHouse/ClickHouse and ClickHouse-clickhouse-docs repos:
cp -r ClickHouse/docs/en/development clickhouse-docs/docs/en/
cp -r ClickHouse/docs/en/engines clickhouse-docs/docs/en/
cp -r ClickHouse/docs/en/getting-started clickhouse-docs/docs/en/
cp -r ClickHouse/docs/en/interfaces clickhouse-docs/docs/en/
cp -r ClickHouse/docs/en/operations clickhouse-docs/docs/en/
cp -r ClickHouse/docs/en/sql-reference clickhouse-docs/docs/en/
cp -r ClickHouse/docs/ru/* clickhouse-docs/docs/ru/
cp -r ClickHouse/docs/zh clickhouse-docs/docs/
```
#### Note: Symlinks will not work.
### Setup Docusaurus
There are two commands that you may need to use with Docusaurus:
- `yarn install`
- `yarn start`
#### Install Docusaurus and its dependencies:
```bash
cd clickhouse-docs
yarn install
```
#### Start a development Docusaurus environment
This command will start Docusaurus in development mode, which means that as you edit source (for example, `.md` files) files the changes will be rendered into HTML files and served by the Docusaurus development server.
```bash
yarn start
```
### Make your changes to the markdown files
Edit your files. Remember that if you are editing files in the `ClickHouse/ClickHouse` repo then you should edit them
in that repo and then copy the edited files into the `ClickHouse/clickhouse-docs/` directory structure so that they are updated in your development environment.
`yarn start` probably opened a browser for you when you ran it; if not, open a browser to `http://localhost:3000/docs/en/intro` and navigate to the documentation that you are changing. If you have already made the changes, you can verify them here; if not, make them, and you will see the page update as you save the changes.
## How to change code highlighting? {#how-to-change-code-hl}
Code highlighting is based on the language chosen for your code blocks. Specify the language when you start the code block:
<pre lang="no-highlight"><code>```sql
SELECT firstname from imdb.actors;
```
</code></pre>
```sql
SELECT firstname from imdb.actors;
```
If you need a language supported then open an issue in [ClickHouse-docs](https://github.com/ClickHouse/clickhouse-docs/issues).
## How to subscribe on documentation changes? {#how-to-subscribe-on-documentation-changes}
At the moment there's no easy way to do just that, but you can consider:
- Hit the “Watch” button on top of the GitHub web interface to be notified as early as possible, even during a pull request. An alternative is the `#github-activity` channel of [public ClickHouse Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-qfort0u8-TWqK4wIP0YSdoDE0btKa1w).
- Some search engines allow subscribing to changes on a specific website via email, and you can opt in to that for https://clickhouse.com.
See https://github.com/ClickHouse/clickhouse-docs/blob/main/contrib-writing-guide.md

View File

@ -968,7 +968,7 @@ void Client::processOptions(const OptionsDescription & options_description,
if (external_tables.back().file == "-")
++number_of_external_tables_with_stdin_source;
if (number_of_external_tables_with_stdin_source > 1)
throw Exception("Two or more external tables has stdin (-) set as --file field", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Two or more external tables has stdin (-) set as --file field");
}
catch (const Exception & e)
{
@ -1021,7 +1021,7 @@ void Client::processOptions(const OptionsDescription & options_description,
}
if (options.count("config-file") && options.count("config"))
throw Exception("Two or more configuration files referenced in arguments", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Two or more configuration files referenced in arguments");
if (options.count("config"))
config().setString("config-file", options["config"].as<std::string>());
@ -1212,14 +1212,14 @@ void Client::readArguments(
/// param_name value
++arg_num;
if (arg_num >= argc)
throw Exception("Parameter requires value", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter requires value");
arg = argv[arg_num];
query_parameters.emplace(String(param_continuation), String(arg));
}
else
{
if (equal_pos == 0)
throw Exception("Parameter name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter name cannot be empty");
/// param_name=value
query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1));
@ -1233,7 +1233,7 @@ void Client::readArguments(
{
++arg_num;
if (arg_num >= argc)
throw Exception("Host argument requires value", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host argument requires value");
arg = argv[arg_num];
host_arg = "--host=";
host_arg.append(arg);
@ -1265,7 +1265,7 @@ void Client::readArguments(
port_arg.push_back('=');
++arg_num;
if (arg_num >= argc)
throw Exception("Port argument requires value", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Port argument requires value");
arg = argv[arg_num];
port_arg.append(arg);
}

View File

@ -46,7 +46,7 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]);
if (size_compressed > DBMS_MAX_COMPRESSED_SIZE)
throw DB::Exception("Too large size_compressed. Most likely corrupted data.", DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED);
throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data.");
UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]);
@ -113,10 +113,10 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
codecs = options["codec"].as<std::vector<std::string>>();
if ((use_lz4hc || use_zstd || use_deflate_qpl || use_none) && !codecs.empty())
throw Exception("Wrong options, codec flags like --zstd and --codec options are mutually exclusive", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive");
if (!codecs.empty() && options.count("level"))
throw Exception("Wrong options, --level is not compatible with --codec list", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list");
std::string method_family = "LZ4";

View File

@ -77,7 +77,7 @@ decltype(auto) ClusterCopier::retry(T && func, UInt64 max_tries)
std::exception_ptr exception;
if (max_tries == 0)
throw Exception("Cannot perform zero retries", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot perform zero retries");
for (UInt64 try_number = 1; try_number <= max_tries; ++try_number)
{
@ -123,7 +123,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
}
catch (Exception & e)
{
throw Exception("Partition " + partition_text_quoted + " has incorrect format. " + e.displayText(), ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Partition {} has incorrect format. {}", partition_text_quoted, e.displayText());
}
};
@ -325,8 +325,8 @@ void ClusterCopier::process(const ConnectionTimeouts & timeouts)
if (!table_is_done)
{
throw Exception("Too many tries to process table " + task_table.table_id + ". Abort remaining execution",
ErrorCodes::UNFINISHED);
throw Exception(ErrorCodes::UNFINISHED, "Too many tries to process table {}. Abort remaining execution",
task_table.table_id);
}
}
}
@ -666,7 +666,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
}
if (inject_fault)
throw Exception("Copy fault injection is activated", ErrorCodes::UNFINISHED);
throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated");
}
/// Create node to signal that we finished moving
@ -753,7 +753,7 @@ std::shared_ptr<ASTCreateQuery> rewriteCreateQueryStorage(const ASTPtr & create_
auto res = std::make_shared<ASTCreateQuery>(create);
if (create.storage == nullptr || new_storage_ast == nullptr)
throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage is not specified");
res->setDatabase(new_table.first);
res->setTable(new_table.second);
@ -775,7 +775,7 @@ bool ClusterCopier::tryDropPartitionPiece(
const CleanStateClock & clean_state_clock)
{
if (is_safe_mode)
throw Exception("DROP PARTITION is prohibited in safe mode", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DROP PARTITION is prohibited in safe mode");
TaskTable & task_table = task_partition.task_shard.task_table;
ShardPartitionPiece & partition_piece = task_partition.pieces[current_piece_number];
@ -944,7 +944,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
for (const String & partition_name : task_table.ordered_partition_names)
{
if (!task_table.cluster_partitions.contains(partition_name))
throw Exception("There are no expected partition " + partition_name + ". It is a bug", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no expected partition {}. It is a bug", partition_name);
ClusterPartition & cluster_partition = task_table.cluster_partitions[partition_name];
@ -1006,7 +1006,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
/// Previously when we discovered that shard does not contain current partition, we skipped it.
/// At this moment partition have to be present.
if (it_shard_partition == shard->partition_tasks.end())
throw Exception("There are no such partition in a shard. This is a bug.", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no such partition in a shard. This is a bug.");
auto & partition = it_shard_partition->second;
expected_shards.emplace_back(shard);
@ -1587,7 +1587,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
auto cancel_check = [&] ()
{
if (zookeeper->expired())
throw Exception("ZooKeeper session is expired, cancel INSERT SELECT", ErrorCodes::UNFINISHED);
throw Exception(ErrorCodes::UNFINISHED, "ZooKeeper session is expired, cancel INSERT SELECT");
if (!future_is_dirty_checker.valid())
future_is_dirty_checker = zookeeper->asyncExists(piece_is_dirty_flag_path);
@ -1603,7 +1603,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
LogicalClock dirt_discovery_epoch (status.stat.mzxid);
if (dirt_discovery_epoch == clean_state_clock.discovery_zxid)
return false;
throw Exception("Partition is dirty, cancel INSERT SELECT", ErrorCodes::UNFINISHED);
throw Exception(ErrorCodes::UNFINISHED, "Partition is dirty, cancel INSERT SELECT");
}
}
@ -1646,7 +1646,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
future_is_dirty_checker.get();
if (inject_fault)
throw Exception("Copy fault injection is activated", ErrorCodes::UNFINISHED);
throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated");
}
catch (...)
{

View File

@ -90,9 +90,7 @@ ASTPtr extractPartitionKey(const ASTPtr & storage_ast)
if (!endsWith(engine.name, "MergeTree"))
{
throw Exception(
"Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
}
if (isExtendedDefinitionStorage(storage_ast))
@ -109,14 +107,13 @@ ASTPtr extractPartitionKey(const ASTPtr & storage_ast)
size_t min_args = is_replicated ? 3 : 1;
if (!engine.arguments)
throw Exception("Expected arguments in " + storage_str, ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected arguments in {}", storage_str);
ASTPtr arguments_ast = engine.arguments->clone();
ASTs & arguments = arguments_ast->children;
if (arguments.size() < min_args)
throw Exception("Expected at least " + toString(min_args) + " arguments in " + storage_str,
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected at least {} arguments in {}", min_args, storage_str);
ASTPtr & month_arg = is_replicated ? arguments[2] : arguments[1];
return makeASTFunction("toYYYYMM", month_arg->clone());
@ -132,14 +129,12 @@ ASTPtr extractPrimaryKey(const ASTPtr & storage_ast)
if (!endsWith(engine.name, "MergeTree"))
{
throw Exception("Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
}
if (!isExtendedDefinitionStorage(storage_ast))
{
throw Exception("Is not extended deginition storage " + storage_str + " Will be fixed later.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str);
}
if (storage.primary_key)
@ -158,20 +153,18 @@ ASTPtr extractOrderBy(const ASTPtr & storage_ast)
if (!endsWith(engine.name, "MergeTree"))
{
throw Exception("Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
}
if (!isExtendedDefinitionStorage(storage_ast))
{
throw Exception("Is not extended deginition storage " + storage_str + " Will be fixed later.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str);
}
if (storage.order_by)
return storage.order_by->clone();
throw Exception("ORDER BY cannot be empty", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "ORDER BY cannot be empty");
}
/// Wraps only identifiers with backticks.
@ -191,7 +184,7 @@ std::string wrapIdentifiersWithBackticks(const ASTPtr & root)
return boost::algorithm::join(function_arguments, ", ");
}
throw Exception("Primary key could be represented only as columns or functions from columns.", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key could be represented only as columns or functions from columns.");
}
@ -210,9 +203,9 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
size_t sorting_key_size = sorting_key_expr_list->children.size();
if (primary_key_size > sorting_key_size)
throw Exception("Primary key must be a prefix of the sorting key, but its length: "
+ toString(primary_key_size) + " is greater than the sorting key length: " + toString(sorting_key_size),
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key must be a prefix of the sorting key, but its length: "
"{} is greater than the sorting key length: {}",
primary_key_size, sorting_key_size);
Names primary_key_columns;
NameSet primary_key_columns_set;
@ -228,12 +221,12 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
{
String pk_column = primary_key_expr_list->children[i]->getColumnName();
if (pk_column != sorting_key_column)
throw Exception("Primary key must be a prefix of the sorting key, but the column in the position "
+ toString(i) + " is " + sorting_key_column +", not " + pk_column,
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Primary key must be a prefix of the sorting key, "
"but the column in the position {} is {}, not {}", i, sorting_key_column, pk_column);
if (!primary_key_columns_set.emplace(pk_column).second)
throw Exception("Primary key contains duplicate columns", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key contains duplicate columns");
primary_key_columns.push_back(wrapIdentifiersWithBackticks(primary_key_expr_list->children[i]));
}
@ -250,9 +243,7 @@ bool isReplicatedTableEngine(const ASTPtr & storage_ast)
if (!endsWith(engine.name, "MergeTree"))
{
String storage_str = queryToString(storage_ast);
throw Exception(
"Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
}
return startsWith(engine.name, "Replicated");


@ -119,7 +119,7 @@ struct TaskStateWithOwner
rb >> state >> "\n" >> escape >> res.owner;
if (state >= static_cast<int>(TaskState::Unknown))
throw Exception("Unknown state " + data, ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown state {}", data);
res.state = static_cast<TaskState>(state);
return res;


@ -19,7 +19,7 @@ void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config
clusters_prefix = prefix + "remote_servers";
if (!config.has(clusters_prefix))
throw Exception("You should specify list of clusters in " + clusters_prefix, ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "You should specify list of clusters in {}", clusters_prefix);
Poco::Util::AbstractConfiguration::Keys tables_keys;
config.keys(prefix + "tables", tables_keys);


@ -102,7 +102,7 @@ TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfigurati
for (const String &key : keys)
{
if (!startsWith(key, "partition"))
throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown key {} in {}", key, enabled_partitions_prefix);
enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." + key));
}
@ -213,8 +213,7 @@ ClusterPartition & TaskTable::getClusterPartition(const String & partition_name)
{
auto it = cluster_partitions.find(partition_name);
if (it == cluster_partitions.end())
throw Exception("There are no cluster partition " + partition_name + " in " + table_id,
ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no cluster partition {} in {}", partition_name, table_id);
return it->second;
}


@ -44,7 +44,7 @@ public:
if (command_arguments.size() != 2)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name_from = config.getString("diskFrom", config.getString("disk", "default"));


@ -33,7 +33,7 @@ public:
if (command_arguments.size() != 2)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name = config.getString("disk", "default");


@ -40,7 +40,7 @@ public:
if (command_arguments.size() != 1)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name = config.getString("disk", "default");


@ -32,7 +32,7 @@ public:
if (!command_arguments.empty())
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
for (const auto & [disk_name, _] : global_context->getDisksMap())


@ -41,7 +41,7 @@ public:
if (command_arguments.size() != 1)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name = config.getString("disk", "default");


@ -32,7 +32,7 @@ public:
if (command_arguments.size() != 2)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name = config.getString("disk", "default");


@ -43,7 +43,7 @@ public:
if (command_arguments.size() != 1)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name = config.getString("disk", "default");


@ -32,7 +32,7 @@ public:
if (command_arguments.size() != 1)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name = config.getString("disk", "default");


@ -44,7 +44,7 @@ public:
if (command_arguments.size() != 1)
{
printHelpMessage();
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
String disk_name = config.getString("disk", "default");


@ -119,7 +119,7 @@ void DisksApp::init(std::vector<String> & common_arguments)
{
std::cerr << "Unknown command name: " << command_name << "\n";
printHelpMessage(options_description);
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
}
processOptions();


@ -1160,7 +1160,7 @@ void processLog(const Options & options)
/// Will run multiple processes in parallel
size_t num_threads = options.threads;
if (num_threads == 0)
throw Exception("num-threads cannot be zero", ErrorCodes::INCORRECT_DATA);
throw Exception(ErrorCodes::INCORRECT_DATA, "num-threads cannot be zero");
std::vector<std::unique_ptr<ShellCommand>> show_commands(num_threads);
for (size_t i = 0; i < num_commits && i < num_threads; ++i)


@ -484,8 +484,7 @@ try
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), true), server_pool, socket));
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});


@ -169,7 +169,7 @@ std::vector<PODArray<char>> placeStringColumns(const ColumnRawPtrs & columns, si
else if (const auto * column_fixed_string = typeid_cast<const ColumnFixedString *>(column))
data.push_back(placeFixedStringColumn(*column_fixed_string, buffer + i, size));
else
throw Exception("Cannot place string column.", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot place string column.");
}
return data;


@ -32,7 +32,7 @@ ExternalDictionaryLibraryHandler::ExternalDictionaryLibraryHandler(
if (lib_new)
lib_data = lib_new(&settings_holder->strings, ExternalDictionaryLibraryAPI::log);
else
throw Exception("Method extDict_libNew failed", ErrorCodes::EXTERNAL_LIBRARY_ERROR);
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "Method extDict_libNew failed");
}
@ -173,22 +173,21 @@ Block ExternalDictionaryLibraryHandler::loadKeys(const Columns & key_columns)
Block ExternalDictionaryLibraryHandler::dataToBlock(ExternalDictionaryLibraryAPI::RawClickHouseLibraryTable data)
{
if (!data)
throw Exception("LibraryDictionarySource: No data returned", ErrorCodes::EXTERNAL_LIBRARY_ERROR);
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "LibraryDictionarySource: No data returned");
const auto * columns_received = static_cast<const ExternalDictionaryLibraryAPI::Table *>(data);
if (columns_received->error_code)
throw Exception(
"LibraryDictionarySource: Returned error: " + std::to_string(columns_received->error_code) + " " + (columns_received->error_string ? columns_received->error_string : ""),
ErrorCodes::EXTERNAL_LIBRARY_ERROR);
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "LibraryDictionarySource: Returned error: {} {}",
std::to_string(columns_received->error_code), (columns_received->error_string ? columns_received->error_string : ""));
MutableColumns columns = sample_block.cloneEmptyColumns();
for (size_t col_n = 0; col_n < columns_received->size; ++col_n)
{
if (columns.size() != columns_received->data[col_n].size)
throw Exception(
"LibraryDictionarySource: Returned unexpected number of columns: " + std::to_string(columns_received->data[col_n].size) + ", must be " + std::to_string(columns.size()),
ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "LibraryDictionarySource: "
"Returned unexpected number of columns: {}, must be {}",
columns_received->data[col_n].size, columns.size());
for (size_t row_n = 0; row_n < columns_received->data[col_n].size; ++row_n)
{


@ -359,7 +359,7 @@ void LocalServer::setupUsers()
if (users_config)
global_context->setUsersConfig(users_config);
else
throw Exception("Can't load config for users", ErrorCodes::CANNOT_LOAD_CONFIG);
throw Exception(ErrorCodes::CANNOT_LOAD_CONFIG, "Can't load config for users");
}
void LocalServer::connect()
@ -489,7 +489,7 @@ void LocalServer::processConfig()
if (is_interactive && !delayed_interactive)
{
if (config().has("query") && config().has("queries-file"))
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specify either `query` or `queries-file` option");
if (config().has("multiquery"))
is_multiquery = true;


@ -880,7 +880,7 @@ public:
}
if (!it)
throw Exception("Logical error in markov model", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error in markov model");
size_t offset_from_begin_of_string = pos - data;
size_t determinator_sliding_window_size = params.determinator_sliding_window_size;
@ -1139,7 +1139,7 @@ public:
if (const auto * type = typeid_cast<const DataTypeNullable *>(&data_type))
return std::make_unique<NullableModel>(get(*type->getNestedType(), seed, markov_model_params));
throw Exception("Unsupported data type", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported data type");
}
};
@ -1384,7 +1384,7 @@ try
UInt8 version = 0;
readBinary(version, model_in);
if (version != 0)
throw Exception("Unknown version of the model file", ErrorCodes::UNKNOWN_FORMAT_VERSION);
throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unknown version of the model file");
readBinary(source_rows, model_in);
@ -1392,14 +1392,14 @@ try
size_t header_size = 0;
readBinary(header_size, model_in);
if (header_size != data_types.size())
throw Exception("The saved model was created for different number of columns", ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS);
throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "The saved model was created for different number of columns");
for (size_t i = 0; i < header_size; ++i)
{
String type;
readBinary(type, model_in);
if (type != data_types[i])
throw Exception("The saved model was created for different types of columns", ErrorCodes::TYPE_MISMATCH);
throw Exception(ErrorCodes::TYPE_MISMATCH, "The saved model was created for different types of columns");
}
obfuscator.deserialize(model_in);


@ -181,7 +181,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
}
if (columns.empty())
throw Exception("Columns definition was not returned", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Columns definition was not returned");
WriteBufferFromHTTPServerResponse out(
response,


@ -163,7 +163,7 @@ void ODBCSource::insertValue(
break;
}
default:
throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE);
throw Exception(ErrorCodes::UNKNOWN_TYPE, "Unsupported value type");
}
}


@ -151,7 +151,7 @@ public:
auto connection_available = pool->tryBorrowObject(connection, []() { return nullptr; }, ODBC_POOL_WAIT_TIMEOUT);
if (!connection_available)
throw Exception("Unable to fetch connection within the timeout", ErrorCodes::NO_FREE_CONNECTION);
throw Exception(ErrorCodes::NO_FREE_CONNECTION, "Unable to fetch connection within the timeout");
try
{


@ -44,7 +44,8 @@ IdentifierQuotingStyle getQuotingStyle(nanodbc::ConnectionHolderPtr connection)
else if (identifier_quote[0] == '"')
return IdentifierQuotingStyle::DoubleQuotes;
else
throw Exception("Can not map quote identifier '" + identifier_quote + "' to IdentifierQuotingStyle value", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
}
}


@ -38,10 +38,10 @@ std::string validateODBCConnectionString(const std::string & connection_string)
static constexpr size_t MAX_CONNECTION_STRING_SIZE = 1000;
if (connection_string.empty())
throw Exception("ODBC connection string cannot be empty", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string cannot be empty");
if (connection_string.size() >= MAX_CONNECTION_STRING_SIZE)
throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string is too long");
const char * pos = connection_string.data();
const char * end = pos + connection_string.size();
@ -51,7 +51,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
while (pos < end && isWhitespaceASCII(*pos))
{
if (*pos != ' ')
throw Exception("ODBC connection string parameter contains unusual whitespace character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter contains unusual whitespace character");
++pos;
}
};
@ -63,7 +63,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (pos < end && isValidIdentifierBegin(*pos))
++pos;
else
throw Exception("ODBC connection string parameter name doesn't begin with valid identifier character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING,
"ODBC connection string parameter name doesn't begin with valid identifier character");
/// Additionally allow dash and dot symbols in names.
/// Strictly speaking, the name with that characters should be escaped.
@ -83,7 +84,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
{
signed char c = *pos;
if (c < 32 || strchr("[]{}(),;?*=!@'\"", c) != nullptr)
throw Exception("ODBC connection string parameter value is unescaped and contains illegal character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING,
"ODBC connection string parameter value is unescaped and contains illegal character");
++pos;
}
@ -97,7 +99,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (pos < end && *pos == '{')
++pos;
else
throw Exception("ODBC connection string parameter value doesn't begin with opening curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter value doesn't begin with opening curly brace");
while (pos < end)
{
@ -109,13 +111,13 @@ std::string validateODBCConnectionString(const std::string & connection_string)
}
if (*pos == 0)
throw Exception("ODBC connection string parameter value contains ASCII NUL character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter value contains ASCII NUL character");
res += *pos;
++pos;
}
throw Exception("ODBC connection string parameter is escaped but there is no closing curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter is escaped but there is no closing curly brace");
};
auto read_value = [&]
@ -139,25 +141,25 @@ std::string validateODBCConnectionString(const std::string & connection_string)
Poco::toUpperInPlace(name);
if (name == "FILEDSN" || name == "SAVEFILE" || name == "DRIVER")
throw Exception("ODBC connection string has forbidden parameter", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string has forbidden parameter");
if (pos >= end)
throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter doesn't have value");
if (*pos == '=')
++pos;
else
throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter doesn't have value");
skip_whitespaces();
std::string value = read_value();
skip_whitespaces();
if (name.size() > MAX_ELEMENT_SIZE || value.size() > MAX_ELEMENT_SIZE)
throw Exception("ODBC connection string has too long keyword or value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string has too long keyword or value");
if (!parameters.emplace(name, value).second)
throw Exception("Duplicate parameter found in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "Duplicate parameter found in ODBC connection string");
if (pos >= end)
break;
@ -165,7 +167,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (*pos == ';')
++pos;
else
throw Exception("Unexpected character found after parameter value in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "Unexpected character found after parameter value in ODBC connection string");
}
/// Reconstruct the connection string.
@ -173,12 +175,12 @@ std::string validateODBCConnectionString(const std::string & connection_string)
auto it = parameters.find("DSN");
if (parameters.end() == it)
throw Exception("DSN parameter is mandatory for ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "DSN parameter is mandatory for ODBC connection string");
std::string dsn = it->second;
if (dsn.empty())
throw Exception("DSN parameter cannot be empty in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "DSN parameter cannot be empty in ODBC connection string");
parameters.erase(it);
@ -241,7 +243,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
write_element(elem.first, elem.second);
if (reconstructed_connection_string.size() >= MAX_CONNECTION_STRING_SIZE)
throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string is too long");
return reconstructed_connection_string;
}


@ -257,7 +257,7 @@ static std::string getCanonicalPath(std::string && path)
{
Poco::trimInPlace(path);
if (path.empty())
throw Exception("path configuration parameter is empty", ErrorCodes::INVALID_CONFIG_PARAMETER);
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "path configuration parameter is empty");
if (path.back() != '/')
path += '/';
return std::move(path);
@ -1116,7 +1116,7 @@ try
#endif
if (config().has("interserver_http_port") && config().has("interserver_https_port"))
throw Exception("Both http and https interserver ports are specified", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
throw Exception(ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "Both http and https interserver ports are specified");
static const auto interserver_tags =
{
@ -1141,7 +1141,7 @@ try
int port = parse<int>(port_str);
if (port < 0 || port > 0xFFFF)
throw Exception("Out of range '" + String(port_tag) + "': " + toString(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Out of range '{}': {}", String(port_tag), port);
global_context->setInterserverIOAddress(this_host, port);
global_context->setInterserverScheme(scheme);
@ -1419,8 +1419,7 @@ try
global_context->getSettingsRef().send_timeout.totalSeconds(), true), server_pool, socket));
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
}
@ -1465,7 +1464,7 @@ try
size_t max_cache_size = static_cast<size_t>(memory_amount * cache_size_to_ram_max_ratio);
/// Size of cache for uncompressed blocks. Zero means disabled.
String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", "");
String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", "SLRU");
LOG_INFO(log, "Uncompressed cache policy name {}", uncompressed_cache_policy);
size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0);
if (uncompressed_cache_size > max_cache_size)
@ -1491,7 +1490,7 @@ try
/// Size of cache for marks (index of MergeTree family of tables).
size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
String mark_cache_policy = config().getString("mark_cache_policy", "");
String mark_cache_policy = config().getString("mark_cache_policy", "SLRU");
if (!mark_cache_size)
LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
if (mark_cache_size > max_cache_size)
@ -1517,6 +1516,15 @@ try
if (mmap_cache_size)
global_context->setMMappedFileCache(mmap_cache_size);
/// A cache for query results.
size_t query_result_cache_size = config().getUInt64("query_result_cache.size", 1_GiB);
if (query_result_cache_size)
global_context->setQueryResultCache(
query_result_cache_size,
config().getUInt64("query_result_cache.max_entries", 1024),
config().getUInt64("query_result_cache.max_entry_size", 1_MiB),
config().getUInt64("query_result_cache.max_entry_records", 30'000'000));
#if USE_EMBEDDED_COMPILER
/// 128 MB
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
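The query result cache wiring added in the hunk above reads the query_result_cache.size, max_entries, max_entry_size and max_entry_records keys from the server configuration with hard-coded fallbacks. A minimal sketch of how such hierarchical keys resolve against defaults through Poco's configuration API — the MapConfiguration object and the values set here are illustrative stand-ins for the real server config(), not part of this commit — might look like:

```cpp
#include <Poco/AutoPtr.h>
#include <Poco/Util/MapConfiguration.h>
#include <iostream>

int main()
{
    // A self-contained stand-in for the server configuration; only one key is
    // overridden, the others fall back to the defaults given at the call sites.
    Poco::AutoPtr<Poco::Util::MapConfiguration> config = new Poco::Util::MapConfiguration;
    config->setString("query_result_cache.max_entries", "2048");

    const auto size              = config->getUInt64("query_result_cache.size", 1073741824);        // 1 GiB default
    const auto max_entries       = config->getUInt64("query_result_cache.max_entries", 1024);       // overridden above
    const auto max_entry_size    = config->getUInt64("query_result_cache.max_entry_size", 1048576); // 1 MiB default
    const auto max_entry_records = config->getUInt64("query_result_cache.max_entry_records", 30000000);

    std::cout << size << ' ' << max_entries << ' ' << max_entry_size << ' ' << max_entry_records << '\n';
    return 0;
}
```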
@ -1740,14 +1748,15 @@ try
std::lock_guard lock(servers_lock);
createServers(config(), listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers);
if (servers.empty())
throw Exception(
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
"to configuration file.)");
}
if (servers.empty())
throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
"to configuration file.)");
#if USE_SSL
CertificateReloader::instance().tryLoad(config());
@ -1807,7 +1816,7 @@ try
String ddl_zookeeper_path = config().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/");
int pool_size = config().getInt("distributed_ddl.pool_size", 1);
if (pool_size < 1)
throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "distributed_ddl.pool_size should be greater then 0");
global_context->setDDLWorker(std::make_unique<DDLWorker>(pool_size, ddl_zookeeper_path, global_context, &config(),
"distributed_ddl", "DDLWorker",
&CurrentMetrics::MaxDDLEntryID, &CurrentMetrics::MaxPushedDDLEntryID));
@ -1936,8 +1945,7 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
#if USE_SSL
return TCPServerConnectionFactory::Ptr(new TLSHandlerFactory(*this, conf_name));
#else
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
if (type == "proxy1")
@ -2104,8 +2112,7 @@ void Server::createServers(
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
#else
UNUSED(port);
throw Exception{"HTTPS protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "HTTPS protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
@ -2167,8 +2174,7 @@ void Server::createServers(
new Poco::Net::TCPServerParams));
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
@ -2273,8 +2279,7 @@ void Server::createServers(
http_params));
#else
UNUSED(port);
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
ErrorCodes::SUPPORT_IS_DISABLED};
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
#endif
});
}


@ -1453,6 +1453,14 @@
</rocksdb>
-->
<!-- Configuration for the query result cache -->
<!-- <query_result_cache> -->
<!-- <size>1073741824</size> -->
<!-- <max_entries>1024</max_entries> -->
<!-- <max_entry_size>1048576</max_entry_size> -->
<!-- <max_entry_records>30000000</max_entry_records> -->
<!-- </query_result_cache> -->
<!-- Uncomment if enable merge tree metadata cache -->
<!--merge_tree_metadata_cache>
<lru_cache_size>268435456</lru_cache_size>


@ -65,7 +65,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
}
if (gid == 0 && getgid() != 0)
throw Exception("Group has id 0, but dropping privileges to gid 0 does not make sense", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group has id 0, but dropping privileges to gid 0 does not make sense");
if (0 != setgid(gid))
throwFromErrno(fmt::format("Cannot do 'setgid' to user ({})", arg_gid), ErrorCodes::SYSTEM_ERROR);
@ -90,7 +90,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
}
if (uid == 0 && getuid() != 0)
throw Exception("User has id 0, but dropping privileges to uid 0 does not make sense", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User has id 0, but dropping privileges to uid 0 does not make sense");
if (0 != setuid(uid))
throwFromErrno(fmt::format("Cannot do 'setuid' to user ({})", arg_uid), ErrorCodes::SYSTEM_ERROR);


@ -289,7 +289,7 @@ namespace
}
default:
throw Exception("Unknown type: " + toString(entity_type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", toString(entity_type));
}
}
return res;

View File

@ -126,10 +126,10 @@ public:
std::lock_guard lock{mutex};
if (!registered_prefixes.empty())
{
throw Exception(
"Setting " + String{setting_name} + " is neither a builtin setting nor started with the prefix '"
+ boost::algorithm::join(registered_prefixes, "' or '") + "' registered for user-defined settings",
ErrorCodes::UNKNOWN_SETTING);
throw Exception(ErrorCodes::UNKNOWN_SETTING,
"Setting {} is neither a builtin setting nor started with the prefix '{}"
"' registered for user-defined settings",
String{setting_name}, boost::algorithm::join(registered_prefixes, "' or '"));
}
else
BaseSettingsHelpers::throwSettingNotFound(setting_name);
@ -450,7 +450,7 @@ void AccessControl::addStoragesFromUserDirectoriesConfig(
addReplicatedStorage(name, zookeeper_path, get_zookeeper_function, allow_backup);
}
else
throw Exception("Unknown storage type '" + type + "' at " + prefix + " in config", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown storage type '{}' at {} in config", type, prefix);
}
}
@ -575,7 +575,9 @@ UUID AccessControl::authenticate(const Credentials & credentials, const Poco::Ne
/// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons,
/// only the log will show the exact reason.
throw Exception(message.str(), ErrorCodes::AUTHENTICATION_FAILED);
throw Exception(PreformattedMessage{message.str(),
"{}: Authentication failed: password is incorrect, or there is no user with such name.{}"},
ErrorCodes::AUTHENTICATION_FAILED);
}
}


@ -80,53 +80,53 @@ AccessEntityPtr deserializeAccessEntityImpl(const String & definition)
if (auto * create_user_query = query->as<ASTCreateUserQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = user = std::make_unique<User>();
InterpreterCreateUserQuery::updateUserFromQuery(*user, *create_user_query, /* allow_no_password = */ true, /* allow_plaintext_password = */ true);
}
else if (auto * create_role_query = query->as<ASTCreateRoleQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = role = std::make_unique<Role>();
InterpreterCreateRoleQuery::updateRoleFromQuery(*role, *create_role_query);
}
else if (auto * create_policy_query = query->as<ASTCreateRowPolicyQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = policy = std::make_unique<RowPolicy>();
InterpreterCreateRowPolicyQuery::updateRowPolicyFromQuery(*policy, *create_policy_query);
}
else if (auto * create_quota_query = query->as<ASTCreateQuotaQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = quota = std::make_unique<Quota>();
InterpreterCreateQuotaQuery::updateQuotaFromQuery(*quota, *create_quota_query);
}
else if (auto * create_profile_query = query->as<ASTCreateSettingsProfileQuery>())
{
if (res)
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
res = profile = std::make_unique<SettingsProfile>();
InterpreterCreateSettingsProfileQuery::updateSettingsProfileFromQuery(*profile, *create_profile_query);
}
else if (auto * grant_query = query->as<ASTGrantQuery>())
{
if (!user && !role)
throw Exception("A user or role should be attached before grant", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "A user or role should be attached before grant");
if (user)
InterpreterGrantQuery::updateUserFromQuery(*user, *grant_query);
else
InterpreterGrantQuery::updateRoleFromQuery(*role, *grant_query);
}
else
throw Exception("No interpreter found for query " + query->getID(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "No interpreter found for query {}", query->getID());
}
if (!res)
throw Exception("No access entities attached", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "No access entities attached");
return res;
}


@ -797,7 +797,7 @@ template <bool with_grant_option>
void AccessRights::grantImpl(const AccessRightsElement & element)
{
if (element.is_partial_revoke)
throw Exception("A partial revoke should be revoked, not granted", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "A partial revoke should be revoked, not granted");
if constexpr (with_grant_option)
{
grantImplHelper<true>(element);


@ -176,7 +176,7 @@ bool Authentication::areCredentialsValid(const Credentials & credentials, const
if ([[maybe_unused]] const auto * always_allow_credentials = typeid_cast<const AlwaysAllowCredentials *>(&credentials))
return true;
throw Exception("areCredentialsValid(): authentication type " + toString(auth_data.getType()) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "areCredentialsValid(): authentication type {} not supported", toString(auth_data.getType()));
}
}


@ -83,7 +83,7 @@ const AccessEntityTypeInfo & AccessEntityTypeInfo::get(AccessEntityType type_)
}
case AccessEntityType::MAX: break;
}
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", static_cast<size_t>(type_));
}
AccessEntityType AccessEntityTypeInfo::parseType(const String & name_)


@ -44,7 +44,7 @@ namespace
boost::to_upper(uppercased_keyword);
it = keyword_to_flags_map.find(uppercased_keyword);
if (it == keyword_to_flags_map.end())
throw Exception("Unknown access type: " + String(keyword), ErrorCodes::UNKNOWN_ACCESS_TYPE);
throw Exception(ErrorCodes::UNKNOWN_ACCESS_TYPE, "Unknown access type: {}", String(keyword));
}
return it->second;
}
@ -179,7 +179,7 @@ namespace
else
{
if (nodes.contains(keyword))
throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "{} declared twice", keyword);
node = std::make_unique<Node>(keyword, node_type);
nodes[node->keyword] = node.get();
}
@ -225,9 +225,9 @@ namespace
# undef MAKE_ACCESS_FLAGS_NODE
if (!owned_nodes.contains("NONE"))
throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "'NONE' not declared");
if (!owned_nodes.contains("ALL"))
throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "'ALL' not declared");
all_node = std::move(owned_nodes["ALL"]);
none_node = std::move(owned_nodes["NONE"]);
@ -238,9 +238,9 @@ namespace
{
const auto & unused_node = *(owned_nodes.begin()->second);
if (unused_node.node_type == UNKNOWN)
throw Exception("Parent group '" + unused_node.keyword + "' not found", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Parent group '{}' not found", unused_node.keyword);
else
throw Exception("Access type '" + unused_node.keyword + "' should have parent group", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Access type '{}' should have parent group", unused_node.keyword);
}
}


@ -142,6 +142,7 @@ enum class AccessType
M(SYSTEM_DROP_MARK_CACHE, "SYSTEM DROP MARK, DROP MARK CACHE, DROP MARKS", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_UNCOMPRESSED_CACHE, "SYSTEM DROP UNCOMPRESSED, DROP UNCOMPRESSED CACHE, DROP UNCOMPRESSED", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_MMAP_CACHE, "SYSTEM DROP MMAP, DROP MMAP CACHE, DROP MMAP", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_QUERY_RESULT_CACHE, "SYSTEM DROP QUERY RESULT, DROP QUERY RESULT CACHE, DROP QUERY RESULT", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_FILESYSTEM_CACHE, "SYSTEM DROP FILESYSTEM CACHE, DROP FILESYSTEM CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_SCHEMA_CACHE, "SYSTEM DROP SCHEMA CACHE, DROP SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \


@ -67,7 +67,7 @@ const AuthenticationTypeInfo & AuthenticationTypeInfo::get(AuthenticationType ty
case AuthenticationType::MAX:
break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown authentication type: {}", static_cast<int>(type_));
}
@ -119,19 +119,19 @@ void AuthenticationData::setPassword(const String & password_)
case AuthenticationType::LDAP:
case AuthenticationType::KERBEROS:
case AuthenticationType::SSL_CERTIFICATE:
throw Exception("Cannot specify password for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify password for authentication type {}", toString(type));
case AuthenticationType::MAX:
break;
}
throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setPassword(): authentication type {} not supported", toString(type));
}
String AuthenticationData::getPassword() const
{
if (type != AuthenticationType::PLAINTEXT_PASSWORD)
throw Exception("Cannot decode the password", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot decode the password");
return String(password_hash.data(), password_hash.data() + password_hash.size());
}
@ -157,7 +157,7 @@ void AuthenticationData::setPasswordHashHex(const String & hash)
String AuthenticationData::getPasswordHashHex() const
{
if (type == AuthenticationType::LDAP || type == AuthenticationType::KERBEROS || type == AuthenticationType::SSL_CERTIFICATE)
throw Exception("Cannot get password hex hash for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get password hex hash for authentication type {}", toString(type));
String hex;
hex.resize(password_hash.size() * 2);
@ -179,10 +179,9 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::SHA256_PASSWORD:
{
if (hash.size() != 32)
throw Exception(
"Password hash for the 'SHA256_PASSWORD' authentication type has length " + std::to_string(hash.size())
+ " but must be exactly 32 bytes.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Password hash for the 'SHA256_PASSWORD' authentication type has length {} "
"but must be exactly 32 bytes.", hash.size());
password_hash = hash;
return;
}
@ -190,10 +189,9 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
{
if (hash.size() != 20)
throw Exception(
"Password hash for the 'DOUBLE_SHA1_PASSWORD' authentication type has length " + std::to_string(hash.size())
+ " but must be exactly 20 bytes.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Password hash for the 'DOUBLE_SHA1_PASSWORD' authentication type has length {} "
"but must be exactly 20 bytes.", hash.size());
password_hash = hash;
return;
}
@ -202,18 +200,18 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::LDAP:
case AuthenticationType::KERBEROS:
case AuthenticationType::SSL_CERTIFICATE:
throw Exception("Cannot specify password binary hash for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify password binary hash for authentication type {}", toString(type));
case AuthenticationType::MAX:
break;
}
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setPasswordHashBinary(): authentication type {} not supported", toString(type));
}
void AuthenticationData::setSalt(String salt_)
{
if (type != AuthenticationType::SHA256_PASSWORD)
throw Exception("setSalt(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setSalt(): authentication type {} not supported", toString(type));
salt = std::move(salt_);
}
@ -225,7 +223,7 @@ String AuthenticationData::getSalt() const
void AuthenticationData::setSSLCertificateCommonNames(boost::container::flat_set<String> common_names_)
{
if (common_names_.empty())
throw Exception("The 'SSL CERTIFICATE' authentication type requires a non-empty list of common names.", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The 'SSL CERTIFICATE' authentication type requires a non-empty list of common names.");
ssl_certificate_common_names = std::move(common_names_);
}


@ -113,7 +113,7 @@ const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type)
}
case QuotaType::MAX: break;
}
throw Exception("Unexpected quota type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota type: {}", static_cast<int>(type));
}
String toString(QuotaKeyType type)
@ -187,7 +187,7 @@ const QuotaKeyTypeInfo & QuotaKeyTypeInfo::get(QuotaKeyType type)
}
case QuotaKeyType::MAX: break;
}
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota key type: {}", static_cast<int>(type));
}
}


@ -75,7 +75,7 @@ const RowPolicyFilterTypeInfo & RowPolicyFilterTypeInfo::get(RowPolicyFilterType
#endif
case RowPolicyFilterType::MAX: break;
}
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", static_cast<size_t>(type_));
}
}


@ -470,7 +470,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
/// If the current user has been dropped we always throw an exception (even if `throw_if_denied` is false)
/// because dropping of the current user is considered as a situation which is exceptional enough to stop
/// query execution.
throw Exception(getUserName() + ": User has been dropped", ErrorCodes::UNKNOWN_USER);
throw Exception(ErrorCodes::UNKNOWN_USER, "{}: User has been dropped", getUserName());
}
if (is_full_access)
@ -790,7 +790,7 @@ void ContextAccess::checkGranteeIsAllowed(const UUID & grantee_id, const IAccess
auto current_user = getUser();
if (!current_user->grantees.match(grantee_id))
throw Exception(grantee.formatTypeWithName() + " is not allowed as grantee", ErrorCodes::ACCESS_DENIED);
throw Exception(ErrorCodes::ACCESS_DENIED, "{} is not allowed as grantee", grantee.formatTypeWithName());
}
void ContextAccess::checkGranteesAreAllowed(const std::vector<UUID> & grantee_ids) const


@ -29,7 +29,7 @@ bool Credentials::isReady() const
void Credentials::throwNotReady()
{
throw Exception("Credentials are not ready", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Credentials are not ready");
}
AlwaysAllowCredentials::AlwaysAllowCredentials()


@ -172,7 +172,8 @@ DiskAccessStorage::DiskAccessStorage(const String & storage_name_, const String
std::filesystem::create_directories(directory_path, create_dir_error_code);
if (!std::filesystem::exists(directory_path) || !std::filesystem::is_directory(directory_path) || create_dir_error_code)
throw Exception("Couldn't create directory " + directory_path + " reason: '" + create_dir_error_code.message() + "'", ErrorCodes::DIRECTORY_DOESNT_EXIST);
throw Exception(ErrorCodes::DIRECTORY_DOESNT_EXIST, "Couldn't create directory {} reason: '{}'",
directory_path, create_dir_error_code.message());
bool should_rebuild_lists = std::filesystem::exists(getNeedRebuildListsMarkFilePath(directory_path));
if (!should_rebuild_lists)
@ -722,7 +723,7 @@ void DiskAccessStorage::deleteAccessEntityOnDisk(const UUID & id) const
{
auto file_path = getEntityFilePath(directory_path, id);
if (!std::filesystem::remove(file_path))
throw Exception("Couldn't delete " + file_path, ErrorCodes::FILE_DOESNT_EXIST);
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Couldn't delete {}", file_path);
}


@ -29,11 +29,11 @@ struct EnabledQuota::Impl
std::chrono::system_clock::time_point end_of_interval)
{
const auto & type_info = QuotaTypeInfo::get(quota_type);
throw Exception(
"Quota for user " + backQuote(user_name) + " for " + to_string(duration) + " has been exceeded: "
+ type_info.valueToStringWithName(used) + "/" + type_info.valueToString(max) + ". "
+ "Interval will end at " + to_string(end_of_interval) + ". " + "Name of quota template: " + backQuote(quota_name),
ErrorCodes::QUOTA_EXCEEDED);
throw Exception(ErrorCodes::QUOTA_EXCEEDED, "Quota for user {} for {} has been exceeded: {}/{}. "
"Interval will end at {}. Name of quota template: {}",
backQuote(user_name), to_string(duration),
type_info.valueToStringWithName(used),
type_info.valueToString(max), to_string(end_of_interval), backQuote(quota_name));
}


@ -47,15 +47,16 @@ void parseLDAPSearchParams(LDAPClient::SearchParams & params, const Poco::Util::
else if (scope == "subtree") params.scope = LDAPClient::SearchParams::Scope::SUBTREE;
else if (scope == "children") params.scope = LDAPClient::SearchParams::Scope::CHILDREN;
else
throw Exception("Invalid value for 'scope' field of LDAP search parameters in '" + prefix +
"' section, must be one of 'base', 'one_level', 'subtree', or 'children'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Invalid value for 'scope' field of LDAP search parameters "
"in '{}' section, must be one of 'base', 'one_level', 'subtree', or 'children'", prefix);
}
}
void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConfiguration & config, const String & name)
{
if (name.empty())
throw Exception("LDAP server name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty");
const String ldap_server_config = "ldap_servers." + name;
@ -77,17 +78,17 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
const bool has_search_limit = config.has(ldap_server_config + ".search_limit");
if (!has_host)
throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Missing 'host' entry");
params.host = config.getString(ldap_server_config + ".host");
if (params.host.empty())
throw Exception("Empty 'host' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty 'host' entry");
if (has_bind_dn)
{
if (has_auth_dn_prefix || has_auth_dn_suffix)
throw Exception("Deprecated 'auth_dn_prefix' and 'auth_dn_suffix' entries cannot be used with 'bind_dn' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Deprecated 'auth_dn_prefix' and 'auth_dn_suffix' entries cannot be used with 'bind_dn' entry");
params.bind_dn = config.getString(ldap_server_config + ".bind_dn");
}
@ -141,7 +142,9 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
else if (tls_minimum_protocol_version_lc_str == "tls1.2")
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048
else
throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Bad value for 'tls_minimum_protocol_version' entry, allowed values are: "
"'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'");
}
if (has_tls_require_cert)
@ -158,7 +161,9 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
else if (tls_require_cert_lc_str == "demand")
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048
else
throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Bad value for 'tls_require_cert' entry, allowed values are: "
"'never', 'allow', 'try', 'demand'");
}
if (has_tls_cert_file)
@ -180,7 +185,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
{
UInt32 port = config.getUInt(ldap_server_config + ".port");
if (port > 65535)
throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad value for 'port' entry");
params.port = port;
}
@ -212,13 +217,13 @@ void parseKerberosParams(GSSAcceptorContext::Params & params, const Poco::Util::
}
if (reealm_key_count > 0 && principal_keys_count > 0)
throw Exception("Realm and principal name cannot be specified simultaneously", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Realm and principal name cannot be specified simultaneously");
if (reealm_key_count > 1)
throw Exception("Multiple realm sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple realm sections are not allowed");
if (principal_keys_count > 1)
throw Exception("Multiple principal sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple principal sections are not allowed");
params.realm = config.getString("kerberos.realm", "");
params.principal = config.getString("kerberos.principal", "");
@ -274,10 +279,10 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
}
if (ldap_servers_key_count > 1)
throw Exception("Multiple ldap_servers sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple ldap_servers sections are not allowed");
if (kerberos_keys_count > 1)
throw Exception("Multiple kerberos sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple kerberos sections are not allowed");
Poco::Util::AbstractConfiguration::Keys ldap_server_names;
config.keys("ldap_servers", ldap_server_names);
@ -291,7 +296,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
ldap_server_name.resize(bracket_pos);
if (ldap_client_params_blueprint.contains(ldap_server_name))
throw Exception("Multiple LDAP servers with the same name are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple LDAP servers with the same name are not allowed");
LDAPClient::Params ldap_client_params_tmp;
parseLDAPServer(ldap_client_params_tmp, config, ldap_server_name);
@ -346,7 +351,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B
// Retrieve the server parameters.
const auto pit = ldap_client_params_blueprint.find(server);
if (pit == ldap_client_params_blueprint.end())
throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server '{}' is not configured", server);
params = pit->second;
params->user = credentials.getUserName();
@ -461,7 +466,7 @@ bool ExternalAuthenticators::checkKerberosCredentials(const String & realm, cons
std::scoped_lock lock(mutex);
if (!kerberos_params.has_value())
throw Exception("Kerberos is not enabled", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Kerberos is not enabled");
if (!credentials.isReady())
return false;
@ -480,7 +485,7 @@ GSSAcceptorContext::Params ExternalAuthenticators::getKerberosParams() const
std::scoped_lock lock(mutex);
if (!kerberos_params.has_value())
throw Exception("Kerberos is not enabled", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Kerberos is not enabled");
return kerberos_params.value();
}


@ -265,16 +265,16 @@ void GSSAcceptorContext::initHandles()
if (!params.keytab.empty())
{
if (!std::filesystem::exists(params.keytab))
throw Exception("Keytab file not found", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Keytab file not found");
if (krb5_gss_register_acceptor_identity(params.keytab.c_str()))
throw Exception("Failed to register keytab file", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Failed to register keytab file");
}
if (!params.principal.empty())
{
if (!params.realm.empty())
throw Exception("Realm and principal name cannot be specified simultaneously", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Realm and principal name cannot be specified simultaneously");
gss_buffer_desc acceptor_name_buf;
acceptor_name_buf.length = params.principal.size();
@ -305,7 +305,7 @@ void GSSAcceptorContext::initHandles()
if (GSS_ERROR(major_status))
{
const auto messages = extractStatusMessages(major_status, minor_status, GSS_C_NO_OID);
throw Exception("gss_import_name() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_import_name() failed{}", (messages.empty() ? "" : ": " + messages));
}
minor_status = 0;
@ -323,7 +323,7 @@ void GSSAcceptorContext::initHandles()
if (GSS_ERROR(major_status))
{
const auto messages = extractStatusMessages(major_status, minor_status, GSS_C_NO_OID);
throw Exception("gss_acquire_cred() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_acquire_cred() failed{}", (messages.empty() ? "" : ": " + messages));
}
}
}
@ -387,21 +387,26 @@ String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger
if (major_status == GSS_S_COMPLETE)
{
if (!params.mechanism.empty() && !equalMechanisms(params.mechanism, mech_type))
throw Exception("gss_accept_sec_context() succeeded, but: the authentication mechanism is not what was expected", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR,
"gss_accept_sec_context() succeeded, but: "
"the authentication mechanism is not what was expected");
if (flags & GSS_C_ANON_FLAG)
throw Exception("gss_accept_sec_context() succeeded, but: the initiator does not wish to be authenticated", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator does not wish to be authenticated");
std::tie(user_name, realm) = extractNameAndRealm(initiator_name);
if (user_name.empty())
throw Exception("gss_accept_sec_context() succeeded, but: the initiator name cannot be extracted", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator name cannot be extracted");
if (realm.empty())
throw Exception("gss_accept_sec_context() succeeded, but: the initiator realm cannot be extracted", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator realm cannot be extracted");
if (!params.realm.empty() && params.realm != realm)
throw Exception("gss_accept_sec_context() succeeded, but: the initiator realm is not what was expected (expected: " + params.realm + ", actual: " + realm + ")", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR,
"gss_accept_sec_context() succeeded, but: "
"the initiator realm is not what was expected (expected: {}, actual: {})",
params.realm, realm);
output_token = bufferToString(output_token_buf);
@ -420,7 +425,7 @@ String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger
else
{
const auto messages = extractStatusMessages(major_status, minor_status, mech_type);
throw Exception("gss_accept_sec_context() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() failed{}", (messages.empty() ? "" : ": " + messages));
}
}
catch (...)
@ -452,7 +457,7 @@ void GSSAcceptorContext::initHandles()
String GSSAcceptorContext::processToken(const String &, Poco::Logger *)
{
throw Exception("ClickHouse was built without GSS-API/Kerberos support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without GSS-API/Kerberos support");
}
#endif // USE_KRB5
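
Editor's note: several of the GSS-API conversions above keep an optional detail suffix by formatting an already-assembled string into a trailing {} placeholder, so the resulting text is identical to what the old concatenation produced, including the empty case. A small stand-alone illustration (std::format stands in for the fmt-based machinery the real Exception uses; the sample detail text is made up):

    #include <format>
    #include <iostream>
    #include <string>

    int main()
    {
        for (const std::string & messages : {std::string{}, std::string{"Unspecified GSS failure"}})
        {
            // Old form:  "gss_import_name() failed" + (messages.empty() ? "" : ": " + messages)
            // New form:  the optional ": <details>" suffix is fed into a trailing {} placeholder,
            //            so an empty suffix leaves the message exactly as before.
            std::cout << std::format("gss_import_name() failed{}",
                                     messages.empty() ? "" : ": " + messages)
                      << '\n';
        }
    }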

View File

@ -630,79 +630,70 @@ Poco::Logger * IAccessStorage::getLogger() const
void IAccessStorage::throwNotFound(const UUID & id) const
{
throw Exception(outputID(id) + " not found in " + getStorageName(), ErrorCodes::ACCESS_ENTITY_NOT_FOUND);
throw Exception(ErrorCodes::ACCESS_ENTITY_NOT_FOUND, "{} not found in {}", outputID(id), getStorageName());
}
void IAccessStorage::throwNotFound(AccessEntityType type, const String & name) const
{
int error_code = AccessEntityTypeInfo::get(type).not_found_error_code;
throw Exception("There is no " + formatEntityTypeWithName(type, name) + " in " + getStorageName(), error_code);
throw Exception(error_code, "There is no {} in {}", formatEntityTypeWithName(type, name), getStorageName());
}
void IAccessStorage::throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type)
{
throw Exception(
outputID(id) + ": " + formatEntityTypeWithName(type, name) + " expected to be of type " + toString(required_type),
ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: {} expected to be of type {}", outputID(id),
formatEntityTypeWithName(type, name), toString(required_type));
}
void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, AccessEntityType type, const String & name, AccessEntityType existing_type, const String & existing_name) const
{
throw Exception(
formatEntityTypeWithName(type, name) + ": cannot insert because the " + outputID(id) + " is already used by "
+ formatEntityTypeWithName(existing_type, existing_name) + " in " + getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: "
"cannot insert because the {} is already used by {} in {}", formatEntityTypeWithName(type, name),
outputID(id), formatEntityTypeWithName(existing_type, existing_name), getStorageName());
}
void IAccessStorage::throwNameCollisionCannotInsert(AccessEntityType type, const String & name) const
{
throw Exception(
formatEntityTypeWithName(type, name) + ": cannot insert because " + formatEntityTypeWithName(type, name) + " already exists in "
+ getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot insert because {} already exists in {}",
formatEntityTypeWithName(type, name), formatEntityTypeWithName(type, name), getStorageName());
}
void IAccessStorage::throwNameCollisionCannotRename(AccessEntityType type, const String & old_name, const String & new_name) const
{
throw Exception(
formatEntityTypeWithName(type, old_name) + ": cannot rename to " + backQuote(new_name) + " because "
+ formatEntityTypeWithName(type, new_name) + " already exists in " + getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot rename to {} because {} already exists in {}",
formatEntityTypeWithName(type, old_name), backQuote(new_name), formatEntityTypeWithName(type, new_name), getStorageName());
}
void IAccessStorage::throwReadonlyCannotInsert(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot insert " + formatEntityTypeWithName(type, name) + " to " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot insert {} to {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}
void IAccessStorage::throwReadonlyCannotUpdate(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot update " + formatEntityTypeWithName(type, name) + " in " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot update {} in {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}
void IAccessStorage::throwReadonlyCannotRemove(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot remove " + formatEntityTypeWithName(type, name) + " from " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot remove {} from {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}
void IAccessStorage::throwAddressNotAllowed(const Poco::Net::IPAddress & address)
{
throw Exception("Connections from " + address.toString() + " are not allowed", ErrorCodes::IP_ADDRESS_NOT_ALLOWED);
throw Exception(ErrorCodes::IP_ADDRESS_NOT_ALLOWED, "Connections from {} are not allowed", address.toString());
}
void IAccessStorage::throwAuthenticationTypeNotAllowed(AuthenticationType auth_type)
@ -715,7 +706,7 @@ void IAccessStorage::throwAuthenticationTypeNotAllowed(AuthenticationType auth_t
void IAccessStorage::throwInvalidCredentials()
{
throw Exception("Invalid credentials", ErrorCodes::WRONG_PASSWORD);
throw Exception(ErrorCodes::WRONG_PASSWORD, "Invalid credentials");
}
void IAccessStorage::throwBackupNotAllowed() const

View File

@ -71,7 +71,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
const char *deftype = nullptr;
if (!std::filesystem::exists(keytab_file))
throw Exception("Keytab file does not exist", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Keytab file does not exist");
ret = krb5_init_context(&k5.ctx);
if (ret)
@ -81,7 +81,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
{
ret = krb5_cc_resolve(k5.ctx, cache_name.c_str(), &k5.out_cc);
if (ret)
throw Exception("Error in resolving cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving cache{}", fmtError(ret));
LOG_TRACE(log,"Resolved cache");
}
else
@ -89,7 +89,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Resolve the default cache and get its type and default principal (if it is initialized).
ret = krb5_cc_default(k5.ctx, &defcache);
if (ret)
throw Exception("Error while getting default cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while getting default cache{}", fmtError(ret));
LOG_TRACE(log,"Resolved default cache");
deftype = krb5_cc_get_type(k5.ctx, defcache);
if (krb5_cc_get_principal(k5.ctx, defcache, &defcache_princ) != 0)
@ -99,7 +99,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use the specified principal name.
ret = krb5_parse_name_flags(k5.ctx, principal.c_str(), 0, &k5.me);
if (ret)
throw Exception("Error when parsing principal name " + principal + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when parsing principal name {}", principal + fmtError(ret));
// Cache related commands
if (k5.out_cc == nullptr && krb5_cc_support_switch(k5.ctx, deftype))
@ -107,7 +107,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use an existing cache for the client principal if we can.
ret = krb5_cc_cache_match(k5.ctx, k5.me, &k5.out_cc);
if (ret && ret != KRB5_CC_NOTFOUND)
throw Exception("Error while searching for cache for " + principal + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while searching for cache for {}", principal + fmtError(ret));
if (0 == ret)
{
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));
@ -118,7 +118,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Create a new cache to avoid overwriting the initialized default cache.
ret = krb5_cc_new_unique(k5.ctx, deftype, nullptr, &k5.out_cc);
if (ret)
throw Exception("Error while generating new cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while generating new cache{}", fmtError(ret));
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));
k5.switch_to_cache = 1;
}
@ -134,24 +134,24 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
ret = krb5_unparse_name(k5.ctx, k5.me, &k5.name);
if (ret)
throw Exception("Error when unparsing name" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when unparsing name{}", fmtError(ret));
LOG_TRACE(log,"Using principal: {}", k5.name);
// Allocate a new initial credential options structure.
ret = krb5_get_init_creds_opt_alloc(k5.ctx, &options);
if (ret)
throw Exception("Error in options allocation" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in options allocation{}", fmtError(ret));
// Resolve keytab
ret = krb5_kt_resolve(k5.ctx, keytab_file.c_str(), &keytab);
if (ret)
throw Exception("Error in resolving keytab "+keytab_file + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving keytab {}{}", keytab_file, fmtError(ret));
LOG_TRACE(log,"Using keytab: {}", keytab_file);
// Set an output credential cache in initial credential options.
ret = krb5_get_init_creds_opt_set_out_ccache(k5.ctx, options, k5.out_cc);
if (ret)
throw Exception("Error in setting output credential cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in setting output credential cache{}", fmtError(ret));
// Action: init or renew
LOG_TRACE(log,"Trying to renew credentials");
@ -165,7 +165,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Request KDC for an initial credentials using keytab.
ret = krb5_get_init_creds_keytab(k5.ctx, &my_creds, k5.me, keytab, 0, nullptr, options);
if (ret)
throw Exception("Error in getting initial credentials" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in getting initial credentials{}", fmtError(ret));
else
LOG_TRACE(log,"Got initial credentials");
}
@ -175,7 +175,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Initialize a credential cache. Destroy any existing contents of cache and initialize it for the default principal.
ret = krb5_cc_initialize(k5.ctx, k5.out_cc, k5.me);
if (ret)
throw Exception("Error when initializing cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when initializing cache{}", fmtError(ret));
LOG_TRACE(log,"Initialized cache");
// Store credentials in a credential cache.
ret = krb5_cc_store_cred(k5.ctx, k5.out_cc, &my_creds);
@ -189,7 +189,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Make a credential cache the primary cache for its collection.
ret = krb5_cc_switch(k5.ctx, k5.out_cc);
if (ret)
throw Exception("Error while switching to new cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while switching to new cache{}", fmtError(ret));
}
LOG_TRACE(log,"Authenticated to Kerberos v5");

View File

@ -53,11 +53,11 @@ void LDAPAccessStorage::setConfiguration(const Poco::Util::AbstractConfiguration
const bool has_role_mapping = config.has(prefix_str + "role_mapping");
if (!has_server)
throw Exception("Missing 'server' field for LDAP user directory", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Missing 'server' field for LDAP user directory");
const auto ldap_server_name_cfg = config.getString(prefix_str + "server");
if (ldap_server_name_cfg.empty())
throw Exception("Empty 'server' field for LDAP user directory", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty 'server' field for LDAP user directory");
std::set<String> common_roles_cfg;
if (has_roles)
@ -321,7 +321,7 @@ std::set<String> LDAPAccessStorage::mapExternalRolesNoLock(const LDAPClient::Sea
std::set<String> role_names;
if (external_roles.size() != role_search_params.size())
throw Exception("Unable to map external roles", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unable to map external roles");
for (std::size_t i = 0; i < external_roles.size(); ++i)
{

View File

@ -226,13 +226,13 @@ bool LDAPClient::openConnection()
auto * uri = ldap_url_desc2str(&url);
if (!uri)
throw Exception("ldap_url_desc2str() failed", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "ldap_url_desc2str() failed");
SCOPE_EXIT({ ldap_memfree(uri); });
handleError(ldap_initialize(&handle, uri));
if (!handle)
throw Exception("ldap_initialize() failed", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "ldap_initialize() failed");
}
{
@ -371,10 +371,10 @@ bool LDAPClient::openConnection()
const auto user_dn_search_results = search(*params.user_dn_detection);
if (user_dn_search_results.empty())
throw Exception("Failed to detect user DN: empty search results", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Failed to detect user DN: empty search results");
if (user_dn_search_results.size() > 1)
throw Exception("Failed to detect user DN: more than one entry in the search results", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Failed to detect user DN: more than one entry in the search results");
final_user_dn = *user_dn_search_results.begin();
}
@ -383,7 +383,7 @@ bool LDAPClient::openConnection()
}
default:
throw Exception("Unknown SASL mechanism", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Unknown SASL mechanism");
}
}
@ -576,7 +576,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
}
case -1:
throw Exception("Failed to process LDAP search message", ErrorCodes::LDAP_ERROR);
throw Exception(ErrorCodes::LDAP_ERROR, "Failed to process LDAP search message");
}
}
@ -586,10 +586,10 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search_params, SearchResultsList * role_search_results)
{
if (params.user.empty())
throw Exception("LDAP authentication of a user with empty name is not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP authentication of a user with empty name is not allowed");
if (!role_search_params != !role_search_results)
throw Exception("Cannot return LDAP search results", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot return LDAP search results");
// Silently reject authentication attempt if the password is empty as if it didn't match.
if (params.password.empty())
@ -628,12 +628,12 @@ bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search
void LDAPClient::handleError(const int, String)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}
bool LDAPClient::openConnection()
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}
void LDAPClient::closeConnection() noexcept
@ -642,12 +642,12 @@ void LDAPClient::closeConnection() noexcept
LDAPClient::SearchResults LDAPClient::search(const SearchParams &)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}
bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList *, SearchResultsList *)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
}
#endif // USE_LDAP
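
Editor's note: the tail of LDAPClient.cpp, like the GSSAcceptorContext.cpp stub earlier in this diff, keeps the pattern of compiling placeholder bodies that throw FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME when the optional library is disabled at build time. A condensed, hypothetical sketch of that arrangement (the class shape, error-code value, and macro handling are illustrative only):

    // When the optional dependency is compiled out, every entry point still links but throws.
    #include <iostream>
    #include <stdexcept>
    #include <string>

    namespace ErrorCodes { constexpr int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME = 344; }  // illustrative value

    struct Exception : std::runtime_error
    {
        Exception(int code_, const std::string & message) : std::runtime_error(message), code(code_) {}
        int code;
    };

    class LDAPClient
    {
    public:
        bool openConnection();
    };

    #if USE_LDAP

    bool LDAPClient::openConnection()
    {
        // ... real implementation, built against the LDAP client library ...
        return true;
    }

    #else

    bool LDAPClient::openConnection()
    {
        throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without LDAP support");
    }

    #endif // USE_LDAP

    int main()
    {
        try
        {
            LDAPClient{}.openConnection();
        }
        catch (const Exception & e)
        {
            std::cout << e.what() << " (code " << e.code << ")\n";
        }
    }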

View File

@ -316,10 +316,8 @@ bool MultipleAccessStorage::updateImpl(const UUID & id, const UpdateFunc & updat
break;
if (storage->find(new_entity->getType(), new_entity->getName()))
{
throw Exception(
old_entity->formatTypeWithName() + ": cannot rename to " + backQuote(new_entity->getName()) + " because "
+ new_entity->formatTypeWithName() + " already exists in " + storage->getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot rename to {} because {} already exists in {}",
old_entity->formatTypeWithName(), backQuote(new_entity->getName()), new_entity->formatTypeWithName(), storage->getStorageName());
}
}
}

View File

@ -55,9 +55,8 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
{
if (!params.client_key.empty())
return params.client_key;
throw Exception(
"Quota " + quota->getName() + " (for user " + params.user_name + ") requires a client supplied key.",
ErrorCodes::QUOTA_REQUIRES_CLIENT_KEY);
throw Exception(ErrorCodes::QUOTA_REQUIRES_CLIENT_KEY, "Quota {} (for user {}) requires a client supplied key.",
quota->getName(), params.user_name);
}
case QuotaKeyType::CLIENT_KEY_OR_USER_NAME:
{
@ -73,7 +72,7 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
}
case QuotaKeyType::MAX: break;
}
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(quota->key_type)), ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota key type: {}", static_cast<int>(quota->key_type));
}

View File

@ -51,7 +51,7 @@ ReplicatedAccessStorage::ReplicatedAccessStorage(
, backup_allowed(allow_backup_)
{
if (zookeeper_path.empty())
throw Exception("ZooKeeper path must be non-empty", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "ZooKeeper path must be non-empty");
if (zookeeper_path.back() == '/')
zookeeper_path.resize(zookeeper_path.size() - 1);
@ -458,7 +458,7 @@ zkutil::ZooKeeperPtr ReplicatedAccessStorage::getZooKeeperNoLock()
{
auto zookeeper = get_zookeeper();
if (!zookeeper)
throw Exception("Can't have Replicated access without ZooKeeper", ErrorCodes::NO_ZOOKEEPER);
throw Exception(ErrorCodes::NO_ZOOKEEPER, "Can't have Replicated access without ZooKeeper");
/// It's possible that we connected to different [Zoo]Keeper instance
/// so we may read a bit stale state.

View File

@ -254,7 +254,7 @@ bool RolesOrUsersSet::match(const UUID & user_id, const boost::container::flat_s
std::vector<UUID> RolesOrUsersSet::getMatchingIDs() const
{
if (all)
throw Exception("getAllMatchingIDs() can't get ALL ids without access_control", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "getAllMatchingIDs() can't get ALL ids without access_control");
std::vector<UUID> res;
boost::range::set_difference(ids, except_ids, std::back_inserter(res));
return res;

View File

@ -45,7 +45,7 @@ void RowPolicy::setFullName(const RowPolicyName & full_name_)
void RowPolicy::setName(const String &)
{
throw Exception("RowPolicy::setName() is not implemented", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RowPolicy::setName() is not implemented");
}

View File

@ -124,7 +124,7 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
if (new_value != old_value)
{
if (old_value == SettingConstraintWritability::CONST)
throw Exception("Setting " + element.setting_name + " should not be changed", ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", element.setting_name);
}
}
}
@ -274,7 +274,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
if (constraint.writability == SettingConstraintWritability::CONST)
{
if (reaction == THROW_ON_VIOLATION)
throw Exception("Setting " + setting_name + " should not be changed", ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", setting_name);
else
return false;
}
@ -285,7 +285,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
if (!min_value.isNull() && !max_value.isNull() && less_or_cannot_compare(max_value, min_value))
{
if (reaction == THROW_ON_VIOLATION)
throw Exception("Setting " + setting_name + " should not be changed", ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", setting_name);
else
return false;
}
@ -294,9 +294,8 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
{
if (reaction == THROW_ON_VIOLATION)
{
throw Exception(
"Setting " + setting_name + " shouldn't be less than " + applyVisitor(FieldVisitorToString(), min_value),
ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be less than {}",
setting_name, applyVisitor(FieldVisitorToString(), min_value));
}
else
change.value = min_value;
@ -306,9 +305,8 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
{
if (reaction == THROW_ON_VIOLATION)
{
throw Exception(
"Setting " + setting_name + " shouldn't be greater than " + applyVisitor(FieldVisitorToString(), max_value),
ErrorCodes::SETTING_CONSTRAINT_VIOLATION);
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be greater than {}",
setting_name, applyVisitor(FieldVisitorToString(), max_value));
}
else
change.value = max_value;
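
Editor's note: in the SettingsConstraints checker above, the same violation either throws (now via the fmt-style SETTING_CONSTRAINT_VIOLATION exceptions) or silently clamps the value, depending on the reaction requested by the caller. A condensed sketch of that decision, assuming a plain integral setting rather than the real Field-based values and FieldVisitorToString; names and values are illustrative:

    #include <format>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    enum class Reaction { THROW_ON_VIOLATION, CLAMP_ON_VIOLATION };

    /// Throws in THROW_ON_VIOLATION mode; otherwise clamps the value in place,
    /// mirroring the min/max branch of SettingsConstraints::Checker::check above.
    static void checkRange(const std::string & setting_name, long & new_value,
                           long min_value, long max_value, Reaction reaction)
    {
        if (new_value < min_value)
        {
            if (reaction == Reaction::THROW_ON_VIOLATION)
                throw std::runtime_error(std::format("Setting {} shouldn't be less than {}", setting_name, min_value));
            new_value = min_value;
        }
        else if (new_value > max_value)
        {
            if (reaction == Reaction::THROW_ON_VIOLATION)
                throw std::runtime_error(std::format("Setting {} shouldn't be greater than {}", setting_name, max_value));
            new_value = max_value;
        }
    }

    int main()
    {
        long value = 10'000'000;
        checkRange("max_threads_illustrative", value, 1, 64, Reaction::CLAMP_ON_VIOLATION);
        std::cout << "clamped to " << value << '\n';  // prints "clamped to 64"
    }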

View File

@ -58,7 +58,9 @@ void SettingsProfileElement::init(const ASTSettingsProfileElement & ast, const A
access_control->checkSettingNameIsAllowed(setting_name);
/// Check if a CHANGEABLE_IN_READONLY is allowed.
if (ast.writability == SettingConstraintWritability::CHANGEABLE_IN_READONLY && !access_control->doesSettingsConstraintsReplacePrevious())
throw Exception("CHANGEABLE_IN_READONLY for " + setting_name + " is not allowed unless settings_constraints_replace_previous is enabled", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
"CHANGEABLE_IN_READONLY for {} "
"is not allowed unless settings_constraints_replace_previous is enabled", setting_name);
}
value = ast.value;

View File

@ -94,7 +94,7 @@ void SettingsProfilesCache::setDefaultProfileName(const String & default_profile
auto it = profiles_by_name.find(default_profile_name);
if (it == profiles_by_name.end())
throw Exception("Settings profile " + backQuote(default_profile_name) + " not found", ErrorCodes::THERE_IS_NO_PROFILE);
throw Exception(ErrorCodes::THERE_IS_NO_PROFILE, "Settings profile {} not found", backQuote(default_profile_name));
default_profile_id = it->second;
}

View File

@ -67,11 +67,15 @@ namespace
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex + has_ldap + has_kerberos + has_certificates;
if (num_password_fields > 1)
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'ssl_certificates' are used to specify authentication info for user " + user_name + ". Must be only one of them.",
ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "More than one field of 'password', 'password_sha256_hex', "
"'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'ssl_certificates' "
"are used to specify authentication info for user {}. "
"Must be only one of them.", user_name);
if (num_password_fields < 1)
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' or 'ssl_certificates' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Either 'password' or 'password_sha256_hex' "
"or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' "
"or 'ssl_certificates' must be specified for user {}.", user_name);
if (has_password_plaintext)
{
@ -92,11 +96,11 @@ namespace
{
bool has_ldap_server = config.has(user_config + ".ldap.server");
if (!has_ldap_server)
throw Exception("Missing mandatory 'server' in 'ldap', with LDAP server name, for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Missing mandatory 'server' in 'ldap', with LDAP server name, for user {}.", user_name);
const auto ldap_server_name = config.getString(user_config + ".ldap.server");
if (ldap_server_name.empty())
throw Exception("LDAP server name cannot be empty for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty for user {}.", user_name);
user->auth_data = AuthenticationData{AuthenticationType::LDAP};
user->auth_data.setLDAPServerName(ldap_server_name);
@ -124,7 +128,7 @@ namespace
common_names.insert(std::move(value));
}
else
throw Exception("Unknown certificate pattern type: " + key, ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown certificate pattern type: {}", key);
}
user->auth_data.setSSLCertificateCommonNames(std::move(common_names));
}
@ -167,7 +171,7 @@ namespace
else if (key.starts_with("host"))
user->allowed_client_hosts.addName(value);
else
throw Exception("Unknown address pattern type: " + key, ErrorCodes::UNKNOWN_ADDRESS_PATTERN_TYPE);
throw Exception(ErrorCodes::UNKNOWN_ADDRESS_PATTERN_TYPE, "Unknown address pattern type: {}", key);
}
}
@ -466,13 +470,15 @@ namespace
if (access_control.doesSettingsConstraintsReplacePrevious())
profile_element.writability = SettingConstraintWritability::CHANGEABLE_IN_READONLY;
else
throw Exception("Setting changeable_in_readonly for " + setting_name + " is not allowed unless settings_constraints_replace_previous is enabled", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Setting changeable_in_readonly for {} is not allowed "
"unless settings_constraints_replace_previous is enabled", setting_name);
}
else
throw Exception("Setting " + constraint_type + " value for " + setting_name + " isn't supported", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Setting {} value for {} isn't supported", constraint_type, setting_name);
}
if (writability_count > 1)
throw Exception("Not more than one constraint writability specifier (const/readonly/changeable_in_readonly) is allowed for " + setting_name, ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not more than one constraint writability specifier "
"(const/readonly/changeable_in_readonly) is allowed for {}", setting_name);
profile_elements.push_back(std::move(profile_element));
}

View File

@ -68,7 +68,7 @@ public:
void create(AggregateDataPtr __restrict place) const override
{
if (std::uniform_real_distribution<>(0.0, 1.0)(thread_local_rng) <= throw_probability)
throw Exception("Aggregate function " + getName() + " has thrown exception successfully", ErrorCodes::AGGREGATE_FUNCTION_THROW);
throw Exception(ErrorCodes::AGGREGATE_FUNCTION_THROW, "Aggregate function {} has thrown exception successfully", getName());
new (place) Data;
}
@ -116,7 +116,7 @@ void registerAggregateFunctionAggThrow(AggregateFunctionFactory & factory)
if (parameters.size() == 1)
throw_probability = parameters[0].safeGet<Float64>();
else if (parameters.size() > 1)
throw Exception("Aggregate function " + name + " cannot have more than one parameter", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} cannot have more than one parameter", name);
return std::make_shared<AggregateFunctionThrow>(argument_types, parameters, throw_probability);
});

View File

@ -78,7 +78,7 @@ public:
{
auto f_stat = data(place).getFStatistic();
if (std::isinf(f_stat) || isNaN(f_stat) || f_stat < 0)
throw Exception("F statistic is not defined or infinite for these arguments", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "F statistic is not defined or infinite for these arguments");
auto p_value = data(place).getPValue(f_stat);

View File

@ -52,8 +52,9 @@ public:
, serialization_val(type_val->getDefaultSerialization())
{
if (!type_val->isComparable())
throw Exception("Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
+ " because the values of that data type are not comparable", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of "
"aggregate function {} because the values of that data type are not comparable",
type_val->getName(), getName());
}
String getName() const override

View File

@ -26,7 +26,7 @@ public:
DataTypes transformArguments(const DataTypes & arguments) const override
{
if (arguments.empty())
throw Exception("-Array aggregate functions require at least one argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "-Array aggregate functions require at least one argument");
DataTypes nested_arguments;
for (const auto & type : arguments)
@ -34,8 +34,8 @@ public:
if (const DataTypeArray * array = typeid_cast<const DataTypeArray *>(type.get()))
nested_arguments.push_back(array->getNestedType());
else
throw Exception("Illegal type " + type->getName() + " of argument"
" for aggregate function with " + getName() + " suffix. Must be array.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument"
" for aggregate function with {} suffix. Must be array.", type->getName(), getName());
}
return nested_arguments;

View File

@ -36,7 +36,7 @@ public:
assert(parameters == nested_func->getParameters());
for (const auto & type : arguments)
if (!isArray(type))
throw Exception("All arguments for aggregate function " + getName() + " must be arrays", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "All arguments for aggregate function {} must be arrays", getName());
}
String getName() const override
@ -129,7 +129,7 @@ public:
const IColumn::Offsets & ith_offsets = ith_column.getOffsets();
if (ith_offsets[row_num] != end || (row_num != 0 && ith_offsets[row_num - 1] != begin))
throw Exception("Arrays passed to " + getName() + " aggregate function have different sizes", ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH);
throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH, "Arrays passed to {} aggregate function have different sizes", getName());
}
for (size_t i = begin; i < end; ++i)

View File

@ -29,8 +29,8 @@ AggregateFunctionPtr createAggregateFunctionAvg(const std::string & name, const
const DataTypePtr& data_type = argument_types[0];
if (!allowType(data_type))
throw Exception("Illegal type " + data_type->getName() + " of argument for aggregate function " + name,
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}",
data_type->getName(), name);
AggregateFunctionPtr res;

View File

@ -71,11 +71,9 @@ createAggregateFunctionAvgWeighted(const std::string & name, const DataTypes & a
const auto data_type_weight = static_cast<const DataTypePtr>(argument_types[1]);
if (!allowTypes(data_type, data_type_weight))
throw Exception(
"Types " + data_type->getName() +
" and " + data_type_weight->getName() +
" are non-conforming as arguments for aggregate function " + name,
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Types {} and {} are non-conforming as arguments for aggregate function {}",
data_type->getName(), data_type_weight->getName(), name);
AggregateFunctionPtr ptr;

View File

@ -23,14 +23,15 @@ AggregateFunctionPtr createAggregateFunctionBitwise(const std::string & name, co
assertUnary(name, argument_types);
if (!argument_types[0]->canBeUsedInBitOperations())
throw Exception("The type " + argument_types[0]->getName() + " of argument for aggregate function " + name
+ " is illegal, because it cannot be used in bitwise operations",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The type {} of argument for aggregate function {} "
"is illegal, because it cannot be used in bitwise operations",
argument_types[0]->getName(), name);
AggregateFunctionPtr res(createWithUnsignedIntegerType<AggregateFunctionBitwise, Data>(*argument_types[0], argument_types[0]));
if (!res)
throw Exception("Illegal type " + argument_types[0]->getName() + " of argument for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument for aggregate function {}", argument_types[0]->getName(), name);
return res;
}

View File

@ -21,8 +21,9 @@ AggregateFunctionPtr createAggregateFunctionRate(const std::string & name, const
assertBinary(name, argument_types);
if (argument_types.size() < 2)
throw Exception("Aggregate function " + name + " requires at least two arguments",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Aggregate function {} requires at least two arguments",
name);
return std::make_shared<AggregateFunctionBoundingRatio>(argument_types);
}

Some files were not shown because too many files have changed in this diff.