Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 01:51:59 +00:00)

Merge branch 'master' into fix-system-flush-test

This commit is contained in: commit bc2015356a
@ -52,7 +52,6 @@

* Add new setting `disable_url_encoding` that allows disabling the decoding/encoding of the path in the URI of the URL engine. [#52337](https://github.com/ClickHouse/ClickHouse/pull/52337) ([Kruglov Pavel](https://github.com/Avogar)).
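
  For illustration, a hedged sketch of how the new setting might be applied per query (the URL is a placeholder, and it is assumed here that `disable_url_encoding` can be passed in a `SETTINGS` clause like other query-level settings):

  ``` sql
  -- Keep the path exactly as written, without percent-decoding it first.
  SELECT *
  FROM url('https://example.com/data/file%20name.csv', 'CSVWithNames')
  SETTINGS disable_url_encoding = 1;
  ```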

#### Performance Improvement

* Writing parquet files is 10x faster; it is multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)).
* Enable automatic selection of the sparse serialization format by default. It improves performance. The format has been supported since version 22.1. After this change, downgrading to versions older than 22.1 might not be possible. You can turn off the usage of the sparse serialization format by providing the `ratio_of_defaults_for_sparse_serialization = 1` setting for your MergeTree tables, as shown in the sketch below. [#49631](https://github.com/ClickHouse/ClickHouse/pull/49631) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
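
  A minimal sketch of that opt-out, assuming an existing MergeTree table named `my_table` (the table name is hypothetical):

  ``` sql
  -- Force full (non-sparse) serialization for one MergeTree table.
  ALTER TABLE my_table
      MODIFY SETTING ratio_of_defaults_for_sparse_serialization = 1;
  ```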
* Enable `move_all_conditions_to_prewhere` and `enable_multiple_prewhere_read_steps` settings by default. [#46365](https://github.com/ClickHouse/ClickHouse/pull/46365) ([Alexander Gololobov](https://github.com/davenger)).
* Improve the performance of some queries by tuning the allocator. [#46416](https://github.com/ClickHouse/ClickHouse/pull/46416) ([Azat Khuzhin](https://github.com/azat)).

@ -114,6 +113,7 @@

* Now the interserver port will be closed only after tables are shut down. [#52498](https://github.com/ClickHouse/ClickHouse/pull/52498) ([alesapin](https://github.com/alesapin)).

#### Experimental Feature

* Writing parquet files is 10x faster; it is multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)). This is controlled by the setting `output_format_parquet_use_custom_encoder`, which is disabled by default because the feature is not ideal yet.
* Added support for [PRQL](https://prql-lang.org/) as a query language. [#50686](https://github.com/ClickHouse/ClickHouse/pull/50686) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Allow adding a disk name for custom disks. Previously, custom disks would use an internally generated disk name. Now it is possible with `disk = disk_<name>(...)` (i.e., the disk will have the name `name`). [#51552](https://github.com/ClickHouse/ClickHouse/pull/51552) ([Kseniia Sumarokova](https://github.com/kssenii)). This syntax may still change within this release.
* (experimental MaterializedMySQL) Fixed a crash when `mysqlxx::Pool::Entry` was used after it was disconnected. [#52063](https://github.com/ClickHouse/ClickHouse/pull/52063) ([Val Doroshchuk](https://github.com/valbok)).

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.

@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.

@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image

docs/changelogs/v23.7.4.5-stable.md (new file, 17 lines)

@ -0,0 +1,17 @@

---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.7.4.5-stable (bd2fcd44553) FIXME as compared to v23.7.3.14-stable (bd9a510550c)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Disable the new parquet encoder [#53130](https://github.com/ClickHouse/ClickHouse/pull/53130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Revert changes in `ZstdDeflatingAppendableWriteBuffer` [#53111](https://github.com/ClickHouse/ClickHouse/pull/53111) ([Antonio Andelic](https://github.com/antonio2368)).

@ -21,7 +21,7 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32)

- `connection_string|storage_account_url` — A connection_string includes the account name & key ([create a connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)); alternatively, you can provide the storage account URL here and the account name & account key as separate parameters (see the `account_name` & `account_key` parameters).
- `container_name` - Container name
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `account_name` - if `storage_account_url` is used, the account name can be specified here
- `account_key` - if `storage_account_url` is used, the account key can be specified here
- `format` — The [format](/docs/en/interfaces/formats.md) of the file.

@ -37,7 +37,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)

### Engine parameters

- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all requests will not be signed.
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. The parameter is optional. If credentials are not specified, they are taken from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).

@ -164,6 +164,7 @@ For more information about virtual columns see [here](../../../engines/table-eng

The `path` argument can specify multiple files using bash-like wildcards. To be processed, a file must exist and match the whole path pattern. The listing of files is determined during `SELECT` (not at `CREATE` time). A usage sketch follows the list below.

- `*` — Substitutes any number of any characters except `/`, including the empty string.
- `**` — Substitutes any number of any characters including `/`, including the empty string.
- `?` — Substitutes any single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in the range from N to M, including both borders. N and M can have leading zeroes, e.g. `000..078`.
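
As an illustration (the bucket and file names are hypothetical), a brace-range wildcard that reads `some_file_1.csv` through `some_file_3.csv` might look like:

``` sql
-- Hypothetical bucket and paths; {1..3} matches some_file_1 through some_file_3.
SELECT count(*)
FROM s3('https://my-bucket.s3.amazonaws.com/data/some_file_{1..3}.csv', 'CSVWithNames');
```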

@ -27,7 +27,7 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)

**Engine parameters**

- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all requests will not be signed.
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. The parameter is optional. If credentials are not specified, they are taken from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).

@ -213,6 +213,7 @@ For more information about virtual columns see [here](../../../engines/table-eng

The `path` argument can specify multiple files using bash-like wildcards. To be processed, a file must exist and match the whole path pattern. The listing of files is determined during `SELECT` (not at `CREATE` time). A usage sketch follows the list below.

- `*` — Substitutes any number of any characters except `/`, including the empty string.
- `**` — Substitutes any number of any characters including `/`, including the empty string.
- `?` — Substitutes any single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in the range from N to M, including both borders. N and M can have leading zeroes, e.g. `000..078`.
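
A hedged sketch of an S3Queue table that uses the new `**` wildcard to consume CSV files from any subdirectory (the bucket, path, and `mode` value are assumptions for illustration):

``` sql
-- Hypothetical: ingest every CSV under any subdirectory of data/.
CREATE TABLE s3_queue_engine_table (name String, value UInt32)
    ENGINE = S3Queue('https://my-bucket.s3.amazonaws.com/data/**/*.csv', 'CSV')
    SETTINGS mode = 'unordered';
```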

@ -7,6 +7,10 @@ pagination_next: en/operations/settings/settings

# Settings Overview

:::note
XML-based Settings Profiles and [configuration files](https://clickhouse.com/docs/en/operations/configuration-files) are currently not supported for ClickHouse Cloud. To specify settings for your ClickHouse Cloud service, you must use [SQL-driven Settings Profiles](https://clickhouse.com/docs/en/operations/access-rights#settings-profiles-management).
:::
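
A minimal sketch of the SQL-driven approach mentioned in the note (the profile and user names are hypothetical, and the `SETTINGS PROFILE` clause is assumed here to be accepted by `ALTER USER`):

``` sql
-- Define a settings profile and attach it to a user, entirely via SQL.
CREATE SETTINGS PROFILE my_profile SETTINGS max_threads = 8;
ALTER USER my_user SETTINGS PROFILE 'my_profile';
```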

There are two main groups of ClickHouse settings:

- Global server settings

@ -6,42 +6,42 @@ sidebar_label: UUID

# UUID

A universally unique identifier (UUID) is a 16-byte number used to identify records. For detailed information about the UUID, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier).
A Universally Unique Identifier (UUID) is a 16-byte value used to identify records. For detailed information about UUIDs, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier).

The example of UUID type value is represented below:
While different UUID variants exist (see [here](https://datatracker.ietf.org/doc/html/draft-ietf-uuidrev-rfc4122bis)), ClickHouse does not validate that inserted UUIDs conform to a particular variant. UUIDs are internally treated as a sequence of 16 random bytes with [8-4-4-4-12 representation](https://en.wikipedia.org/wiki/Universally_unique_identifier#Textual_representation) at the SQL level.

Example UUID value:

``` text
61f0c404-5cb3-11e7-907b-a6006ad3dba0
```

If you do not specify the UUID column value when inserting a new record, the UUID value is filled with zero:
The default UUID is all-zero. It is used, for example, when a new record is inserted but no value for a UUID column is specified:

``` text
00000000-0000-0000-0000-000000000000
```

## How to Generate
## Generating UUIDs

To generate the UUID value, ClickHouse provides the [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) function.
ClickHouse provides the [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) function to generate random UUID version 4 values.

## Usage Example

**Example 1**

This example demonstrates creating a table with the UUID type column and inserting a value into the table.
This example demonstrates the creation of a table with a UUID column and the insertion of a value into the table.

``` sql
CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog
```

``` sql
INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1'
```

``` sql
SELECT * FROM t_uuid
```

Result:

``` text
┌────────────────────────────────────x─┬─y─────────┐
│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │

@ -50,13 +50,11 @@ SELECT * FROM t_uuid

**Example 2**

In this example, the UUID column value is not specified when inserting a new record.
In this example, no UUID column value is specified when the record is inserted, i.e. the default UUID value is inserted:

``` sql
INSERT INTO t_uuid (y) VALUES ('Example 2')
```

``` sql
SELECT * FROM t_uuid
```

@ -18,7 +18,7 @@ file(path[, default])

**Arguments**

- `path` — The path of the file relative to [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports wildcards `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` are numbers and `'abc', 'def'` are strings.
- `path` — The path of the file relative to [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports wildcards `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` are numbers and `'abc', 'def'` are strings.
- `default` — The value returned if the file does not exist or cannot be accessed. Supported data types: [String](../../sql-reference/data-types/string.md) and [NULL](../../sql-reference/syntax.md#null-literal).

**Example**

@ -19,7 +19,7 @@ azureBlobStorage(- connection_string|storage_account_url, container_name, blobpa

- `connection_string|storage_account_url` — A connection_string includes the account name & key ([create a connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)); alternatively, you can provide the storage account URL here and the account name & account key as separate parameters (see the `account_name` & `account_key` parameters).
- `container_name` - Container name
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `account_name` - if `storage_account_url` is used, the account name can be specified here
- `account_key` - if `storage_account_url` is used, the account key can be specified here
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
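
A hedged usage sketch; the connection string below uses the well-known Azurite development-storage credentials, and the container and path are placeholders:

``` sql
-- Hypothetical: read all CSV blobs under data/ in container my-container.
SELECT *
FROM azureBlobStorage(
    'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1;',
    'my-container',
    'data/*.csv',
    'CSVWithNames');
```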

@ -19,7 +19,7 @@ azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, con

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `connection_string|storage_account_url` — A connection_string includes the account name & key ([create a connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)); alternatively, you can provide the storage account URL here and the account name & account key as separate parameters (see the `account_name` & `account_key` parameters).
- `container_name` - Container name
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `account_name` - if `storage_account_url` is used, the account name can be specified here
- `account_key` - if `storage_account_url` is used, the account key can be specified here
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
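
A complementary sketch showing the other authentication mode (storage account URL plus separate credentials, again with Azurite development values; the cluster name is hypothetical):

``` sql
-- Hypothetical: fan the reads out over cluster 'my_cluster'.
SELECT count(*)
FROM azureBlobStorageCluster(
    'my_cluster',
    'http://azurite1:10000/devstoreaccount1',
    'my-container',
    'data/**/*.csv',
    'devstoreaccount1',
    'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
    'CSVWithNames');
```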

@ -22,7 +22,7 @@ The GCS Table Function integrates with Google Cloud Storage by using the GCS XML

**Arguments**

- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.

:::note GCS
The GCS path is in this format because the endpoint for the Google XML API is different from the JSON API:

@ -17,7 +17,7 @@ hdfsCluster(cluster_name, URI, format, structure)

**Arguments**

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `URI` — URI to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `URI` — URI to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
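
A hedged sketch following the signature above (the cluster name, namenode address, and layout are hypothetical):

``` sql
-- Hypothetical: process every file under /user/data on all cluster nodes.
SELECT count(*)
FROM hdfsCluster(
    'my_cluster',
    'hdfs://namenode:9000/user/data/*',
    'TSV',
    'id UInt32, name String');
```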

@ -23,7 +23,7 @@ For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_

**Arguments**

- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).

:::note GCS
The GCS path is in this format because the endpoint for the Google XML API is different from the JSON API:

@ -16,7 +16,7 @@ s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,

**Arguments**

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `source` — URL to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `source` — URL to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `access_key_id` and `secret_access_key` — Keys that specify the credentials to use with the given endpoint. Optional.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
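
For illustration (the cluster name and bucket are hypothetical), a call combining the `**` wildcard with a public-style bucket and no credentials:

``` sql
-- Hypothetical: count rows across matching files, processed on all cluster nodes.
SELECT count(*)
FROM s3Cluster(
    'my_cluster',
    'https://my-bucket.s3.amazonaws.com/logs/**/*.json.gz',
    'JSONEachRow');
```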

@ -18,6 +18,9 @@ void HTTPHeaderFilter::checkHeaders(const HTTPHeaderEntries & entries) const

    for (const auto & entry : entries)
    {
        if (entry.name.contains('\n') || entry.value.contains('\n'))
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "HTTP header \"{}\" has invalid character", entry.name);

        if (forbidden_headers.contains(entry.name))
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "HTTP header \"{}\" is forbidden in configuration file, "
                            "see <http_forbid_headers>", entry.name);

@ -818,6 +818,31 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
    query_context->setQueryKindReplicatedDatabaseInternal();
    query_context->setCurrentDatabase(getDatabaseName());
    query_context->setCurrentQueryId("");

    /// We will execute some CREATE queries for recovery (not ATTACH queries),
    /// so we need to allow experimental features that can be used in a CREATE query
    query_context->setSetting("allow_experimental_inverted_index", 1);
    query_context->setSetting("allow_experimental_codecs", 1);
    query_context->setSetting("allow_experimental_live_view", 1);
    query_context->setSetting("allow_experimental_window_view", 1);
    query_context->setSetting("allow_experimental_funnel_functions", 1);
    query_context->setSetting("allow_experimental_nlp_functions", 1);
    query_context->setSetting("allow_experimental_hash_functions", 1);
    query_context->setSetting("allow_experimental_object_type", 1);
    query_context->setSetting("allow_experimental_annoy_index", 1);
    query_context->setSetting("allow_experimental_bigint_types", 1);
    query_context->setSetting("allow_experimental_window_functions", 1);
    query_context->setSetting("allow_experimental_geo_types", 1);
    query_context->setSetting("allow_experimental_map_type", 1);

    query_context->setSetting("allow_suspicious_low_cardinality_types", 1);
    query_context->setSetting("allow_suspicious_fixed_string_types", 1);
    query_context->setSetting("allow_suspicious_indices", 1);
    query_context->setSetting("allow_suspicious_codecs", 1);
    query_context->setSetting("allow_hyperscan", 1);
    query_context->setSetting("allow_simdjson", 1);
    query_context->setSetting("allow_deprecated_syntax_for_merge_tree", 1);

    auto txn = std::make_shared<ZooKeeperMetadataTransaction>(current_zookeeper, zookeeper_path, false, "");
    query_context->initZooKeeperMetadataTransaction(txn);
    return query_context;

@ -8,7 +8,7 @@ namespace DB

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNEXPECTED_END_OF_FILE;
    extern const int CANNOT_SEEK_THROUGH_FILE;
    extern const int SEEK_POSITION_OUT_OF_BOUND;

@ -260,7 +260,7 @@ void ParallelReadBuffer::readerThreadFunction(ReadWorkerPtr read_worker)

        if (!on_progress(r) && r < read_worker->segment.size())
            throw Exception(
                ErrorCodes::LOGICAL_ERROR,
                ErrorCodes::UNEXPECTED_END_OF_FILE,
                "Failed to read all the data from the reader at offset {}, got {}/{} bytes",
                read_worker->start_offset, r, read_worker->segment.size());
}

@ -1101,17 +1101,14 @@ inline void readBinary(bool & x, ReadBuffer & buf)
}

inline void readBinary(String & x, ReadBuffer & buf) { readStringBinary(x, buf); }
inline void readBinary(Int32 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(Int128 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(Int256 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(UInt32 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(UInt128 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(UInt256 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(Decimal32 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(Decimal64 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(Decimal128 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(Decimal256 & x, ReadBuffer & buf) { readPODBinary(x.value, buf); }
inline void readBinary(LocalDate & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(UUID & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(IPv4 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
inline void readBinary(IPv6 & x, ReadBuffer & buf) { readPODBinary(x, buf); }

inline void readBinary(StackTrace::FramePointers & x, ReadBuffer & buf) { readPODBinary(x, buf); }

@ -520,8 +520,6 @@ ContextMutablePtr Session::makeSessionContext()
        {},
        session_context->getSettingsRef().max_sessions_for_user);

    recordLoginSucess(session_context);

    return session_context;
}

@ -584,8 +582,6 @@ ContextMutablePtr Session::makeSessionContext(const String & session_name_, std:
        { session_name_ },
        max_sessions_for_user);

    recordLoginSucess(session_context);

    return session_context;
}

@ -659,38 +655,24 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t
    if (user_id)
        user = query_context->getUser();

    /// Interserver does not create session context
    recordLoginSucess(query_context);
    if (!notified_session_log_about_login)
    {
        if (auto session_log = getSessionLog())
        {
            session_log->addLoginSuccess(
                auth_id,
                named_session ? std::optional<std::string>(named_session->key.second) : std::nullopt,
                *query_context,
                user);

            notified_session_log_about_login = true;
        }
    }

    return query_context;
}


void Session::recordLoginSucess(ContextPtr login_context) const
{
    if (notified_session_log_about_login)
        return;

    if (!login_context)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Session or query context must be created");

    if (auto session_log = getSessionLog())
    {
        const auto & settings = login_context->getSettingsRef();
        const auto access = login_context->getAccess();

        session_log->addLoginSuccess(auth_id,
                                     named_session ? named_session->key.second : "",
                                     settings,
                                     access,
                                     getClientInfo(),
                                     user);

        notified_session_log_about_login = true;
    }
}


void Session::releaseSessionID()
{
    if (!named_session)

@ -97,8 +97,6 @@ public:
private:
    std::shared_ptr<SessionLog> getSessionLog() const;
    ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const;
    void recordLoginSucess(ContextPtr login_context) const;


    mutable bool notified_session_log_about_login = false;
    const UUID auth_id;

@ -199,13 +199,12 @@ void SessionLogElement::appendToBlock(MutableColumns & columns) const
    columns[i++]->insertData(auth_failure_reason.data(), auth_failure_reason.length());
}

void SessionLog::addLoginSuccess(const UUID & auth_id,
                                 const String & session_id,
                                 const Settings & settings,
                                 const ContextAccessPtr & access,
                                 const ClientInfo & client_info,
                                 const UserPtr & login_user)
void SessionLog::addLoginSuccess(const UUID & auth_id, std::optional<String> session_id, const Context & login_context, const UserPtr & login_user)
{
    const auto access = login_context.getAccess();
    const auto & settings = login_context.getSettingsRef();
    const auto & client_info = login_context.getClientInfo();

    DB::SessionLogElement log_entry(auth_id, SESSION_LOGIN_SUCCESS);
    log_entry.client_info = client_info;

@ -216,7 +215,8 @@ void SessionLog::addLoginSuccess(const UUID & auth_id,
    }
    log_entry.external_auth_server = login_user ? login_user->auth_data.getLDAPServerName() : "";

    log_entry.session_id = session_id;
    if (session_id)
        log_entry.session_id = *session_id;

    if (const auto roles_info = access->getRolesInfo())
        log_entry.roles = roles_info->getCurrentRolesNames();

@ -20,7 +20,6 @@ enum SessionLogElementType : int8_t
class ContextAccess;
struct User;
using UserPtr = std::shared_ptr<const User>;
using ContextAccessPtr = std::shared_ptr<const ContextAccess>;

/** A struct which will be inserted as row into session_log table.
 *

@ -73,13 +72,7 @@ class SessionLog : public SystemLog<SessionLogElement>
    using SystemLog<SessionLogElement>::SystemLog;

public:
    void addLoginSuccess(const UUID & auth_id,
                         const String & session_id,
                         const Settings & settings,
                         const ContextAccessPtr & access,
                         const ClientInfo & client_info,
                         const UserPtr & login_user);

    void addLoginSuccess(const UUID & auth_id, std::optional<String> session_id, const Context & login_context, const UserPtr & login_user);
    void addLoginFailure(const UUID & auth_id, const ClientInfo & info, const std::optional<String> & user, const Exception & reason);
    void addLogOut(const UUID & auth_id, const UserPtr & login_user, const ClientInfo & client_info);
};

@ -561,7 +561,8 @@ void HTTPHandler::processQuery(
        session->makeSessionContext();
    }

    auto context = session->makeQueryContext();
    auto client_info = session->getClientInfo();
    auto context = session->makeQueryContext(std::move(client_info));

    /// This parameter is used to tune the behavior of output formats (such as Native) for compatibility.
    if (params.has("client_protocol_version"))

@ -98,7 +98,9 @@ void S3QueueFilesMetadata::S3QueueProcessedCollection::parse(const String & coll

void S3QueueFilesMetadata::S3QueueProcessedCollection::add(const String & file_name)
{
    TrackedCollectionItem processed_file = { .file_path=file_name, .timestamp = getCurrentTime() };
    TrackedCollectionItem processed_file;
    processed_file.file_path = file_name;
    processed_file.timestamp = getCurrentTime();
    files.push_back(processed_file);

    /// TODO: it is strange that in parse() we take into account only max_age, but here only max_size.

@ -17,6 +17,9 @@ class S3QueueFilesMetadata
public:
    struct TrackedCollectionItem
    {
        TrackedCollectionItem() = default;
        TrackedCollectionItem(const String & file_path_, UInt64 timestamp_, UInt64 retries_count_, const String & last_exception_)
            : file_path(file_path_), timestamp(timestamp_), retries_count(retries_count_), last_exception(last_exception_) {}
        String file_path;
        UInt64 timestamp = 0;
        UInt64 retries_count = 0;

@ -175,7 +175,7 @@ void TableFunctionRemote::parseArguments(const ASTPtr & ast_function, ContextPtr
    {
        if (arg_num >= args.size())
        {
            throw Exception(help_message, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table name was not found in function arguments. {}", static_cast<const std::string>(help_message));
        }
        else
        {

@ -220,15 +220,19 @@ void TableFunctionRemote::parseArguments(const ASTPtr & ast_function, ContextPtr
            ++arg_num;
        }

        if (arg_num < args.size() && !sharding_key)
        if (arg_num < args.size())
        {
            if (sharding_key)
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Arguments `user` and `password` should be string literals (in single quotes)");
            sharding_key = args[arg_num];
            ++arg_num;
        }
    }

    if (arg_num < args.size())
    {
        throw Exception(help_message, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
    }
}

    if (!cluster_name.empty())

@ -20,7 +20,12 @@ def tune_local_port_range():
    #
    # NOTE: 5K is not enough, and sometimes leads to EADDRNOTAVAIL error.
    # NOTE: it is not inherited, so you may need to specify this in docker_compose_$SERVICE.yml
    run_and_check(["sysctl net.ipv4.ip_local_port_range='55000 65535'"], shell=True)
    try:
        run_and_check(["sysctl net.ipv4.ip_local_port_range='55000 65535'"], shell=True)
    except Exception as ex:
        logging.warning(
            "Failed to run sysctl, tests may fail with EADDRINUSE %s", str(ex)
        )


@pytest.fixture(autouse=True, scope="session")

@ -1,4 +1,10 @@
import pytest

# FIXME This test is too flaky
# https://github.com/ClickHouse/ClickHouse/issues/51471

pytestmark = pytest.mark.skip

import socket
from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check
from time import sleep

@ -27,7 +27,10 @@ proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    f"python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} {proto_dir}/clickhouse_grpc.proto",
    "python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \
        {proto_dir}/clickhouse_grpc.proto".format(
        proto_dir=proto_dir, gen_dir=gen_dir
    ),
    shell=True,
)

@ -4,6 +4,8 @@
            <allow_drop_detached>1</allow_drop_detached>
            <allow_experimental_database_replicated>1</allow_experimental_database_replicated>
            <allow_experimental_alter_materialized_view_structure>1</allow_experimental_alter_materialized_view_structure>
            <allow_experimental_object_type>0</allow_experimental_object_type>
            <allow_suspicious_codecs>0</allow_suspicious_codecs>
        </default>
    </profiles>
    <users>

@ -672,7 +672,11 @@ def test_alters_from_different_replicas(started_cluster):


def create_some_tables(db):
    settings = {"distributed_ddl_task_timeout": 0}
    settings = {
        "distributed_ddl_task_timeout": 0,
        "allow_experimental_object_type": 1,
        "allow_suspicious_codecs": 1,
    }
    main_node.query(f"CREATE TABLE {db}.t1 (n int) ENGINE=Memory", settings=settings)
    dummy_node.query(
        f"CREATE TABLE {db}.t2 (s String) ENGINE=Memory", settings=settings

@ -690,11 +694,11 @@ def create_some_tables(db):
        settings=settings,
    )
    dummy_node.query(
        f"CREATE TABLE {db}.rmt2 (n int) ENGINE=ReplicatedMergeTree order by n",
        f"CREATE TABLE {db}.rmt2 (n int CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12))) ENGINE=ReplicatedMergeTree order by n",
        settings=settings,
    )
    main_node.query(
        f"CREATE TABLE {db}.rmt3 (n int) ENGINE=ReplicatedMergeTree order by n",
        f"CREATE TABLE {db}.rmt3 (n int, json Object('json') materialized '') ENGINE=ReplicatedMergeTree order by n",
        settings=settings,
    )
    dummy_node.query(

@ -868,7 +872,10 @@ def test_recover_staled_replica(started_cluster):
    ]:
        assert main_node.query(f"SELECT (*,).1 FROM recover.{table}") == "42\n"
    for table in ["t2", "rmt1", "rmt2", "rmt4", "d1", "d2", "mt2", "mv1", "mv3"]:
        assert dummy_node.query(f"SELECT (*,).1 FROM recover.{table}") == "42\n"
        assert (
            dummy_node.query(f"SELECT '{table}', (*,).1 FROM recover.{table}")
            == f"{table}\t42\n"
        )
    for table in ["m1", "mt1"]:
        assert dummy_node.query(f"SELECT count() FROM recover.{table}") == "0\n"
    global test_recover_staled_replica_run

@ -1,4 +1,10 @@
import pytest

# FIXME This test is broken
# https://github.com/ClickHouse/ClickHouse/issues/53194

pytestmark = pytest.mark.skip

from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check
from time import sleep
import os

@ -1 +0,0 @@
_gen

@ -1,9 +0,0 @@
<clickhouse>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
    </logger>
</clickhouse>

@ -1,9 +0,0 @@
<clickhouse>
    <postgresql_port>5433</postgresql_port>
    <mysql_port>9001</mysql_port>
    <grpc_port>9100</grpc_port>
    <grpc replace="replace">
        <!-- Enable if you want very detailed logs -->
        <verbose_logs>false</verbose_logs>
    </grpc>
</clickhouse>

@ -1,9 +0,0 @@
<clickhouse>
    <session_log>
        <database>system</database>
        <table>session_log</table>

        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </session_log>
</clickhouse>

@ -1,23 +0,0 @@
<clickhouse>
    <profiles>
        <default>
            <function_sleep_max_microseconds_per_block>0</function_sleep_max_microseconds_per_block>
        </default>
    </profiles>
    <users>
        <default>
        </default>
        <mysql_user>
            <password>pass</password>
        </mysql_user>
        <postgres_user>
            <password>pass</password>
        </postgres_user>
        <grpc_user>
            <password>pass</password>
        </grpc_user>
        <parallel_user>
            <password>pass</password>
        </parallel_user>
    </users>
</clickhouse>

@ -1 +0,0 @@
../../../../src/Server/grpc_protos/clickhouse_grpc.proto

@ -1,289 +0,0 @@
import os

import grpc
import pymysql.connections
import psycopg2 as py_psql
import pytest
import random
import sys
import threading

from helpers.cluster import ClickHouseCluster, run_and_check

POSTGRES_SERVER_PORT = 5433
MYSQL_SERVER_PORT = 9001
GRPC_PORT = 9100
SESSION_LOG_MATCHING_FIELDS = "auth_id, auth_type, client_version_major, client_version_minor, client_version_patch, interface"

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"

# Use grpcio-tools to generate *pb2.py files from *.proto.
proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    f"python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} {proto_dir}/clickhouse_grpc.proto",
    shell=True,
)

sys.path.append(gen_dir)

import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
    "node",
    main_configs=[
        "configs/ports.xml",
        "configs/log.xml",
        "configs/session_log.xml",
    ],
    user_configs=["configs/users.xml"],
    # Bug in TSAN reproduces in this test https://github.com/grpc/grpc/issues/29550#issuecomment-1188085387
    env_variables={
        "TSAN_OPTIONS": "report_atomic_races=0 " + os.getenv("TSAN_OPTIONS", default="")
    },
)


def grpc_get_url():
    return f"{instance.ip_address}:{GRPC_PORT}"


def grpc_create_insecure_channel():
    channel = grpc.insecure_channel(grpc_get_url())
    grpc.channel_ready_future(channel).result(timeout=2)
    return channel


session_id_counter = 0


def next_session_id():
    global session_id_counter
    session_id = session_id_counter
    session_id_counter += 1
    return str(session_id)


def grpc_query(query, user_, pass_, raise_exception):
    try:
        query_info = clickhouse_grpc_pb2.QueryInfo(
            query=query,
            session_id=next_session_id(),
            user_name=user_,
            password=pass_,
        )
        channel = grpc_create_insecure_channel()
        stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(channel)
        result = stub.ExecuteQuery(query_info)
        if result and result.HasField("exception"):
            raise Exception(result.exception.display_text)

        return result.output.decode(DEFAULT_ENCODING)
    except Exception:
        assert raise_exception


def postgres_query(query, user_, pass_, raise_exception):
    try:
        client = py_psql.connect(
            host=instance.ip_address,
            port=POSTGRES_SERVER_PORT,
            user=user_,
            password=pass_,
            database="default",
        )
        cursor = client.cursor()
        cursor.execute(query)
        cursor.fetchall()
    except Exception:
        assert raise_exception


def mysql_query(query, user_, pass_, raise_exception):
    try:
        client = pymysql.connections.Connection(
            host=instance.ip_address,
            user=user_,
            password=pass_,
            database="default",
            port=MYSQL_SERVER_PORT,
        )
        cursor = client.cursor(pymysql.cursors.DictCursor)
        if raise_exception:
            with pytest.raises(Exception):
                cursor.execute(query)
        else:
            cursor.execute(query)
        cursor.fetchall()
    except Exception:
        assert raise_exception


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_grpc_session(started_cluster):
    grpc_query("SELECT 1", "grpc_user", "pass", False)
    grpc_query("SELECT 2", "grpc_user", "wrong_pass", True)
    grpc_query("SELECT 3", "wrong_grpc_user", "pass", True)

    instance.query("SYSTEM FLUSH LOGS")
    login_success_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'LoginSuccess'"
    )
    assert login_success_records == "grpc_user\t1\t1\n"
    logout_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'Logout'"
    )
    assert logout_records == "grpc_user\t1\t1\n"
    login_failure_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'LoginFailure'"
    )
    assert login_failure_records == "grpc_user\t1\t1\n"
    logins_and_logouts = instance.query(
        f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'grpc_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'grpc_user' AND type = 'Logout')"
    )
    assert logins_and_logouts == "1\n"


def test_mysql_session(started_cluster):
    mysql_query("SELECT 1", "mysql_user", "pass", False)
    mysql_query("SELECT 2", "mysql_user", "wrong_pass", True)
    mysql_query("SELECT 3", "wrong_mysql_user", "pass", True)

    instance.query("SYSTEM FLUSH LOGS")
    login_success_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'LoginSuccess'"
    )
    assert login_success_records == "mysql_user\t1\t1\n"
    logout_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'Logout'"
    )
    assert logout_records == "mysql_user\t1\t1\n"
    login_failure_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'LoginFailure'"
    )
    assert login_failure_records == "mysql_user\t1\t1\n"
    logins_and_logouts = instance.query(
        f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'mysql_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'mysql_user' AND type = 'Logout')"
    )
    assert logins_and_logouts == "1\n"


def test_postgres_session(started_cluster):
    postgres_query("SELECT 1", "postgres_user", "pass", False)
    postgres_query("SELECT 2", "postgres_user", "wrong_pass", True)
    postgres_query("SELECT 3", "wrong_postgres_user", "pass", True)

    instance.query("SYSTEM FLUSH LOGS")
    login_success_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'LoginSuccess'"
    )
    assert login_success_records == "postgres_user\t1\t1\n"
    logout_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'Logout'"
    )
    assert logout_records == "postgres_user\t1\t1\n"
    login_failure_records = instance.query(
        "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'LoginFailure'"
    )
    assert login_failure_records == "postgres_user\t1\t1\n"
    logins_and_logouts = instance.query(
        f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'postgres_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'postgres_user' AND type = 'Logout')"
    )
    assert logins_and_logouts == "1\n"


def test_parallel_sessions(started_cluster):
    thread_list = []
    for _ in range(10):
        # Sleep time does not significantly matter here,
        # test should pass even without sleeping.
        for function in [postgres_query, grpc_query, mysql_query]:
            thread = threading.Thread(
                target=function,
                args=(
                    f"SELECT sleep({random.uniform(0.03, 0.04)})",
                    "parallel_user",
                    "pass",
                    False,
                ),
            )
            thread.start()
            thread_list.append(thread)
            thread = threading.Thread(
                target=function,
                args=(
                    f"SELECT sleep({random.uniform(0.03, 0.04)})",
                    "parallel_user",
                    "wrong_pass",
                    True,
                ),
            )
            thread.start()
            thread_list.append(thread)
            thread = threading.Thread(
                target=function,
                args=(
                    f"SELECT sleep({random.uniform(0.03, 0.04)})",
                    "wrong_parallel_user",
                    "pass",
                    True,
                ),
            )
            thread.start()
            thread_list.append(thread)

    for thread in thread_list:
        thread.join()

    instance.query("SYSTEM FLUSH LOGS")
    port_0_sessions = instance.query(
        f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user'"
    )
    assert port_0_sessions == "90\n"

    port_0_sessions = instance.query(
        f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND client_port = 0"
    )
    assert port_0_sessions == "0\n"

    address_0_sessions = instance.query(
        f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND client_address = toIPv6('::')"
    )
    assert address_0_sessions == "0\n"

    grpc_sessions = instance.query(
        f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'gRPC'"
    )
    assert grpc_sessions == "30\n"

    mysql_sessions = instance.query(
        f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'MySQL'"
    )
    assert mysql_sessions == "30\n"

    postgres_sessions = instance.query(
        f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'PostgreSQL'"
    )
    assert postgres_sessions == "30\n"

    logins_and_logouts = instance.query(
        f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'parallel_user' AND type = 'Logout')"
    )
    assert logins_and_logouts == "30\n"

    logout_failure_sessions = instance.query(
        f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginFailure'"
    )
    assert logout_failure_sessions == "30\n"

@ -24,6 +24,11 @@ system flush logs;
select * from system.zookeeper_log where path like '/test/02439/s1/' || currentDatabase() || '/block_numbers/%'
    and op_num in ('List', 'SimpleList', 'FilteredList')
    and path not like '%/block_numbers/1' and path not like '%/block_numbers/123'
    and event_time >= now() - interval 1 minute;
    and event_time >= now() - interval 1 minute
    -- avoid race with tests like 02311_system_zookeeper_insert
    and (query_id is null or query_id='' or query_id in
        (select query_id from system.query_log
            where event_time >= now() - interval 1 minute and current_database=currentDatabase())
    );

drop table rmt;

@ -3,16 +3,24 @@

SELECT * FROM url('http://localhost:8123/', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM url('http://localhost:8123/', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM url('http://localhost:8123/', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM url('http://localhost:8123/', LineAsString, headers('bad_header_value' = 'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM url('http://localhost:8123/', LineAsString, headers('random_header' = 'value')) FORMAT Null;

SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('bad_header_value' = 'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('random_header' = 'value')) FORMAT Null;

SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('bad_header_value' = 'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('random_header' = 'value')); -- { serverError S3_ERROR }

SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('bad_header_value' = 'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS }
SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('random_header' = 'value')); -- { serverError S3_ERROR }

@ -1,34 +0,0 @@
sessions:
150
port_0_sessions:
0
address_0_sessions:
0
tcp_sessions
60
http_sessions
30
http_with_session_id_sessions
30
my_sql_sessions
30
Corresponding LoginSuccess/Logout
10
LoginFailure
10
Corresponding LoginSuccess/Logout
10
LoginFailure
10
Corresponding LoginSuccess/Logout
10
LoginFailure
10
Corresponding LoginSuccess/Logout
10
LoginFailure
10
Corresponding LoginSuccess/Logout
10
LoginFailure
10
@ -1,138 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, long

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

readonly PID=$$

# Each user uses a separate thread.
readonly TCP_USERS=( "02833_TCP_USER_${PID}"_{1,2} ) # 2 concurrent TCP users
readonly HTTP_USERS=( "02833_HTTP_USER_${PID}" )
readonly HTTP_WITH_SESSION_ID_SESSION_USERS=( "02833_HTTP_WITH_SESSION_ID_USER_${PID}" )
readonly MYSQL_USERS=( "02833_MYSQL_USER_${PID}" )
readonly ALL_USERS=( "${TCP_USERS[@]}" "${HTTP_USERS[@]}" "${HTTP_WITH_SESSION_ID_SESSION_USERS[@]}" "${MYSQL_USERS[@]}" )

readonly TCP_USERS_SQL_COLLECTION_STRING="$( echo "${TCP_USERS[*]}" | sed "s/[^[:space:]]\+/'&'/g" | sed 's/[[:space:]]/,/g' )"
readonly HTTP_USERS_SQL_COLLECTION_STRING="$( echo "${HTTP_USERS[*]}" | sed "s/[^[:space:]]\+/'&'/g" | sed 's/[[:space:]]/,/g' )"
readonly HTTP_WITH_SESSION_ID_USERS_SQL_COLLECTION_STRING="$( echo "${HTTP_WITH_SESSION_ID_SESSION_USERS[*]}" | sed "s/[^[:space:]]\+/'&'/g" | sed 's/[[:space:]]/,/g' )"
readonly MYSQL_USERS_SQL_COLLECTION_STRING="$( echo "${MYSQL_USERS[*]}" | sed "s/[^[:space:]]\+/'&'/g" | sed 's/[[:space:]]/,/g' )"
readonly ALL_USERS_SQL_COLLECTION_STRING="$( echo "${ALL_USERS[*]}" | sed "s/[^[:space:]]\+/'&'/g" | sed 's/[[:space:]]/,/g' )"

readonly SESSION_LOG_MATCHING_FIELDS="auth_id, auth_type, client_version_major, client_version_minor, client_version_patch, interface"

for user in "${ALL_USERS[@]}"; do
    ${CLICKHOUSE_CLIENT} -q "CREATE USER IF NOT EXISTS ${user} IDENTIFIED WITH plaintext_password BY 'pass'"
    ${CLICKHOUSE_CLIENT} -q "GRANT SELECT ON system.* TO ${user}"
    ${CLICKHOUSE_CLIENT} -q "GRANT SELECT ON INFORMATION_SCHEMA.* TO ${user}"
done

# All <type>_session functions execute in separate threads.
# Each function tries to create a session with a successful login and logout,
# sleeps a small, random amount of time to make concurrency more intense,
# and then tries to log in with an invalid password.
function tcp_session()
{
    local user=$1
    local i=0
    while (( (i++) < 10 )); do
        # login logout
        ${CLICKHOUSE_CLIENT} -q "SELECT 1, sleep(0.01${RANDOM})" --user="${user}" --password="pass"
        # login failure
        ${CLICKHOUSE_CLIENT} -q "SELECT 2" --user="${user}" --password 'invalid'
    done
}

function http_session()
{
    local user=$1
    local i=0
    while (( (i++) < 10 )); do
        # login logout
        ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${user}&password=pass" -d "SELECT 3, sleep(0.01${RANDOM})"

        # login failure
        ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${user}&password=wrong" -d "SELECT 4"
    done
}

function http_with_session_id_session()
{
    local user=$1
    local i=0
    while (( (i++) < 10 )); do
        # login logout
        ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${user}&user=${user}&password=pass" -d "SELECT 5, sleep(0.01${RANDOM})"

        # login failure
        ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${user}&user=${user}&password=wrong" -d "SELECT 6"
    done
}

function mysql_session()
{
    local user=$1
    local i=0
    while (( (i++) < 10 )); do
        # login logout
        ${CLICKHOUSE_CLIENT} -q "SELECT 1, sleep(0.01${RANDOM}) FROM mysql('127.0.0.1:9004', 'system', 'one', '${user}', 'pass')"

        # login failure
        ${CLICKHOUSE_CLIENT} -q "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'one', '${user}', 'wrong', SETTINGS connection_max_tries=1)"
    done
}

${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"
${CLICKHOUSE_CLIENT} -q "DELETE FROM system.session_log WHERE user IN (${ALL_USERS_SQL_COLLECTION_STRING})"

export -f tcp_session;
export -f http_session;
export -f http_with_session_id_session;
export -f mysql_session;

for user in "${TCP_USERS[@]}"; do
    timeout 60s bash -c "tcp_session ${user}" >/dev/null 2>&1 &
done

for user in "${HTTP_USERS[@]}"; do
    timeout 60s bash -c "http_session ${user}" >/dev/null 2>&1 &
done

for user in "${HTTP_WITH_SESSION_ID_SESSION_USERS[@]}"; do
    timeout 60s bash -c "http_with_session_id_session ${user}" >/dev/null 2>&1 &
done

for user in "${MYSQL_USERS[@]}"; do
    timeout 60s bash -c "mysql_session ${user}" >/dev/null 2>&1 &
done

wait

${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"

echo "sessions:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${ALL_USERS_SQL_COLLECTION_STRING})"

echo "port_0_sessions:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${ALL_USERS_SQL_COLLECTION_STRING}) AND client_port = 0"

echo "address_0_sessions:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${ALL_USERS_SQL_COLLECTION_STRING}) AND client_address = toIPv6('::')"

echo "tcp_sessions"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${TCP_USERS_SQL_COLLECTION_STRING}) AND interface = 'TCP'"
echo "http_sessions"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${HTTP_USERS_SQL_COLLECTION_STRING}) AND interface = 'HTTP'"
echo "http_with_session_id_sessions"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${HTTP_WITH_SESSION_ID_USERS_SQL_COLLECTION_STRING}) AND interface = 'HTTP'"
echo "my_sql_sessions"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${MYSQL_USERS_SQL_COLLECTION_STRING}) AND interface = 'MySQL'"

for user in "${ALL_USERS[@]}"; do
    ${CLICKHOUSE_CLIENT} -q "DROP USER ${user}"
    echo "Corresponding LoginSuccess/Logout"
    ${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'LoginSuccess' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'Logout')"
    echo "LoginFailure"
    ${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM system.session_log WHERE user = '${user}' AND type = 'LoginFailure'"
done
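The final loop above relies on an INTERSECT trick: a LoginSuccess row counts as paired with a Logout row only when all the stable session fields agree. A standalone sketch of the same query, with 'some_user' as a placeholder name:

-- Sketch only: count LoginSuccess rows that have a matching Logout row.
SELECT COUNT(*)
FROM
(
    SELECT auth_id, auth_type, client_version_major, client_version_minor, client_version_patch, interface
    FROM system.session_log
    WHERE user = 'some_user' AND type = 'LoginSuccess'
    INTERSECT
    SELECT auth_id, auth_type, client_version_major, client_version_minor, client_version_patch, interface
    FROM system.session_log
    WHERE user = 'some_user' AND type = 'Logout'
);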
@ -1,13 +0,0 @@
0
0
0
0
client_port 0 connections:
0
client_address '::' connections:
0
login failures:
0
TCP Login and logout count is equal
HTTP Login and logout count is equal
MySQL Login and logout count is equal
@ -1,56 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

readonly PID=$$
readonly TEST_USER="02834_USER_${PID}"
readonly SESSION_LOG_MATCHING_FIELDS="auth_id, auth_type, client_version_major, client_version_minor, client_version_patch, interface"

${CLICKHOUSE_CLIENT} -q "CREATE USER IF NOT EXISTS ${TEST_USER} IDENTIFIED WITH plaintext_password BY 'pass'"
${CLICKHOUSE_CLIENT} -q "GRANT SELECT ON INFORMATION_SCHEMA.* TO ${TEST_USER}"
${CLICKHOUSE_CLIENT} -q "GRANT SELECT ON system.* TO ${TEST_USER}"
${CLICKHOUSE_CLIENT} -q "GRANT CREATE TEMPORARY TABLE, MYSQL, REMOTE ON *.* TO ${TEST_USER}"

${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"
${CLICKHOUSE_CLIENT} -q "DELETE FROM system.session_log WHERE user = '${TEST_USER}'"

${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=${TEST_USER}&password=pass" \
    -d "SELECT * FROM remote('127.0.0.1:${CLICKHOUSE_PORT_TCP}', 'system', 'one', '${TEST_USER}', 'pass')"

${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=${TEST_USER}&password=pass" \
    -d "SELECT * FROM mysql('127.0.0.1:9004', 'system', 'one', '${TEST_USER}', 'pass')"

${CLICKHOUSE_CLIENT} -q "SELECT * FROM remote('127.0.0.1:${CLICKHOUSE_PORT_TCP}', 'system', 'one', '${TEST_USER}', 'pass')" -u "${TEST_USER}" --password "pass"
${CLICKHOUSE_CLIENT} -q "SELECT * FROM mysql('127.0.0.1:9004', 'system', 'one', '${TEST_USER}', 'pass')" -u "${TEST_USER}" --password "pass"

${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"

echo "client_port 0 connections:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' and client_port = 0"

echo "client_address '::' connections:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' and client_address = toIPv6('::')"

echo "login failures:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' and type = 'LoginFailure'"

# The remote(...) function sometimes reuses old cached sessions for query execution.
# That makes the LoginSuccess/Logout entry counts unstable, but successes and logouts must always match.

for interface in 'TCP' 'HTTP' 'MySQL'
do
    LOGIN_COUNT=$(${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}'")
    CORRESPONDING_LOGOUT_RECORDS_COUNT=$(${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout' AND interface = '${interface}')")

    if [ "$LOGIN_COUNT" == "$CORRESPONDING_LOGOUT_RECORDS_COUNT" ]; then
        echo "${interface} Login and logout count is equal"
    else
        TOTAL_LOGOUT_COUNT=$(${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout' AND interface = '${interface}'")
        echo "${interface} Login count ${LOGIN_COUNT} != corresponding logout count ${CORRESPONDING_LOGOUT_RECORDS_COUNT}. TOTAL_LOGOUT_COUNT ${TOTAL_LOGOUT_COUNT}"
    fi
done

${CLICKHOUSE_CLIENT} -q "DROP USER ${TEST_USER}"
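The port/address checks above guard against sessions being recorded without a real peer. A sketch for inspecting those fields directly, with a placeholder user name:

-- Sketch only: show the recorded origin of each session for a given user.
-- A client_port of 0 or client_address of '::' would indicate a session
-- logged without a real peer, which is what the test above asserts against.
SELECT type, interface, client_port, client_address
FROM system.session_log
WHERE user = 'some_user'  -- placeholder
ORDER BY event_time;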
@ -1,8 +0,0 @@
port_0_sessions:
0
address_0_sessions:
0
Corresponding LoginSuccess/Logout
9
LoginFailure
0
@ -1,113 +0,0 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

readonly PID=$$

readonly TEST_USER="02835_USER_${PID}"
readonly TEST_ROLE="02835_ROLE_${PID}"
readonly TEST_PROFILE="02835_PROFILE_${PID}"
readonly SESSION_LOG_MATCHING_FIELDS="auth_id, auth_type, client_version_major, client_version_minor, client_version_patch, interface"

function tcp_session()
{
    local user=$1
    ${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM system.numbers" --user="${user}"
}

function http_session()
{
    local user=$1
    ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${user}&password=pass" -d "SELECT COUNT(*) FROM system.numbers"
}

function http_with_session_id_session()
{
    local user=$1
    ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${user}&password=pass" -d "SELECT COUNT(*) FROM system.numbers"
}

# Busy-waits until the given number of queries ($2) from user $1 are running simultaneously.
function wait_for_queries_start()
{
    local user=$1
    local queries_count=$2
    # wait for up to 10 seconds
    counter=0 retries=100
    while [[ $counter -lt $retries ]]; do
        result=$($CLICKHOUSE_CLIENT --query "SELECT COUNT(*) FROM system.processes WHERE user = '${user}'")
        if [[ $result == "${queries_count}" ]]; then
            break;
        fi
        sleep 0.1
        ((++counter))
    done
}

${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"
${CLICKHOUSE_CLIENT} -q "DELETE FROM system.session_log WHERE user = '${TEST_USER}'"

# DROP USER CASE
${CLICKHOUSE_CLIENT} -q "CREATE USER IF NOT EXISTS ${TEST_USER}"
${CLICKHOUSE_CLIENT} -q "GRANT SELECT ON system.numbers TO ${TEST_USER}"

export -f tcp_session;
export -f http_session;
export -f http_with_session_id_session;

timeout 10s bash -c "tcp_session ${TEST_USER}" >/dev/null 2>&1 &
timeout 10s bash -c "http_session ${TEST_USER}" >/dev/null 2>&1 &
timeout 10s bash -c "http_with_session_id_session ${TEST_USER}" >/dev/null 2>&1 &

wait_for_queries_start $TEST_USER 3
${CLICKHOUSE_CLIENT} -q "DROP USER ${TEST_USER}"
${CLICKHOUSE_CLIENT} -q "KILL QUERY WHERE user = '${TEST_USER}' SYNC" >/dev/null &

wait

# DROP ROLE CASE
${CLICKHOUSE_CLIENT} -q "CREATE ROLE IF NOT EXISTS ${TEST_ROLE}"
${CLICKHOUSE_CLIENT} -q "CREATE USER ${TEST_USER} DEFAULT ROLE ${TEST_ROLE}"
${CLICKHOUSE_CLIENT} -q "GRANT SELECT ON system.numbers TO ${TEST_USER}"

timeout 10s bash -c "tcp_session ${TEST_USER}" >/dev/null 2>&1 &
timeout 10s bash -c "http_session ${TEST_USER}" >/dev/null 2>&1 &
timeout 10s bash -c "http_with_session_id_session ${TEST_USER}" >/dev/null 2>&1 &

wait_for_queries_start $TEST_USER 3
${CLICKHOUSE_CLIENT} -q "DROP ROLE ${TEST_ROLE}"
${CLICKHOUSE_CLIENT} -q "DROP USER ${TEST_USER}"

${CLICKHOUSE_CLIENT} -q "KILL QUERY WHERE user = '${TEST_USER}' SYNC" >/dev/null &

wait

# DROP PROFILE CASE
${CLICKHOUSE_CLIENT} -q "CREATE SETTINGS PROFILE IF NOT EXISTS '${TEST_PROFILE}'"
${CLICKHOUSE_CLIENT} -q "CREATE USER ${TEST_USER} SETTINGS PROFILE '${TEST_PROFILE}'"
${CLICKHOUSE_CLIENT} -q "GRANT SELECT ON system.numbers TO ${TEST_USER}"

timeout 10s bash -c "tcp_session ${TEST_USER}" >/dev/null 2>&1 &
timeout 10s bash -c "http_session ${TEST_USER}" >/dev/null 2>&1 &
timeout 10s bash -c "http_with_session_id_session ${TEST_USER}" >/dev/null 2>&1 &

wait_for_queries_start $TEST_USER 3
${CLICKHOUSE_CLIENT} -q "DROP SETTINGS PROFILE '${TEST_PROFILE}'"
${CLICKHOUSE_CLIENT} -q "DROP USER ${TEST_USER}"

${CLICKHOUSE_CLIENT} -q "KILL QUERY WHERE user = '${TEST_USER}' SYNC" >/dev/null &

wait

${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"

echo "port_0_sessions:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND client_port = 0"
echo "address_0_sessions:"
${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND client_address = toIPv6('::')"
echo "Corresponding LoginSuccess/Logout"
${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout')"
echo "LoginFailure"
${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginFailure'"
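When a test like this fails, a per-type breakdown of the user's session_log rows is often the quickest way to see which stage misbehaved. A sketch, again with a placeholder user name:

-- Sketch only: distribution of session_log event types for one user.
SELECT type, count()
FROM system.session_log
WHERE user = 'some_user'  -- placeholder
GROUP BY type
ORDER BY type;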
@ -0,0 +1,14 @@
-- Tags: shard

select * from remote('127.0.0.1', sys); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
select * from remote('127.0.0.1', system); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
select * from remote('127.0.0.1', system.o); -- { serverError UNKNOWN_TABLE }
select * from remote('127.0.0.1', system.one, default); -- { serverError UNKNOWN_IDENTIFIER }
select * from remote('127.0.0.1', system.one, default, ''); -- { serverError BAD_ARGUMENTS }
select * from remote('127.0.0.1', system.one, default, key1); -- { serverError BAD_ARGUMENTS }
select * from remote('127.0.0.1', system.one, 'default', '', key1); -- { serverError UNKNOWN_IDENTIFIER }
select * from remote('127.0.0.1', system.one, default, '', key1); -- { serverError BAD_ARGUMENTS }
select * from remote('127.0.0.1', system.one, 'default', pwd, key1); -- { serverError BAD_ARGUMENTS }
select * from remote('127.0.0.1', system.one, 'default', '', key1, key2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
select * from remote('127.0.0.1', system, one, 'default', '', key1, key2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
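For contrast with the malformed calls above, sketches of remote() argument shapes that do parse. These are illustrative only: the address and credentials are placeholders, and rand() stands in for a real sharding expression.

-- Sketch only: well-formed remote() argument shapes.
select * from remote('127.0.0.1', system.one);                        -- address, db.table
select * from remote('127.0.0.1', system.one, 'default', '');         -- plus user and password
select * from remote('127.0.0.1', system.one, 'default', '', rand()); -- plus sharding key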
@ -1,3 +1,4 @@
v23.7.4.5-stable 2023-08-08
v23.7.3.14-stable 2023-08-05
v23.7.2.25-stable 2023-08-03
v23.7.1.2470-stable 2023-07-27