From 750a82a4ff615190a2793c0cfae9f4c1f5c75433 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Feb 2024 13:23:33 +0100 Subject: [PATCH 001/374] Update doc --- .../mergetree-family/mergetree.md | 2 + docs/en/operations/storing-data.md | 146 ++++++++++++++++-- 2 files changed, 134 insertions(+), 14 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index f185c11bab3..e1eef8db9ab 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -1106,6 +1106,8 @@ Configuration markup: ``` +Also see [configuring external storage options](/docs/en/operations/storing-data.md/#configuring-external-storage). + :::note cache configuration ClickHouse versions 22.3 through 22.7 use a different cache configuration, see [using local cache](/docs/en/operations/storing-data.md/#using-local-cache) if you are using one of those versions. ::: diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 003277c8d4f..7a7edfb1a90 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -11,45 +11,163 @@ To work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-en To load data from a web server with static files use a disk with type [web](#storing-data-on-webserver). -## Configuring HDFS {#configuring-hdfs} +## Configuring external storage {#configuring-external-storage} -[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data to HDFS using a disk with type `HDFS`. +[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data to `S3`, `AzureBlobStorage`, `HDFS` using a disk with types `s3`, `azure_blob_storage`, `hdfs` accordingly. Configuration markup: +Let's take a loop at different storage configuration options on the example of `S3` storage. +Firstly, define configuration in server configuration file. In order to configure `S3` storage the following configuration can be used: + ``` xml - - hdfs - hdfs://hdfs1:9000/clickhouse/ - + + s3 + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + - +
- hdfs + s3
-
+
+
+``` +Starting with 24.1 clickhouse version, a different type of configuration is supported in addition to the older one: + +``` xml + + + + + object_storage + s3 + local + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + + + + + +
+ s3 +
+
+
+
+
+
+``` + +In order to make a specific kind of storage a default option for all `MergeTree` tables add the following section to configuration file: + +``` xml + - 0 + s3 ``` -Required parameters: +If you want to configure a specific storage policy only to specific table, you can define it in settings while creating the table: -- `endpoint` — HDFS endpoint URL in `path` format. Endpoint URL should contain a root path to store data. +``` sql +CREATE TABLE test (a Int32, b String) +ENGINE = MergeTree() ORDER BY a +SETTINGS storage_policy = 's3'; +``` -Optional parameters: +You can also use `disk` instead of `storage_policy`. In this case it is not requires to have `storage_policy` section in configuration file, only `disk` section would be enough. -- `min_bytes_for_seek` — The minimal number of bytes to use seek operation instead of sequential read. Default value: `1 Mb`. +``` sql +CREATE TABLE test (a Int32, b String) +ENGINE = MergeTree() ORDER BY a +SETTINGS disk = 's3'; +``` + +There is also a possibility to specify storage configuration without a preconfigured disk in configuration file: + +``` sql +CREATE TABLE test (a Int32, b String) +ENGINE = MergeTree() ORDER BY a +SETTINGS disk = disk(name = 's3_disk', type = 's3', endpoint = 'https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/', use_environment_credentials = 1); +``` + +Adding cache is also possible: + +``` sql +CREATE TABLE test (a Int32, b String) +ENGINE = MergeTree() ORDER BY a +SETTINGS disk = disk(name = 'cached_s3_disk', type = 'cache', max_size = '10Gi', path = '/s3_cache', disk = disk(name = 's3_disk', type = 's3', endpoint = 'https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/', use_environment_credentials = 1)); +``` + +A combination of config file disk configuration and sql-defined configuration is also possible: + +``` sql +CREATE TABLE test (a Int32, b String) +ENGINE = MergeTree() ORDER BY a +SETTINGS disk = disk(name = 'cached_s3_disk', type = 'cache', max_size = '10Gi', path = '/s3_cache', disk = 's3'); +``` + +Here `s3` is a disk name from server configuration file, while `cache` disk is defined via sql. + +Let's take a closer look at configuration parameters. + +All disk configuration require `type` section, equal to one of `s3`, `azure_blob_storage`, `hdfs`, `local`, `cache`, `web`. Then goes configuration of a specific storage type. +Starting from 24.1 clickhouse version, you can you a new configuration option. For it you are required to specify `type` as `object_storage`, `object_storage_type` as one of `s3`, `azure_blob_storage`, `hdfs`, `local`, `cache`, `web`, and optionally you can specify `metadata_type`, which is `local` by default, but it can also be set to `plain`, `web`. + +E.g. first configuration option: +``` xml + + s3 + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +and second (from `24.1`): +``` xml + + object_storage + s3 + local + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +Configuration like +``` xml + + s3_plain + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +is equal to +``` xml + + object_storage + s3 + plain + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +For details configuration options of each storage see [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md). 
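+
+To double-check the result, the disks and policies the server actually picked up can be inspected through the system tables. This is only a read-only sanity check, not part of the storage configuration itself, and the exact column set of `system.disks` and `system.storage_policies` may differ slightly between ClickHouse versions:
+
+``` sql
+-- List the disks known to the server, including the ones configured above.
+SELECT name, type, path FROM system.disks;
+
+-- Show which disks belong to each storage policy.
+SELECT policy_name, volume_name, disks FROM system.storage_policies;
+```
+
+Similarly, the `storage_policy` column of `system.tables` shows which policy each table ended up with.
+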
## Using Virtual File System for Data Encryption {#encrypted-virtual-file-system} From 9bcd4daabe56e29132fc5098420afb4dcba9001d Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Feb 2024 16:19:31 +0100 Subject: [PATCH 002/374] Better --- .../mergetree-family/mergetree.md | 294 +------------ docs/en/operations/storing-data.md | 411 +++++++++++++++--- 2 files changed, 346 insertions(+), 359 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index e1eef8db9ab..0fff13c906f 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -987,49 +987,6 @@ ORDER BY (postcode1, postcode2, addr1, addr2) # highlight-end ``` -### Nested Dynamic Storage - -This example query builds on the above dynamic disk configuration and shows how to -use a local disk to cache data from a table stored at a URL. Neither the cache disk -nor the web storage is configured in the ClickHouse configuration files; both are -configured in the CREATE/ATTACH query settings. - -In the settings highlighted below notice that the disk of `type=web` is nested within -the disk of `type=cache`. - -```sql -ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' -( - price UInt32, - date Date, - postcode1 LowCardinality(String), - postcode2 LowCardinality(String), - type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), - is_new UInt8, - duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), - addr1 String, - addr2 String, - street LowCardinality(String), - locality LowCardinality(String), - town LowCardinality(String), - district LowCardinality(String), - county LowCardinality(String) -) -ENGINE = MergeTree -ORDER BY (postcode1, postcode2, addr1, addr2) - # highlight-start - SETTINGS disk = disk( - type=cache, - max_size='1Gi', - path='/var/lib/clickhouse/custom_disk_cache/', - disk=disk( - type=web, - endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' - ) - ); - # highlight-end -``` - ### Details {#details} In the case of `MergeTree` tables, data is getting to disk in different ways: @@ -1058,19 +1015,17 @@ During this time, they are not moved to other volumes or disks. Therefore, until User can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](/docs/en/operations/settings/merge-tree-settings.md/#min-bytes-to-rebalance-partition-over-jbod) setting. -## Using S3 for Data Storage {#table_engine-mergetree-s3} +## Using External Storage for Data Storage {#table_engine-mergetree-s3} -:::note -Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/gcs). -::: +[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) family table engines can store data to `S3`, `AzureBlobStorage`, `HDFS` using a disk with types `s3`, `azure_blob_storage`, `hdfs` accordingly. See [configuring external storage options](/docs/en/operations/storing-data.md/#configuring-external-storage) for more details. -`MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`. +Example for [S3](https://aws.amazon.com/s3/) as external storage using a disk with type `s3`. Configuration markup: ``` xml ... 
- +e s3 true @@ -1112,247 +1067,6 @@ Also see [configuring external storage options](/docs/en/operations/storing-data ClickHouse versions 22.3 through 22.7 use a different cache configuration, see [using local cache](/docs/en/operations/storing-data.md/#using-local-cache) if you are using one of those versions. ::: -### Configuring the S3 disk - -Required parameters: - -- `endpoint` — S3 endpoint URL in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Endpoint URL should contain a bucket and root path to store data. -- `access_key_id` — S3 access key id. -- `secret_access_key` — S3 secret access key. - -Optional parameters: - -- `region` — S3 region name. -- `support_batch_delete` — This controls the check to see if batch deletes are supported. Set this to `false` when using Google Cloud Storage (GCS) as GCS does not support batch deletes and preventing the checks will prevent error messages in the logs. -- `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`. -- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`. -- `expiration_window_seconds` — Grace period for checking if expiration-based credentials have expired. Optional, default value is `120`. -- `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL. -- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`. -- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`. -- `retry_attempts` — Number of retry attempts in case of failed request. Default value is `10`. -- `single_read_retries` — Number of retry attempts in case of connection drop during read. Default value is `4`. -- `min_bytes_for_seek` — Minimal number of bytes to use seek operation instead of sequential read. Default value is `1 Mb`. -- `metadata_path` — Path on local FS to store metadata files for S3. Default value is `/var/lib/clickhouse/disks//`. -- `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`. -- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times. -- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. -- `server_side_encryption_kms_key_id` - If specified, required headers for accessing S3 objects with [SSE-KMS encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) will be set. If an empty string is specified, the AWS managed S3 key will be used. Optional. -- `server_side_encryption_kms_encryption_context` - If specified alongside `server_side_encryption_kms_key_id`, the given encryption context header for SSE-KMS will be set. Optional. -- `server_side_encryption_kms_bucket_key_enabled` - If specified alongside `server_side_encryption_kms_key_id`, the header to enable S3 bucket keys for SSE-KMS will be set. Optional, can be `true` or `false`, defaults to nothing (matches the bucket-level setting). -- `s3_max_put_rps` — Maximum PUT requests per second rate before throttling. Default value is `0` (unlimited). 
-- `s3_max_put_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_put_rps`. -- `s3_max_get_rps` — Maximum GET requests per second rate before throttling. Default value is `0` (unlimited). -- `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_get_rps`. -- `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). -- `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). -- `key_template` — Define the format with which the object keys are generated. By default, Clickhouse takes `root path` from `endpoint` option and adds random generated suffix. That suffix is a dir with 3 random symbols and a file name with 29 random symbols. With that option you have a full control how to the object keys are generated. Some usage scenarios require having random symbols in the prefix or in the middle of object key. For example: `[a-z]{3}-prefix-random/constant-part/random-middle-[a-z]{3}/random-suffix-[a-z]{29}`. The value is parsed with [`re2`](https://github.com/google/re2/wiki/Syntax). Only some subset of the syntax is supported. Check if your preferred format is supported before using that option. Disk isn't initialized if clickhouse is unable to generate a key by the value of `key_template`. It requires enabled feature flag [storage_metadata_write_full_object_key](/docs/en/operations/settings/settings#storage_metadata_write_full_object_key). It forbids declaring the `root path` in `endpoint` option. It requires definition of the option `key_compatibility_prefix`. -- `key_compatibility_prefix` — That option is required when option `key_template` is in use. In order to be able to read the objects keys which were stored in the metadata files with the metadata version lower that `VERSION_FULL_OBJECT_KEY`, the previous `root path` from the `endpoint` option should be set here. - -### Configuring the cache - -This is the cache configuration from above: -```xml - - cache - s3 - /var/lib/clickhouse/disks/s3_cache/ - 10Gi - -``` - -These parameters define the cache layer: -- `type` — If a disk is of type `cache` it caches mark and index files in memory. -- `disk` — The name of the disk that will be cached. - -Cache parameters: -- `path` — The path where metadata for the cache is stored. -- `max_size` — The size (amount of disk space) that the cache can grow to. - -:::tip -There are several other cache parameters that you can use to tune your storage, see [using local cache](/docs/en/operations/storing-data.md/#using-local-cache) for the details. -::: - -S3 disk can be configured as `main` or `cold` storage: -``` xml - - ... - - - s3 - https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/root-path/ - your_access_key_id - your_secret_access_key - - - - - -
- s3 -
-
-
- - -
- default -
- - s3 - -
- 0.2 -
-
- ... -
-``` - -In case of `cold` option a data can be moved to S3 if local disk free size will be smaller than `move_factor * disk_size` or by TTL move rule. - -## Using Azure Blob Storage for Data Storage {#table_engine-mergetree-azure-blob-storage} - -`MergeTree` family table engines can store data to [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) using a disk with type `azure_blob_storage`. - -As of February 2022, this feature is still a fresh addition, so expect that some Azure Blob Storage functionalities might be unimplemented. - -Configuration markup: -``` xml - - ... - - - azure_blob_storage - http://account.blob.core.windows.net - container - account - pass123 - /var/lib/clickhouse/disks/blob_storage_disk/ - /var/lib/clickhouse/disks/blob_storage_disk/cache/ - false - - - ... - -``` - -Connection parameters: -* `storage_account_url` - **Required**, Azure Blob Storage account URL, like `http://account.blob.core.windows.net` or `http://azurite1:10000/devstoreaccount1`. -* `container_name` - Target container name, defaults to `default-container`. -* `container_already_exists` - If set to `false`, a new container `container_name` is created in the storage account, if set to `true`, disk connects to the container directly, and if left unset, disk connects to the account, checks if the container `container_name` exists, and creates it if it doesn't exist yet. - -Authentication parameters (the disk will try all available methods **and** Managed Identity Credential): -* `connection_string` - For authentication using a connection string. -* `account_name` and `account_key` - For authentication using Shared Key. - -Limit parameters (mainly for internal usage): -* `s3_max_single_part_upload_size` - Limits the size of a single block upload to Blob Storage. -* `min_bytes_for_seek` - Limits the size of a seekable region. -* `max_single_read_retries` - Limits the number of attempts to read a chunk of data from Blob Storage. -* `max_single_download_retries` - Limits the number of attempts to download a readable buffer from Blob Storage. -* `thread_pool_size` - Limits the number of threads with which `IDiskRemote` is instantiated. -* `s3_max_inflight_parts_for_one_file` - Limits the number of put requests that can be run concurrently for one object. - -Other parameters: -* `metadata_path` - Path on local FS to store metadata files for Blob Storage. Default value is `/var/lib/clickhouse/disks//`. -* `skip_access_check` - If true, disk access checks will not be performed on disk start-up. Default value is `false`. -* `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). -* `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). - -Examples of working configurations can be found in integration tests directory (see e.g. [test_merge_tree_azure_blob_storage](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_merge_tree_azure_blob_storage/configs/config.d/storage_conf.xml) or [test_azure_blob_storage_zero_copy_replication](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_azure_blob_storage_zero_copy_replication/configs/config.d/storage_conf.xml)). 
- -:::note Zero-copy replication is not ready for production -Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use. -::: - -## HDFS storage {#hdfs-storage} - -In this sample configuration: -- the disk is of type `hdfs` -- the data is hosted at `hdfs://hdfs1:9000/clickhouse/` - -```xml - - - - - hdfs - hdfs://hdfs1:9000/clickhouse/ - true - - - local - / - - - - - -
- hdfs -
- - hdd - -
-
-
-
-
-``` - -## Web storage (read-only) {#web-storage} - -Web storage can be used for read-only purposes. An example use is for hosting sample -data, or for migrating data. - -:::tip -Storage can also be configured temporarily within a query, if a web dataset is not expected -to be used routinely, see [dynamic storage](#dynamic-storage) and skip editing the -configuration file. -::: - -In this sample configuration: -- the disk is of type `web` -- the data is hosted at `http://nginx:80/test1/` -- a cache on local storage is used - -```xml - - - - - web - http://nginx:80/test1/ - - - cache - web - cached_web_cache/ - 100000000 - - - - - -
- web -
-
-
- - -
- cached_web -
-
-
-
-
-
-``` - ## Virtual Columns {#virtual-columns} - `_part` — Name of a part. diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 7a7edfb1a90..baf4e1999a7 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -5,21 +5,68 @@ sidebar_label: "External Disks for Storing Data" title: "External Disks for Storing Data" --- -Data, processed in ClickHouse, is usually stored in the local file system — on the same machine with the ClickHouse server. That requires large-capacity disks, which can be expensive enough. To avoid that you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)). +Data, processed in ClickHouse, is usually stored in the local file system — on the same machine with the ClickHouse server. That requires large-capacity disks, which can be expensive enough. To avoid that you can store the data remotely. Various storages are supported: +1. [Amazon S3](https://aws.amazon.com/s3/) object storage. +2. The Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)) +3. [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). -To work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine. - -To load data from a web server with static files use a disk with type [web](#storing-data-on-webserver). +Note: to work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine, and to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/AzureBlobStorage.md) table engine. They are different from external storage described on this page as they allow to read data stored in some general file format (like Parquet), while on this page we are describing storage configuration for ClickHouse `MergeTree` famility or `Log` family tables. ## Configuring external storage {#configuring-external-storage} [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data to `S3`, `AzureBlobStorage`, `HDFS` using a disk with types `s3`, `azure_blob_storage`, `hdfs` accordingly. -Configuration markup: +Disk configuration requires: +1. `type` section, equal to one of `s3`, `azure_blob_storage`, `hdfs`, `local_blob_storage`, `web`. +2. Configuration of a specific external storage type. -Let's take a loop at different storage configuration options on the example of `S3` storage. -Firstly, define configuration in server configuration file. In order to configure `S3` storage the following configuration can be used: +Starting from 24.1 clickhouse version, it is possible to use a new configuration option. +It requires to specify: +1. `type` equal to `object_storage` +2. `object_storage_type`, equal to one of `s3`, `azure_blob_storage`, `hdfs`, `local_blob_storage`, `web`. 
+Optionally, `metadata_type` can be specified (it is equal to `local` by default), but it can also be set to `plain`, `web`. +E.g. configuration option +``` xml + + s3 + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +is equal to configuration (from `24.1`): +``` xml + + object_storage + s3 + local + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +Configuration +``` xml + + s3_plain + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +is equal to +``` xml + + object_storage + s3 + plain + https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ + 1 + +``` + +Example of full storage configuration will look like: ``` xml @@ -43,8 +90,7 @@ Firstly, define configuration in server configuration file. In order to configur ``` -Starting with 24.1 clickhouse version, a different type of configuration is supported in addition to the older one: - +Starting with 24.1 clickhouse version, it can also look like: ``` xml @@ -71,7 +117,6 @@ Starting with 24.1 clickhouse version, a different type of configuration is supp ``` In order to make a specific kind of storage a default option for all `MergeTree` tables add the following section to configuration file: - ``` xml @@ -96,80 +141,259 @@ ENGINE = MergeTree() ORDER BY a SETTINGS disk = 's3'; ``` -There is also a possibility to specify storage configuration without a preconfigured disk in configuration file: +## Dynamic Configuration {#dynamic-configuration} -``` sql -CREATE TABLE test (a Int32, b String) -ENGINE = MergeTree() ORDER BY a -SETTINGS disk = disk(name = 's3_disk', type = 's3', endpoint = 'https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/', use_environment_credentials = 1); +There is also a possibility to specify storage configuration without a predefined disk in configuration in a configuration file, but can be configured in the CREATE/ATTACH query settings. + +The following example query builds on the above dynamic disk configuration and shows how to use a local disk to cache data from a table stored at a URL. + +```sql +ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' +( + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), + is_new UInt8, + duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, addr2) + # highlight-start + SETTINGS disk = disk( + type=web, + endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' + ); + # highlight-end ``` -Adding cache is also possible: +The example below adds cache to external storage. 
-``` sql -CREATE TABLE test (a Int32, b String) -ENGINE = MergeTree() ORDER BY a -SETTINGS disk = disk(name = 'cached_s3_disk', type = 'cache', max_size = '10Gi', path = '/s3_cache', disk = disk(name = 's3_disk', type = 's3', endpoint = 'https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/', use_environment_credentials = 1)); +```sql +ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' +( + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), + is_new UInt8, + duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, addr2) + # highlight-start + SETTINGS disk = disk( + type=cache, + max_size='1Gi', + path='/var/lib/clickhouse/custom_disk_cache/', + disk=disk( + type=web, + endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' + ) + ); + # highlight-end ``` -A combination of config file disk configuration and sql-defined configuration is also possible: +In the settings highlighted below notice that the disk of `type=web` is nested within +the disk of `type=cache`. -``` sql -CREATE TABLE test (a Int32, b String) -ENGINE = MergeTree() ORDER BY a -SETTINGS disk = disk(name = 'cached_s3_disk', type = 'cache', max_size = '10Gi', path = '/s3_cache', disk = 's3'); +A combination of config-based configuration and sql-defined configuration is also possible: + +```sql +ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' +( + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), + is_new UInt8, + duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, addr2) + # highlight-start + SETTINGS disk = disk( + type=cache, + max_size='1Gi', + path='/var/lib/clickhouse/custom_disk_cache/', + disk=disk( + type=web, + endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' + ) + ); + # highlight-end ``` -Here `s3` is a disk name from server configuration file, while `cache` disk is defined via sql. +where `web` is a from a server configuration file: -Let's take a closer look at configuration parameters. - -All disk configuration require `type` section, equal to one of `s3`, `azure_blob_storage`, `hdfs`, `local`, `cache`, `web`. Then goes configuration of a specific storage type. -Starting from 24.1 clickhouse version, you can you a new configuration option. For it you are required to specify `type` as `object_storage`, `object_storage_type` as one of `s3`, `azure_blob_storage`, `hdfs`, `local`, `cache`, `web`, and optionally you can specify `metadata_type`, which is `local` by default, but it can also be set to `plain`, `web`. - -E.g. 
first configuration option: ``` xml - - s3 - https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ - 1 - + + + + web + 'https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' + + + ``` -and second (from `24.1`): +### Using S3 Storage {#s3-storage} + +Required parameters: + +- `endpoint` — S3 endpoint URL in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Endpoint URL should contain a bucket and root path to store data. +- `access_key_id` — S3 access key id. +- `secret_access_key` — S3 secret access key. + +Optional parameters: + +- `region` — S3 region name. +- `support_batch_delete` — This controls the check to see if batch deletes are supported. Set this to `false` when using Google Cloud Storage (GCS) as GCS does not support batch deletes and preventing the checks will prevent error messages in the logs. +- `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`. +- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`. +- `expiration_window_seconds` — Grace period for checking if expiration-based credentials have expired. Optional, default value is `120`. +- `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL. +- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`. +- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`. +- `retry_attempts` — Number of retry attempts in case of failed request. Default value is `10`. +- `single_read_retries` — Number of retry attempts in case of connection drop during read. Default value is `4`. +- `min_bytes_for_seek` — Minimal number of bytes to use seek operation instead of sequential read. Default value is `1 Mb`. +- `metadata_path` — Path on local FS to store metadata files for S3. Default value is `/var/lib/clickhouse/disks//`. +- `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`. +- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times. +- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. +- `server_side_encryption_kms_key_id` - If specified, required headers for accessing S3 objects with [SSE-KMS encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) will be set. If an empty string is specified, the AWS managed S3 key will be used. Optional. +- `server_side_encryption_kms_encryption_context` - If specified alongside `server_side_encryption_kms_key_id`, the given encryption context header for SSE-KMS will be set. Optional. +- `server_side_encryption_kms_bucket_key_enabled` - If specified alongside `server_side_encryption_kms_key_id`, the header to enable S3 bucket keys for SSE-KMS will be set. Optional, can be `true` or `false`, defaults to nothing (matches the bucket-level setting). +- `s3_max_put_rps` — Maximum PUT requests per second rate before throttling. Default value is `0` (unlimited). +- `s3_max_put_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. 
By default (`0` value) equals to `s3_max_put_rps`. +- `s3_max_get_rps` — Maximum GET requests per second rate before throttling. Default value is `0` (unlimited). +- `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_get_rps`. +- `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). +- `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). +- `key_template` — Define the format with which the object keys are generated. By default, Clickhouse takes `root path` from `endpoint` option and adds random generated suffix. That suffix is a dir with 3 random symbols and a file name with 29 random symbols. With that option you have a full control how to the object keys are generated. Some usage scenarios require having random symbols in the prefix or in the middle of object key. For example: `[a-z]{3}-prefix-random/constant-part/random-middle-[a-z]{3}/random-suffix-[a-z]{29}`. The value is parsed with [`re2`](https://github.com/google/re2/wiki/Syntax). Only some subset of the syntax is supported. Check if your preferred format is supported before using that option. Disk isn't initialized if clickhouse is unable to generate a key by the value of `key_template`. It requires enabled feature flag [storage_metadata_write_full_object_key](/docs/en/operations/settings/settings#storage_metadata_write_full_object_key). It forbids declaring the `root path` in `endpoint` option. It requires definition of the option `key_compatibility_prefix`. +- `key_compatibility_prefix` — That option is required when option `key_template` is in use. In order to be able to read the objects keys which were stored in the metadata files with the metadata version lower that `VERSION_FULL_OBJECT_KEY`, the previous `root path` from the `endpoint` option should be set here. + +:::note +Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/gcs). +::: + +### Using Azure Blob Storage {#azure-blob-storage} + +`MergeTree` family table engines can store data to [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) using a disk with type `azure_blob_storage`. + +As of February 2022, this feature is still a fresh addition, so expect that some Azure Blob Storage functionalities might be unimplemented. + +Configuration markup: ``` xml - - object_storage - s3 - local - https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ - 1 - + + ... + + + azure_blob_storage + http://account.blob.core.windows.net + container + account + pass123 + /var/lib/clickhouse/disks/blob_storage_disk/ + /var/lib/clickhouse/disks/blob_storage_disk/cache/ + false + + + ... + ``` -Configuration like -``` xml - - s3_plain - https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ - 1 - +Connection parameters: +* `storage_account_url` - **Required**, Azure Blob Storage account URL, like `http://account.blob.core.windows.net` or `http://azurite1:10000/devstoreaccount1`. +* `container_name` - Target container name, defaults to `default-container`. 
+* `container_already_exists` - If set to `false`, a new container `container_name` is created in the storage account, if set to `true`, disk connects to the container directly, and if left unset, disk connects to the account, checks if the container `container_name` exists, and creates it if it doesn't exist yet. + +Authentication parameters (the disk will try all available methods **and** Managed Identity Credential): +* `connection_string` - For authentication using a connection string. +* `account_name` and `account_key` - For authentication using Shared Key. + +Limit parameters (mainly for internal usage): +* `s3_max_single_part_upload_size` - Limits the size of a single block upload to Blob Storage. +* `min_bytes_for_seek` - Limits the size of a seekable region. +* `max_single_read_retries` - Limits the number of attempts to read a chunk of data from Blob Storage. +* `max_single_download_retries` - Limits the number of attempts to download a readable buffer from Blob Storage. +* `thread_pool_size` - Limits the number of threads with which `IDiskRemote` is instantiated. +* `s3_max_inflight_parts_for_one_file` - Limits the number of put requests that can be run concurrently for one object. + +Other parameters: +* `metadata_path` - Path on local FS to store metadata files for Blob Storage. Default value is `/var/lib/clickhouse/disks//`. +* `skip_access_check` - If true, disk access checks will not be performed on disk start-up. Default value is `false`. +* `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). +* `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk). + +Examples of working configurations can be found in integration tests directory (see e.g. [test_merge_tree_azure_blob_storage](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_merge_tree_azure_blob_storage/configs/config.d/storage_conf.xml) or [test_azure_blob_storage_zero_copy_replication](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_azure_blob_storage_zero_copy_replication/configs/config.d/storage_conf.xml)). + +:::note Zero-copy replication is not ready for production +Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use. +::: + +## Using HDFS storage {#hdfs-storage} + +In this sample configuration: +- the disk is of type `hdfs` +- the data is hosted at `hdfs://hdfs1:9000/clickhouse/` + +```xml + + + + + hdfs + hdfs://hdfs1:9000/clickhouse/ + true + + + local + / + + + + + +
+ hdfs +
+ + hdd + +
+
+
+
+
``` -is equal to -``` xml - - object_storage - s3 - plain - https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/ - 1 - -``` - -For details configuration options of each storage see [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md). - -## Using Virtual File System for Data Encryption {#encrypted-virtual-file-system} +### Using Data Encryption {#encrypted-virtual-file-system} You can encrypt the data stored on [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3), or [HDFS](#configuring-hdfs) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one. @@ -230,7 +454,7 @@ Example of disk configuration:
``` -## Using local cache {#using-local-cache} +### Using local cache {#using-local-cache} It is possible to configure local cache over disks in storage configuration starting from version 22.3. For versions 22.3 - 22.7 cache is supported only for `s3` disk type. For versions >= 22.8 cache is supported for any disk type: S3, Azure, Local, Encrypted, etc. @@ -393,7 +617,56 @@ Cache profile events: - `CachedWriteBufferCacheWriteBytes`, `CachedWriteBufferCacheWriteMicroseconds` -## Storing Data on Web Server {#storing-data-on-webserver} +### Using static Web storage (read-only) {#web-storage} + +Web storage can be used for read-only purposes. An example use is for hosting sample +data, or for migrating data. + +:::tip +Storage can also be configured temporarily within a query, if a web dataset is not expected +to be used routinely, see [dynamic storage](#dynamic-storage) and skip editing the +configuration file. +::: + +In this sample configuration: +- the disk is of type `web` +- the data is hosted at `http://nginx:80/test1/` +- a cache on local storage is used + +```xml + + + + + web + http://nginx:80/test1/ + + + cache + web + cached_web_cache/ + 100000000 + + + + + +
+ web +
+
+
+ + +
+ cached_web +
+
+
+
+
+
+``` There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`. @@ -595,7 +868,7 @@ If URL is not reachable on disk load when the server is starting up tables, then Use [http_max_single_read_retries](/docs/en/operations/settings/settings.md/#http-max-single-read-retries) setting to limit the maximum number of retries during a single HTTP read. -## Zero-copy Replication (not ready for production) {#zero-copy} +### Zero-copy Replication (not ready for production) {#zero-copy} Zero-copy replication is possible, but not recommended, with `S3` and `HDFS` disks. Zero-copy replication means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself. From 5ae410e6339fe52e33b41bbc9c6c115ac6293f57 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Feb 2024 18:33:38 +0100 Subject: [PATCH 003/374] A bit more explanation --- .../mergetree-family/mergetree.md | 49 +------------------ docs/en/operations/storing-data.md | 44 ++++++++++++++++- 2 files changed, 44 insertions(+), 49 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 0fff13c906f..f23b251f3a1 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -940,53 +940,6 @@ You could change storage policy after table creation with [ALTER TABLE ... MODIF The number of threads performing background moves of data parts can be changed by [background_move_pool_size](/docs/en/operations/server-configuration-parameters/settings.md/#background_move_pool_size) setting. -### Dynamic Storage - -This example query shows how to attach a table stored at a URL and configure the -remote storage within the query. The web storage is not configured in the ClickHouse -configuration files; all the settings are in the CREATE/ATTACH query. - -:::note -The example uses `type=web`, but any disk type can be configured as dynamic, even Local disk. Local disks require a path argument to be inside the server config parameter `custom_local_disks_base_directory`, which has no default, so set that also when using local disk. -::: - -#### Example dynamic web storage - -:::tip -A [demo dataset](https://github.com/ClickHouse/web-tables-demo) is hosted in GitHub. To prepare your own tables for web storage see the tool [clickhouse-static-files-uploader](/docs/en/operations/storing-data.md/#storing-data-on-webserver) -::: - -In this `ATTACH TABLE` query the `UUID` provided matches the directory name of the data, and the endpoint is the URL for the raw GitHub content. 
- -```sql -# highlight-next-line -ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' -( - price UInt32, - date Date, - postcode1 LowCardinality(String), - postcode2 LowCardinality(String), - type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), - is_new UInt8, - duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), - addr1 String, - addr2 String, - street LowCardinality(String), - locality LowCardinality(String), - town LowCardinality(String), - district LowCardinality(String), - county LowCardinality(String) -) -ENGINE = MergeTree -ORDER BY (postcode1, postcode2, addr1, addr2) - # highlight-start - SETTINGS disk = disk( - type=web, - endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' - ); - # highlight-end -``` - ### Details {#details} In the case of `MergeTree` tables, data is getting to disk in different ways: @@ -1025,7 +978,7 @@ Configuration markup: ``` xml ... -e + s3 true diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index baf4e1999a7..0f818b813bf 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -213,6 +213,10 @@ ORDER BY (postcode1, postcode2, addr1, addr2) In the settings highlighted below notice that the disk of `type=web` is nested within the disk of `type=cache`. +:::note +The example uses `type=web`, but any disk type can be configured as dynamic, even Local disk. Local disks require a path argument to be inside the server config parameter `custom_local_disks_base_directory`, which has no default, so set that also when using local disk. +::: + A combination of config-based configuration and sql-defined configuration is also possible: ```sql @@ -302,6 +306,11 @@ Optional parameters: Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/gcs). ::: +### Using Plain Storage {#s3-storage} + +There is a disk type `s3_plain`, which provides a write-once storage. Unlike `s3` disk type, it stores data as is, e.g. instead of randomly-generated blob names, it uses normal file names as clickhouse stores files on local disk. So this disk type allows to keeper a static version of the table and can also be used to create backups on it. +Configuration parameters are the same as for `s3` disk type. + ### Using Azure Blob Storage {#azure-blob-storage} `MergeTree` family table engines can store data to [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) using a disk with type `azure_blob_storage`. @@ -672,7 +681,40 @@ There is a tool `clickhouse-static-files-uploader`, which prepares a data direct This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via `ATTACH TABLE` query (see example below). Local disk is not actually used, each `SELECT` query will result in a `http` request to fetch required data. All modification of the table data will result in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/index.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md). 
-Web server storage is supported only for the [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) engine families. To access the data stored on a `web` disk, use the [storage_policy](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#terms) setting when executing the query. For example, `ATTACH TABLE table_web UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'`. +:::tip +A [demo dataset](https://github.com/ClickHouse/web-tables-demo) is hosted in GitHub. To prepare your own tables for web storage see the tool [clickhouse-static-files-uploader](/docs/en/operations/storing-data.md/#storing-data-on-webserver) +::: + +In this `ATTACH TABLE` query the `UUID` provided matches the directory name of the data, and the endpoint is the URL for the raw GitHub content. + +```sql +# highlight-next-line +ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' +( + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), + is_new UInt8, + duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, addr2) + # highlight-start + SETTINGS disk = disk( + type=web, + endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' + ); + # highlight-end +``` A ready test case. You need to add this configuration to config: From 09e630e02be9ccd19681b34f33e24cea849ca9fd Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 15 Feb 2024 19:00:08 +0100 Subject: [PATCH 004/374] Update storing-data.md --- docs/en/operations/storing-data.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 0f818b813bf..60e33fe2849 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -10,7 +10,7 @@ Data, processed in ClickHouse, is usually stored in the local file system — on 2. The Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)) 3. [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). -Note: to work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine, and to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/AzureBlobStorage.md) table engine. They are different from external storage described on this page as they allow to read data stored in some general file format (like Parquet), while on this page we are describing storage configuration for ClickHouse `MergeTree` famility or `Log` family tables. 
+Note: to work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine, and to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/AzureBlobStorage.md) table engine. They are different from external storage described on this page as they allow to read data stored in some general file format (like Parquet), while on this page we are describing storage configuration for ClickHouse `MergeTree` family or `Log` family tables. ## Configuring external storage {#configuring-external-storage} From 8c11f59ba82bd9ae3a322f7a9729c4a5a8644512 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 19 Feb 2024 11:01:37 +0100 Subject: [PATCH 005/374] Fix bad link, update disk web description --- docs/en/operations/storing-data.md | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 60e33fe2849..4b0345a3206 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -628,14 +628,9 @@ Cache profile events: ### Using static Web storage (read-only) {#web-storage} -Web storage can be used for read-only purposes. An example use is for hosting sample -data, or for migrating data. - -:::tip -Storage can also be configured temporarily within a query, if a web dataset is not expected -to be used routinely, see [dynamic storage](#dynamic-storage) and skip editing the -configuration file. -::: +This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via `ATTACH TABLE` query (see example below). Local disk is not actually used, each `SELECT` query will result in a `http` request to fetch required data. All modification of the table data will result in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/index.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md). +Web storage can be used for read-only purposes. An example use is for hosting sample data, or for migrating data. +There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`. In this sample configuration: - the disk is of type `web` @@ -677,9 +672,11 @@ In this sample configuration:
``` -There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`. - -This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via `ATTACH TABLE` query (see example below). Local disk is not actually used, each `SELECT` query will result in a `http` request to fetch required data. All modification of the table data will result in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/index.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md). +:::tip +Storage can also be configured temporarily within a query, if a web dataset is not expected +to be used routinely, see [dynamic configuration](#dynamic-configuration) and skip editing the +configuration file. +::: :::tip A [demo dataset](https://github.com/ClickHouse/web-tables-demo) is hosted in GitHub. To prepare your own tables for web storage see the tool [clickhouse-static-files-uploader](/docs/en/operations/storing-data.md/#storing-data-on-webserver) From 601b1dfaa14323db28f169b6b193d59ec75e8bfc Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 19 Feb 2024 12:21:52 +0100 Subject: [PATCH 006/374] Fix bad link --- docs/en/operations/storing-data.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 4b0345a3206..4f676904375 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -10,7 +10,7 @@ Data, processed in ClickHouse, is usually stored in the local file system — on 2. The Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)) 3. [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). -Note: to work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine, and to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/AzureBlobStorage.md) table engine. They are different from external storage described on this page as they allow to read data stored in some general file format (like Parquet), while on this page we are describing storage configuration for ClickHouse `MergeTree` family or `Log` family tables. +Note: to work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine, and to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) table engine. 
They are different from external storage described on this page as they allow to read data stored in some general file format (like Parquet), while on this page we are describing storage configuration for ClickHouse `MergeTree` family or `Log` family tables. ## Configuring external storage {#configuring-external-storage} From e98d09c93e6c54a2cc4eadab8614539c0a5eb0f8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 21 Feb 2024 21:41:04 +0100 Subject: [PATCH 007/374] Do not load useless columns from the index in memory --- contrib/rapidjson | 2 +- src/Processors/QueryPlan/PartsSplitter.cpp | 9 ++++--- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 21 +++++++++++++++ .../MergeTree/MergeTreeDataSelectExecutor.cpp | 26 +++++++++++++++---- src/Storages/MergeTree/MergeTreeSettings.h | 2 +- 5 files changed, 49 insertions(+), 11 deletions(-) diff --git a/contrib/rapidjson b/contrib/rapidjson index c4ef90ccdbc..a9bc56c9165 160000 --- a/contrib/rapidjson +++ b/contrib/rapidjson @@ -1 +1 @@ -Subproject commit c4ef90ccdbc21d5d5a628d08316bfd301e32d6fa +Subproject commit a9bc56c9165f1dbbbcada64221bd3a59042c5b95 diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index 0fc6ddd6408..fcb1d8dd92c 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -32,9 +32,9 @@ std::string toString(const Values & value) int compareValues(const Values & lhs, const Values & rhs) { - chassert(lhs.size() == rhs.size()); + size_t size = std::min(lhs.size(), rhs.size()); - for (size_t i = 0; i < lhs.size(); ++i) + for (size_t i = 0; i < size; ++i) { if (applyVisitor(FieldVisitorAccurateLess(), lhs[i], rhs[i])) return -1; @@ -55,8 +55,9 @@ public: Values getValue(size_t part_idx, size_t mark) const { const auto & index = parts[part_idx].data_part->getIndex(); - Values values(index.size()); - for (size_t i = 0; i < values.size(); ++i) + size_t size = index.size(); + Values values(size); + for (size_t i = 0; i < size; ++i) { index[i]->get(mark, values[i]); if (values[i].isNull()) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 11ede661f78..629f3688874 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -869,6 +869,27 @@ void IMergeTreeDataPart::loadIndex() const for (size_t j = 0; j < key_size; ++j) key_serializations[j]->deserializeBinary(*loaded_index[j], *index_file, {}); + /// Cut useless suffix columns, if necessary. 
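    /// Illustration with a hypothetical table: for ORDER BY (UserID, EventTime), if UserID already differs
    /// between adjacent marks in at least the configured ratio of cases
    /// (primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9 by default), the remaining
    /// EventTime column adds almost nothing to index selectivity, so it is dropped from the in-memory index.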
+ Float64 ratio_to_drop_suffix_columns = storage.getSettings()->primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns; + if (key_size > 1 && ratio_to_drop_suffix_columns > 0 && ratio_to_drop_suffix_columns < 1) + { + chassert(marks_count > 0); + for (size_t j = 0; j < key_size - 1; ++j) + { + size_t num_changes = 0; + for (size_t i = 1; i < marks_count; ++i) + if (0 != loaded_index[j]->compareAt(i, i - 1, *loaded_index[j], 0)) + ++num_changes; + + if (static_cast(num_changes) / marks_count >= ratio_to_drop_suffix_columns) + { + key_size = j + 1; + loaded_index.resize(key_size); + break; + } + } + } + for (size_t i = 0; i < key_size; ++i) { loaded_index[i]->shrinkToFit(); diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 1ba28713680..175419f20e0 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1110,7 +1110,11 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( DataTypes key_types; for (size_t i : key_indices) { - index_columns->emplace_back(ColumnWithTypeAndName{index[i], primary_key.data_types[i], primary_key.column_names[i]}); + if (i < index.size()) + index_columns->emplace_back(index[i], primary_key.data_types[i], primary_key.column_names[i]); + else + index_columns->emplace_back(); /// The column of the primary key was not loaded in memory - we'll skip it. + key_types.emplace_back(primary_key.data_types[i]); } @@ -1119,7 +1123,6 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( std::function create_field_ref; if (key_condition.hasMonotonicFunctionsChain()) { - create_field_ref = [index_columns](size_t row, size_t column, FieldRef & field) { field = {index_columns.get(), row, column}; @@ -1159,7 +1162,11 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( { for (size_t i = 0; i < used_key_size; ++i) { - create_field_ref(range.begin, i, index_left[i]); + if ((*index_columns)[i].column) + create_field_ref(range.begin, i, index_left[i]); + else + index_left[i] = NEGATIVE_INFINITY; + index_right[i] = POSITIVE_INFINITY; } } @@ -1170,8 +1177,17 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( for (size_t i = 0; i < used_key_size; ++i) { - create_field_ref(range.begin, i, index_left[i]); - create_field_ref(range.end, i, index_right[i]); + if ((*index_columns)[i].column) + { + create_field_ref(range.begin, i, index_left[i]); + create_field_ref(range.end, i, index_right[i]); + } + else + { + /// If the PK column was not loaded in memory - exclude it from the analysis. + index_left[i] = NEGATIVE_INFINITY; + index_right[i] = POSITIVE_INFINITY; + } } } key_condition_maybe_true = key_condition.mayBeTrueInRange(used_key_size, index_left.data(), index_right.data(), key_types); diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index b64632b6139..1cff44142bc 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -202,7 +202,7 @@ struct Settings; M(UInt64, marks_compress_block_size, 65536, "Mark compress block size, the actual size of the block to compress.", 0) \ M(UInt64, primary_key_compress_block_size, 65536, "Primary compress block size, the actual size of the block to compress.", 0) \ M(Bool, primary_key_lazy_load, true, "Load primary key in memory on first use instead of on table initialization. 
This can save memory in the presence of a large number of tables.", 0) \ - \ + M(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ /** Projection settings. */ \ M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ From 31de27b149ab2922647ff7d9141871330cc9d743 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 21 Feb 2024 21:42:19 +0100 Subject: [PATCH 008/374] Do not load useless columns from the index in memory --- contrib/rapidjson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/rapidjson b/contrib/rapidjson index a9bc56c9165..c4ef90ccdbc 160000 --- a/contrib/rapidjson +++ b/contrib/rapidjson @@ -1 +1 @@ -Subproject commit a9bc56c9165f1dbbbcada64221bd3a59042c5b95 +Subproject commit c4ef90ccdbc21d5d5a628d08316bfd301e32d6fa From d0d84a840151675ee3c7a108709e4b0b486af577 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 21 Feb 2024 23:03:01 +0100 Subject: [PATCH 009/374] Add a test --- .../02998_primary_key_skip_columns.reference | 18 ++++++++++ .../02998_primary_key_skip_columns.sql | 33 +++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 tests/queries/0_stateless/02998_primary_key_skip_columns.reference create mode 100644 tests/queries/0_stateless/02998_primary_key_skip_columns.sql diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.reference b/tests/queries/0_stateless/02998_primary_key_skip_columns.reference new file mode 100644 index 00000000000..9df0a2c097c --- /dev/null +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.reference @@ -0,0 +1,18 @@ +100000 +14954 +798 +15908 +108 +120 +2334 +19 +Key size: 2400000 +100000 +14954 +798 +15907 +108 +120 +2334 +19 +Key size: 800008 diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql new file mode 100644 index 00000000000..801fa35fb52 --- /dev/null +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (a UInt64, b UInt64, c UInt64) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 1, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 1; +INSERT INTO test SELECT sipHash64(number, 1), sipHash64(number, 2), sipHash64(number, 3) FROM numbers(100000); + +SELECT count() FROM test; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; + +SELECT 'Key size: ', 
round(sum(primary_key_bytes_in_memory), -5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; + +ALTER TABLE test MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 0.9; + +DETACH TABLE test; +ATTACH TABLE test; + +SELECT count() FROM test; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; + +SELECT 'Key size: ', round(sum(primary_key_bytes_in_memory), 5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; + +DROP TABLE test; From 395ad35c93a185291a16449b1ac4d1dcecb1a127 Mon Sep 17 00:00:00 2001 From: Shanfeng Pang Date: Thu, 22 Feb 2024 10:41:16 +0800 Subject: [PATCH 010/374] fix LRUResource Cache bug --- src/Common/LRUResourceCache.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/LRUResourceCache.h b/src/Common/LRUResourceCache.h index 4ccaa272346..60b4053bff5 100644 --- a/src/Common/LRUResourceCache.h +++ b/src/Common/LRUResourceCache.h @@ -221,7 +221,7 @@ private: { std::lock_guard lock(mutex); auto it = cells.find(key); - if (it != cells.end() && !it->second.expired) + if (it != cells.end()) { if (!it->second.expired) { From 999cf88ab79cf71bc82e7be3140496697a661416 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 22 Feb 2024 09:54:39 +0100 Subject: [PATCH 011/374] Improve test --- tests/queries/0_stateless/02998_primary_key_skip_columns.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql index 801fa35fb52..b2dadcc5e7c 100644 --- a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql @@ -28,6 +28,6 @@ SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 80403209398191531 SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND c > 13239894303140990071 AND c < 16179795840886947236; SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; -SELECT 'Key size: ', round(sum(primary_key_bytes_in_memory), 5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; +SELECT 'Key size: ', round(sum(primary_key_bytes_in_memory), -5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; DROP TABLE test; From 9b10aebecc690e36ec3591ba7115991b00920289 Mon Sep 17 00:00:00 2001 From: Shanfeng Pang Date: Thu, 22 Feb 2024 17:24:59 +0800 Subject: [PATCH 012/374] add unit-test for bug fix --- src/Common/tests/gtest_lru_resource_cache.cpp | 27 
+++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/Common/tests/gtest_lru_resource_cache.cpp b/src/Common/tests/gtest_lru_resource_cache.cpp index bc037824ff8..94490d1e86d 100644 --- a/src/Common/tests/gtest_lru_resource_cache.cpp +++ b/src/Common/tests/gtest_lru_resource_cache.cpp @@ -45,6 +45,33 @@ struct MyWeight size_t operator()(const int & x) const { return static_cast(x); } }; +TEST(LRUResourceCache, remove2) +{ + using MyCache = DB::LRUResourceCache; + auto mcache = MyCache(10, 10); + for (int i = 1; i < 5; ++i) + { + auto load_int = [&] { return std::make_shared(i); }; + mcache.getOrSet(i, load_int); + } + + auto n = mcache.size(); + ASSERT_EQ(n, 4); + auto w = mcache.weight(); + ASSERT_EQ(w, 10); + auto holder4 = mcache.get(4); + ASSERT_TRUE(holder4 != nullptr); + mcache.tryRemove(4); + auto holder_reget_4 = mcache.get(4); + ASSERT_TRUE(holder_reget_4 == nullptr); + mcache.getOrSet(4, [&]() { return std::make_shared(4); }); + holder4.reset(); + auto holder1 = mcache.getOrSet(1, [&]() { return std::make_shared(1); }); + ASSERT_TRUE(holder1 != nullptr); + auto holder7 = mcache.getOrSet(7, [&] { return std::make_shared(7); }); + ASSERT_TRUE(holder7 != nullptr); +} + TEST(LRUResourceCache, evictOnWweight) { using MyCache = DB::LRUResourceCache; From 3ef159853c78e438e6088d60a64bcff2bbb77b17 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 22 Feb 2024 14:34:18 +0000 Subject: [PATCH 013/374] Do something to the test --- .../0_stateless/02998_primary_key_skip_columns.reference | 2 +- tests/queries/0_stateless/02998_primary_key_skip_columns.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.reference b/tests/queries/0_stateless/02998_primary_key_skip_columns.reference index 9df0a2c097c..ec44acbd16b 100644 --- a/tests/queries/0_stateless/02998_primary_key_skip_columns.reference +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.reference @@ -15,4 +15,4 @@ Key size: 2400000 120 2334 19 -Key size: 800008 +Key size ok: 1 1 diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql index b2dadcc5e7c..27672d7854e 100644 --- a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql @@ -28,6 +28,6 @@ SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 80403209398191531 SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND c > 13239894303140990071 AND c < 16179795840886947236; SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; -SELECT 'Key size: ', round(sum(primary_key_bytes_in_memory), -5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; +SELECT 'Key size ok: ', (sum(primary_key_bytes_in_memory) as s) >= 800000, s < 1200000 FROM system.parts WHERE database = currentDatabase() AND table = 'test'; DROP TABLE test; From f1e95fb78bae190bb87e93704cf5f88c70cdccf4 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 22 Feb 2024 15:38:44 +0100 Subject: [PATCH 014/374] Add a way to force read-through cache for merges --- src/Core/Settings.h | 1 + src/Disks/IO/ReadBufferFromRemoteFSGather.cpp | 10 +- .../Cached/CachedObjectStorage.cpp | 14 -- .../Cached/CachedObjectStorage.h | 2 - src/IO/ReadSettings.h | 2 +- 
src/Interpreters/Cache/FileSegment.cpp | 3 +- src/Interpreters/Context.cpp | 1 + .../MergeTree/MergeTreeSequentialSource.cpp | 2 +- .../integration/test_filesystem_cache/test.py | 79 ++++++++ .../users.d/cache_on_write_operations.xml | 7 + .../force_read_through_cache_on_merge.xml | 7 + ...system_cache_on_write_operations.reference | 170 ++++++++++++++++++ ...41_filesystem_cache_on_write_operations.sh | 81 +++++---- 13 files changed, 317 insertions(+), 62 deletions(-) create mode 100644 tests/integration/test_filesystem_cache/users.d/cache_on_write_operations.xml create mode 100644 tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 433195af9c3..db060bf712d 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -769,6 +769,7 @@ class IColumn; M(Bool, enable_filesystem_cache_on_write_operations, false, "Write into cache on write operations. To actually work this setting requires be added to disk config too", 0) \ M(Bool, enable_filesystem_cache_log, false, "Allows to record the filesystem caching log for each query", 0) \ M(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, "Allow to use the filesystem cache in passive mode - benefit from the existing cache entries, but don't put more entries into the cache. If you set this setting for heavy ad-hoc queries and leave it disabled for short real-time queries, this will allows to avoid cache threshing by too heavy queries and to improve the overall system efficiency.", 0) \ + M(Bool, force_read_through_cache_for_merges, false, "Force read-through cache for merges", 0) \ M(Bool, skip_download_if_exceeds_query_cache, true, "Skip download from remote filesystem if exceeds query cache size", 0) \ M(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), "Max remote filesystem cache size that can be downloaded by a single query", 0) \ M(Bool, throw_on_error_from_cache_on_write_operations, false, "Ignore error from cache when caching on write operations (INSERT, merges)", 0) \ diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 0b3ecca3587..1da39c7011c 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -16,12 +16,10 @@ using namespace DB; namespace { -bool withCache(const ReadSettings & settings) -{ - return settings.remote_fs_cache && settings.enable_filesystem_cache - && (!CurrentThread::getQueryId().empty() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache - || !settings.avoid_readthrough_cache_outside_query_context); -} + bool withCache(const ReadSettings & settings) + { + return settings.remote_fs_cache && settings.enable_filesystem_cache; + } } namespace DB diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index 1444f4c9c76..e3ab772e3b5 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -43,10 +43,6 @@ ReadSettings CachedObjectStorage::patchSettings(const ReadSettings & read_settin { ReadSettings modified_settings{read_settings}; modified_settings.remote_fs_cache = cache; - - if (!canUseReadThroughCache(read_settings)) - modified_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true; - return object_storage->patchSettings(modified_settings); } @@ -206,14 +202,4 @@ String 
CachedObjectStorage::getObjectsNamespace() const return object_storage->getObjectsNamespace(); } -bool CachedObjectStorage::canUseReadThroughCache(const ReadSettings & settings) -{ - if (!settings.avoid_readthrough_cache_outside_query_context) - return true; - - return CurrentThread::isInitialized() - && CurrentThread::get().getQueryContext() - && !CurrentThread::getQueryId().empty(); -} - } diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index 437baead7be..961c2709efc 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -119,8 +119,6 @@ public: const FileCacheSettings & getCacheSettings() const { return cache_settings; } - static bool canUseReadThroughCache(const ReadSettings & settings); - #if USE_AZURE_BLOB_STORAGE std::shared_ptr getAzureBlobStorageClient() override { diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index c397689d6ad..2c79735317d 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -99,7 +99,7 @@ struct ReadSettings bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false; bool enable_filesystem_cache_log = false; /// Don't populate cache when the read is not part of query execution (e.g. background thread). - bool avoid_readthrough_cache_outside_query_context = true; + bool force_read_through_cache_merges = false; size_t filesystem_cache_segments_batch_size = 20; size_t filesystem_cache_max_download_size = (128UL * 1024 * 1024 * 1024); diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 8bd89465917..7c0505889da 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include @@ -194,7 +195,7 @@ bool FileSegment::isDownloaded() const String FileSegment::getCallerId() { if (!CurrentThread::isInitialized() || CurrentThread::getQueryId().empty()) - return "None:" + toString(getThreadId()); + return fmt::format("None:{}:{}", getThreadName(), toString(getThreadId())); return std::string(CurrentThread::getQueryId()) + ":" + toString(getThreadId()); } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 55a4df10206..36b362e36bb 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -5079,6 +5079,7 @@ ReadSettings Context::getReadSettings() const res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; res.filesystem_cache_segments_batch_size = settings.filesystem_cache_segments_batch_size; + res.force_read_through_cache_merges = settings.force_read_through_cache_for_merges; res.filesystem_cache_max_download_size = settings.filesystem_cache_max_download_size; res.skip_download_if_exceeds_query_cache = settings.skip_download_if_exceeds_query_cache; diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index d0fbc316024..e375e8b0a9f 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -151,7 +151,7 @@ MergeTreeSequentialSource::MergeTreeSequentialSource( const auto & context = storage.getContext(); ReadSettings read_settings = context->getReadSettings(); - 
read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true; + read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = !read_settings.force_read_through_cache_merges; /// It does not make sense to use pthread_threadpool for background merges/mutations /// And also to preserve backward compatibility read_settings.local_fs_method = LocalFSReadMethod::pread; diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index eb5f896f7a9..c1ba6702dcf 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -19,6 +19,9 @@ def cluster(): main_configs=[ "config.d/storage_conf.xml", ], + user_configs=[ + "users.d/cache_on_write_operations.xml", + ], stay_alive=True, ) cluster.add_instance( @@ -35,6 +38,17 @@ def cluster(): ], stay_alive=True, ) + cluster.add_instance( + "node_force_read_through_cache_on_merge", + main_configs=[ + "config.d/storage_conf.xml", + ], + user_configs=[ + "users.d/force_read_through_cache_on_merge.xml", + "users.d/cache_on_write_operations.xml", + ], + stay_alive=True, + ) logging.info("Starting cluster...") cluster.start() @@ -323,3 +337,68 @@ def test_custom_cached_disk(cluster): "SELECT cache_path FROM system.disks WHERE name = 'custom_cached4'" ).strip() ) + + +def test_force_filesystem_cache_on_merges(cluster): + def test(node, forced_read_through_cache_on_merge): + node.query( + """ + DROP TABLE IF EXISTS test SYNC; + + CREATE TABLE test (key UInt32, value String) + Engine=MergeTree() + ORDER BY value + SETTINGS disk = disk( + type = cache, + path = 'force_cache_on_merges', + disk = 'hdd_blob', + max_file_segment_size = '1Ki', + cache_on_write_operations = 1, + boundary_alignment = '1Ki', + max_size = '10Gi', + max_elements = 10000000, + load_metadata_threads = 30); + + SYSTEM DROP FILESYSTEM CACHE; + INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; + INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; + """ + ) + assert int(node.query("SELECT count() FROM system.filesystem_cache")) > 0 + assert int(node.query("SELECT max(size) FROM system.filesystem_cache")) == 1024 + + write_count = int( + node.query( + "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" + ) + ) + assert write_count > 100000 + assert "" == node.query( + "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" + ) + + node.query("SYSTEM DROP FILESYSTEM CACHE") + node.query("OPTIMIZE TABLE test FINAL") + + new_write_count = int( + node.query( + "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" + ) + ) + assert new_write_count >= write_count + + if forced_read_through_cache_on_merge: + assert 100000 < int( + node.query( + "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" + ) + ) + else: + assert "" == node.query( + "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" + ) + + node = cluster.instances["node_force_read_through_cache_on_merge"] + test(node, True) + node = cluster.instances["node"] + test(node, False) diff --git a/tests/integration/test_filesystem_cache/users.d/cache_on_write_operations.xml b/tests/integration/test_filesystem_cache/users.d/cache_on_write_operations.xml new file mode 100644 index 00000000000..5de169edc1e --- /dev/null +++ b/tests/integration/test_filesystem_cache/users.d/cache_on_write_operations.xml @@ -0,0 +1,7 @@ + + 
+ + 1 + + + diff --git a/tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml b/tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml new file mode 100644 index 00000000000..4d26a1a8bc7 --- /dev/null +++ b/tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference index 157837983f7..c03b928684b 100644 --- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference +++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference @@ -1,62 +1,232 @@ Using storage policy: s3_cache +DROP TABLE IF EXISTS test_02241 +CREATE TABLE test_02241 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false, ratio_of_defaults_for_sparse_serialization = 1 +SYSTEM STOP MERGES test_02241 +SYSTEM DROP FILESYSTEM CACHE +SELECT file_segment_range_begin, file_segment_range_end, size, state + FROM + ( + SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path + FROM + ( + SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path + FROM system.remote_data_paths + ) AS data_paths + INNER JOIN + system.filesystem_cache AS caches + ON data_paths.cache_path = caches.cache_path + ) + WHERE endsWith(local_path, 'data.bin') + FORMAT Vertical +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path 0 +SELECT count(), sum(size) FROM system.filesystem_cache 0 0 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) +SELECT file_segment_range_begin, file_segment_range_end, size, state + FROM + ( + SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path + FROM + ( + SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path + FROM system.remote_data_paths + ) AS data_paths + INNER JOIN + system.filesystem_cache AS caches + ON data_paths.cache_path = caches.cache_path + ) + WHERE endsWith(local_path, 'data.bin') + FORMAT Vertical Row 1: ────── file_segment_range_begin: 0 file_segment_range_end: 745 size: 746 state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path 8 +SELECT count(), sum(size) FROM system.filesystem_cache 8 1100 +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0 0 +SELECT * FROM test_02241 FORMAT Null +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0 2 +SELECT * FROM test_02241 FORMAT Null +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0 2 +SELECT count(), sum(size) size FROM system.filesystem_cache 8 1100 +SYSTEM DROP FILESYSTEM CACHE +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200) +SELECT file_segment_range_begin, file_segment_range_end, size, state + FROM + ( + SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path + FROM + ( + SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path + FROM 
system.remote_data_paths + ) AS data_paths + INNER JOIN + system.filesystem_cache AS caches + ON data_paths.cache_path = caches.cache_path + ) + WHERE endsWith(local_path, 'data.bin') + FORMAT Vertical; Row 1: ────── file_segment_range_begin: 0 file_segment_range_end: 1659 size: 1660 state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path 8 +SELECT count(), sum(size) FROM system.filesystem_cache 8 2014 +SELECT count(), sum(size) FROM system.filesystem_cache 8 2014 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0 +SELECT count(), sum(size) FROM system.filesystem_cache 8 2014 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(300, 10000) +SELECT count(), sum(size) FROM system.filesystem_cache 24 84045 +SYSTEM START MERGES test_02241 +SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' +85146 +SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' +OPTIMIZE TABLE test_02241 FINAL +SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' +251542 +SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' +SELECT count(), sum(size) FROM system.filesystem_cache 32 167243 +ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100 +SELECT count(), sum(size) FROM system.filesystem_cache 41 250541 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000) +SYSTEM FLUSH LOGS INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000) 0 +SELECT count() FROM test_02241 5010500 +SELECT count() FROM test_02241 WHERE value LIKE '%010%' 18816 Using storage policy: local_cache +DROP TABLE IF EXISTS test_02241 +CREATE TABLE test_02241 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false, ratio_of_defaults_for_sparse_serialization = 1 +SYSTEM STOP MERGES test_02241 +SYSTEM DROP FILESYSTEM CACHE +SELECT file_segment_range_begin, file_segment_range_end, size, state + FROM + ( + SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path + FROM + ( + SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path + FROM system.remote_data_paths + ) AS data_paths + INNER JOIN + system.filesystem_cache AS caches + ON data_paths.cache_path = caches.cache_path + ) + WHERE endsWith(local_path, 'data.bin') + FORMAT Vertical +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path 0 +SELECT count(), sum(size) FROM system.filesystem_cache 0 0 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) +SELECT file_segment_range_begin, file_segment_range_end, size, state + FROM + ( + SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path + FROM + ( + SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path + FROM system.remote_data_paths + ) AS data_paths + INNER JOIN + system.filesystem_cache AS caches + ON data_paths.cache_path = caches.cache_path + ) + WHERE 
endsWith(local_path, 'data.bin') + FORMAT Vertical Row 1: ────── file_segment_range_begin: 0 file_segment_range_end: 745 size: 746 state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path 8 +SELECT count(), sum(size) FROM system.filesystem_cache 8 1100 +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0 0 +SELECT * FROM test_02241 FORMAT Null +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0 2 +SELECT * FROM test_02241 FORMAT Null +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0 2 +SELECT count(), sum(size) size FROM system.filesystem_cache 8 1100 +SYSTEM DROP FILESYSTEM CACHE +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200) +SELECT file_segment_range_begin, file_segment_range_end, size, state + FROM + ( + SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path + FROM + ( + SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path + FROM system.remote_data_paths + ) AS data_paths + INNER JOIN + system.filesystem_cache AS caches + ON data_paths.cache_path = caches.cache_path + ) + WHERE endsWith(local_path, 'data.bin') + FORMAT Vertical; Row 1: ────── file_segment_range_begin: 0 file_segment_range_end: 1659 size: 1660 state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path 8 +SELECT count(), sum(size) FROM system.filesystem_cache 8 2014 +SELECT count(), sum(size) FROM system.filesystem_cache 8 2014 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0 +SELECT count(), sum(size) FROM system.filesystem_cache 8 2014 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(300, 10000) +SELECT count(), sum(size) FROM system.filesystem_cache 24 84045 +SYSTEM START MERGES test_02241 +SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' +81715476 +SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' +OPTIMIZE TABLE test_02241 FINAL +SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' +81881872 +SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' +SELECT count(), sum(size) FROM system.filesystem_cache 32 167243 +ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100 +SELECT count(), sum(size) FROM system.filesystem_cache 41 250541 +INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000) +SYSTEM FLUSH LOGS INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000) 0 +SELECT count() FROM test_02241 5010500 +SELECT count() FROM test_02241 WHERE value LIKE '%010%' 18816 diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh index 96f61cf61e8..2b237492e98 100755 --- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh +++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh @@ -10,13 +10,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for STORAGE_POLICY in 
's3_cache' 'local_cache'; do echo "Using storage policy: $STORAGE_POLICY" - $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02241" - $CLICKHOUSE_CLIENT --query "CREATE TABLE test_02241 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='$STORAGE_POLICY', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false, ratio_of_defaults_for_sparse_serialization = 1" - $CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES test_02241" + $CLICKHOUSE_CLIENT --echo --query "DROP TABLE IF EXISTS test_02241" + $CLICKHOUSE_CLIENT --echo --query "CREATE TABLE test_02241 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='$STORAGE_POLICY', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false, ratio_of_defaults_for_sparse_serialization = 1" + $CLICKHOUSE_CLIENT --echo --query "SYSTEM STOP MERGES test_02241" - $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE" + $CLICKHOUSE_CLIENT --echo --query "SYSTEM DROP FILESYSTEM CACHE" - $CLICKHOUSE_CLIENT -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -32,12 +32,12 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do WHERE endsWith(local_path, 'data.bin') FORMAT Vertical" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path" + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)" + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)" - $CLICKHOUSE_CLIENT -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -53,24 +53,24 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do WHERE endsWith(local_path, 'data.bin') FORMAT Vertical" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path" + $CLICKHOUSE_CLIENT --echo 
--query "SELECT count(), sum(size) FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0" - $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02241 FORMAT Null" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0" + $CLICKHOUSE_CLIENT --echo --query "SELECT * FROM test_02241 FORMAT Null" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0" - $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02241 FORMAT Null" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0" + $CLICKHOUSE_CLIENT --echo --query "SELECT * FROM test_02241 FORMAT Null" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) size FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) size FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE" + $CLICKHOUSE_CLIENT --echo --query "SYSTEM DROP FILESYSTEM CACHE" - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)" + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)" - $CLICKHOUSE_CLIENT -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -86,27 +86,34 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do WHERE endsWith(local_path, 'data.bin') FORMAT Vertical;" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path" + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0" + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT 
--enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)" - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(300, 10000)" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)" + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(300, 10000)" + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --query "SYSTEM START MERGES test_02241" + $CLICKHOUSE_CLIENT --echo --query "SYSTEM START MERGES test_02241" - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=1 --query "OPTIMIZE TABLE test_02241 FINAL" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" + $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=1 --mutations_sync=2 --query "ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100" - $CLICKHOUSE_CLIENT --query "SELECT count(), sum(size) FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000)" + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "OPTIMIZE TABLE test_02241 FINAL" - $CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS" + $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" + $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" + + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" + + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --mutations_sync=2 --query "ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100" + $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" + $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000)" + + $CLICKHOUSE_CLIENT --echo --query "SYSTEM FLUSH LOGS" $CLICKHOUSE_CLIENT -n --query "SELECT query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read @@ -121,6 +128,6 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do DESC LIMIT 1" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM test_02241" - $CLICKHOUSE_CLIENT --query "SELECT count() FROM test_02241 WHERE value LIKE '%010%'" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM test_02241" + $CLICKHOUSE_CLIENT --echo --query "SELECT count() FROM test_02241 WHERE value LIKE '%010%'" done From a80747b2385647678771281d815867ef87b580f6 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 22 Feb 2024 15:57:10 +0000 Subject: [PATCH 015/374] Undo something to the test --- .../0_stateless/02998_primary_key_skip_columns.reference | 2 +- tests/queries/0_stateless/02998_primary_key_skip_columns.sql | 
2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.reference b/tests/queries/0_stateless/02998_primary_key_skip_columns.reference index ec44acbd16b..08ccdb83b11 100644 --- a/tests/queries/0_stateless/02998_primary_key_skip_columns.reference +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.reference @@ -15,4 +15,4 @@ Key size: 2400000 120 2334 19 -Key size ok: 1 1 +Key size: 800000 diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql index 27672d7854e..b2dadcc5e7c 100644 --- a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql @@ -28,6 +28,6 @@ SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 80403209398191531 SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND c > 13239894303140990071 AND c < 16179795840886947236; SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; -SELECT 'Key size ok: ', (sum(primary_key_bytes_in_memory) as s) >= 800000, s < 1200000 FROM system.parts WHERE database = currentDatabase() AND table = 'test'; +SELECT 'Key size: ', round(sum(primary_key_bytes_in_memory), -5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; DROP TABLE test; From a34f42ca22c8a4820e4cbcf67cdd48a3589e3879 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 13 Jan 2024 18:48:47 +0300 Subject: [PATCH 016/374] Remove lock from the ReadProgressCallback It looks redundant (added in 5ef51ed), though it has "fix tests" in the log message, but CI reports is not available for the commits from that PR [1], so let's try. [1]: https://github.com/ClickHouse/ClickHouse/pull/37543 Also this can be a big problem, since the code under that lock (throttling or quotas with previous implementation that uses boost::atomic_shared_ptr) may sleep. Some numbers: run | time ------------------------|------ max_threads=100 before | 23.1 max_threads=100 after | 15.1 max_threads=4500 before | 4.5 max_threads=4500 after | 2.3 Query: select sum(number) from numbers_mt(2000000) settings max_threads=X, max_block_size = 1 Signed-off-by: Azat Khuzhin --- src/QueryPipeline/ReadProgressCallback.cpp | 2 -- src/QueryPipeline/ReadProgressCallback.h | 1 - tests/performance/small_block_contention.xml | 3 +++ 3 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 tests/performance/small_block_contention.xml diff --git a/src/QueryPipeline/ReadProgressCallback.cpp b/src/QueryPipeline/ReadProgressCallback.cpp index 59843d8791d..e90fc24d882 100644 --- a/src/QueryPipeline/ReadProgressCallback.cpp +++ b/src/QueryPipeline/ReadProgressCallback.cpp @@ -126,8 +126,6 @@ bool ReadProgressCallback::onProgress(uint64_t read_rows, uint64_t read_bytes, c CurrentThread::updatePerformanceCountersIfNeeded(); - std::lock_guard lock(limits_and_quotas_mutex); - /// TODO: Should be done in PipelineExecutor. 
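    /// Note: without limits_and_quotas_mutex, the throttling below (and the quota accounting the mutex used
    /// to cover) is reached concurrently from the executor threads; the lock is treated as redundant here,
    /// and holding it was costly because throttle() may sleep.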
for (const auto & limits : storage_limits) limits.local_limits.speed_limits.throttle(progress.read_rows, progress.read_bytes, total_rows, total_stopwatch.elapsedMicroseconds(), limits.local_limits.timeout_overflow_mode); diff --git a/src/QueryPipeline/ReadProgressCallback.h b/src/QueryPipeline/ReadProgressCallback.h index 5dbf3344bdf..7dfed9df5da 100644 --- a/src/QueryPipeline/ReadProgressCallback.h +++ b/src/QueryPipeline/ReadProgressCallback.h @@ -41,7 +41,6 @@ private: /// The total number of bytes to read. For progress bar. std::atomic_size_t total_bytes = 0; - std::mutex limits_and_quotas_mutex; Stopwatch total_stopwatch{CLOCK_MONOTONIC_COARSE}; /// Including waiting time bool update_profile_events = true; diff --git a/tests/performance/small_block_contention.xml b/tests/performance/small_block_contention.xml new file mode 100644 index 00000000000..ce1995a0a29 --- /dev/null +++ b/tests/performance/small_block_contention.xml @@ -0,0 +1,3 @@ + + select sum(number) from numbers_mt(200000) settings max_threads=100, max_block_size = 1 format Null + From 18741f122eabaeb7903f355958af1e1a88818e83 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 26 Feb 2024 12:42:13 +0800 Subject: [PATCH 017/374] Move a setting to server setting --- src/Core/ServerSettings.h | 2 ++ src/Core/Settings.h | 1 - src/Interpreters/Context.cpp | 2 +- .../config.d/force_read_through_cache_for_merges.xml | 3 +++ tests/integration/test_filesystem_cache/test.py | 2 +- .../users.d/force_read_through_cache_on_merge.xml | 7 ------- 6 files changed, 7 insertions(+), 10 deletions(-) create mode 100644 tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml delete mode 100644 tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index de2a4e9b755..0283b98638f 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -103,6 +103,8 @@ namespace DB M(Bool, async_load_databases, false, "Enable asynchronous loading of databases and tables to speedup server startup. Queries to not yet loaded entity will be blocked until load is finished.", 0) \ M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \ \ + M(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \ + \ M(Seconds, keep_alive_timeout, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ M(Seconds, replicated_fetches_http_connection_timeout, 0, "HTTP connection timeout for part fetch requests. Inherited from default profile `http_connection_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_send_timeout, 0, "HTTP send timeout for part fetch requests. Inherited from default profile `http_send_timeout` if not set explicitly.", 0) \ diff --git a/src/Core/Settings.h b/src/Core/Settings.h index db060bf712d..433195af9c3 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -769,7 +769,6 @@ class IColumn; M(Bool, enable_filesystem_cache_on_write_operations, false, "Write into cache on write operations. 
To actually work this setting requires be added to disk config too", 0) \ M(Bool, enable_filesystem_cache_log, false, "Allows to record the filesystem caching log for each query", 0) \ M(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, "Allow to use the filesystem cache in passive mode - benefit from the existing cache entries, but don't put more entries into the cache. If you set this setting for heavy ad-hoc queries and leave it disabled for short real-time queries, this will allows to avoid cache threshing by too heavy queries and to improve the overall system efficiency.", 0) \ - M(Bool, force_read_through_cache_for_merges, false, "Force read-through cache for merges", 0) \ M(Bool, skip_download_if_exceeds_query_cache, true, "Skip download from remote filesystem if exceeds query cache size", 0) \ M(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), "Max remote filesystem cache size that can be downloaded by a single query", 0) \ M(Bool, throw_on_error_from_cache_on_write_operations, false, "Ignore error from cache when caching on write operations (INSERT, merges)", 0) \ diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 36b362e36bb..a974eaca067 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -5079,7 +5079,7 @@ ReadSettings Context::getReadSettings() const res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; res.filesystem_cache_segments_batch_size = settings.filesystem_cache_segments_batch_size; - res.force_read_through_cache_merges = settings.force_read_through_cache_for_merges; + res.force_read_through_cache_merges = getServerSettings().force_read_through_cache_for_merges; res.filesystem_cache_max_download_size = settings.filesystem_cache_max_download_size; res.skip_download_if_exceeds_query_cache = settings.skip_download_if_exceeds_query_cache; diff --git a/tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml b/tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml new file mode 100644 index 00000000000..bb2a6e850a4 --- /dev/null +++ b/tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml @@ -0,0 +1,3 @@ + + 1 + diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index c1ba6702dcf..f32fa4e9823 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -42,9 +42,9 @@ def cluster(): "node_force_read_through_cache_on_merge", main_configs=[ "config.d/storage_conf.xml", + "config.d/force_read_through_cache_for_merges.xml", ], user_configs=[ - "users.d/force_read_through_cache_on_merge.xml", "users.d/cache_on_write_operations.xml", ], stay_alive=True, diff --git a/tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml b/tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml deleted file mode 100644 index 4d26a1a8bc7..00000000000 --- a/tests/integration/test_filesystem_cache/users.d/force_read_through_cache_on_merge.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - 1 - - - From bf5affbe640976d2b73e12f5213a13baacf40619 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 26 Feb 2024 16:37:09 +0800 Subject: [PATCH 018/374] Fix test --- 
.../02241_filesystem_cache_on_write_operations.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh index 2b237492e98..ee1d942a421 100755 --- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh +++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh @@ -99,14 +99,8 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do $CLICKHOUSE_CLIENT --echo --query "SYSTEM START MERGES test_02241" - $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" - $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" - $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "OPTIMIZE TABLE test_02241 FINAL" - $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" - $CLICKHOUSE_CLIENT --echo --query "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" - $CLICKHOUSE_CLIENT --echo --query "SELECT count(), sum(size) FROM system.filesystem_cache" $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --mutations_sync=2 --query "ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100" From 277e8d965555b4fcd09a755282666bcae36adae6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 26 Feb 2024 14:03:53 +0800 Subject: [PATCH 019/374] Fix usage plain metadata type with new configuration option --- src/Disks/DiskType.cpp | 48 +++++++++++++++++++ src/Disks/DiskType.h | 34 +------------ src/Disks/ObjectStorages/IObjectStorage.h | 1 + .../ObjectStorages/MetadataStorageFactory.cpp | 36 +++++++++++--- .../ObjectStorages/MetadataStorageFactory.h | 7 +++ .../ObjectStorages/ObjectStorageFactory.cpp | 43 +++++++++++++---- src/Disks/ObjectStorages/PlainObjectStorage.h | 29 +++++++++++ .../RegisterDiskObjectStorage.cpp | 24 ++-------- src/Disks/ObjectStorages/S3/S3ObjectStorage.h | 21 -------- .../configs/disk_s3.xml | 7 +++ .../test_attach_backup_from_s3_plain/test.py | 25 ++++++---- 11 files changed, 178 insertions(+), 97 deletions(-) create mode 100644 src/Disks/ObjectStorages/PlainObjectStorage.h diff --git a/src/Disks/DiskType.cpp b/src/Disks/DiskType.cpp index 218b6ee7f26..1778ae8025b 100644 --- a/src/Disks/DiskType.cpp +++ b/src/Disks/DiskType.cpp @@ -1,7 +1,27 @@ #include "DiskType.h" +#include +#include namespace DB { +namespace ErrorCodes +{ + extern const int UNKNOWN_ELEMENT_IN_CONFIG; +} + +MetadataStorageType metadataTypeFromString(const String & type) +{ + auto check_type = Poco::toLower(type); + if (check_type == "local") + return MetadataStorageType::Local; + if (check_type == "plain") + return MetadataStorageType::Plain; + if (check_type == "web") + return MetadataStorageType::StaticWeb; + + throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, + "MetadataStorageFactory: unknown metadata storage type: {}", type); +} bool DataSourceDescription::operator==(const DataSourceDescription & other) const { @@ -14,4 +34,32 @@ bool DataSourceDescription::sameKind(const DataSourceDescription & other) const == std::tie(other.type, other.object_storage_type, other.description); } +std::string DataSourceDescription::toString() const +{ + switch (type) + { + case DataSourceType::Local: + return "local"; + case DataSourceType::RAM: + return "memory"; + case 
DataSourceType::ObjectStorage: + { + switch (object_storage_type) + { + case ObjectStorageType::S3: + return "s3"; + case ObjectStorageType::HDFS: + return "hdfs"; + case ObjectStorageType::Azure: + return "azure_blob_storage"; + case ObjectStorageType::Local: + return "local_blob_storage"; + case ObjectStorageType::Web: + return "web"; + case ObjectStorageType::None: + return "none"; + } + } + } +} } diff --git a/src/Disks/DiskType.h b/src/Disks/DiskType.h index 15940ea9155..36fe4d83004 100644 --- a/src/Disks/DiskType.h +++ b/src/Disks/DiskType.h @@ -17,7 +17,6 @@ enum class ObjectStorageType { None, S3, - S3_Plain, Azure, HDFS, Web, @@ -30,9 +29,9 @@ enum class MetadataStorageType Local, Plain, StaticWeb, - Memory, }; +MetadataStorageType metadataTypeFromString(const String & type); String toString(DataSourceType data_source_type); struct DataSourceDescription @@ -49,36 +48,7 @@ struct DataSourceDescription bool operator==(const DataSourceDescription & other) const; bool sameKind(const DataSourceDescription & other) const; - std::string toString() const - { - switch (type) - { - case DataSourceType::Local: - return "local"; - case DataSourceType::RAM: - return "memory"; - case DataSourceType::ObjectStorage: - { - switch (object_storage_type) - { - case ObjectStorageType::S3: - return "s3"; - case ObjectStorageType::S3_Plain: - return "s3_plain"; - case ObjectStorageType::HDFS: - return "hdfs"; - case ObjectStorageType::Azure: - return "azure_blob_storage"; - case ObjectStorageType::Local: - return "local_blob_storage"; - case ObjectStorageType::Web: - return "web"; - case ObjectStorageType::None: - return "none"; - } - } - } - } + std::string toString() const; }; } diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index 56c269a3fc5..fde97d82ad1 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -218,6 +218,7 @@ public: virtual bool isReadOnly() const { return false; } virtual bool isWriteOnce() const { return false; } + virtual bool isPlain() const { return false; } virtual bool supportParallelWrite() const { return false; } diff --git a/src/Disks/ObjectStorages/MetadataStorageFactory.cpp b/src/Disks/ObjectStorages/MetadataStorageFactory.cpp index 52a0b9ec268..adc1f84372c 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFactory.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFactory.cpp @@ -32,6 +32,35 @@ void MetadataStorageFactory::registerMetadataStorageType(const std::string & met } } +std::string MetadataStorageFactory::getCompatibilityMetadataTypeHint(const ObjectStorageType & type) +{ + switch (type) + { + case ObjectStorageType::S3: + case ObjectStorageType::HDFS: + case ObjectStorageType::Local: + case ObjectStorageType::Azure: + return "local"; + case ObjectStorageType::Web: + return "web"; + default: + return ""; + } +} + +std::string MetadataStorageFactory::getMetadataType( + const Poco::Util::AbstractConfiguration & config, + const std::string & config_prefix, + const std::string & compatibility_type_hint) +{ + if (compatibility_type_hint.empty() && !config.has(config_prefix + ".metadata_type")) + { + throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Expected `metadata_type` in config"); + } + + return config.getString(config_prefix + ".metadata_type", compatibility_type_hint); +} + MetadataStoragePtr MetadataStorageFactory::create( const std::string & name, const Poco::Util::AbstractConfiguration & config, @@ -39,12 +68,7 @@ MetadataStoragePtr 
MetadataStorageFactory::create( ObjectStoragePtr object_storage, const std::string & compatibility_type_hint) const { - if (compatibility_type_hint.empty() && !config.has(config_prefix + ".metadata_type")) - { - throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Expected `metadata_type` in config"); - } - - const auto type = config.getString(config_prefix + ".metadata_type", compatibility_type_hint); + const auto type = getMetadataType(config, config_prefix, compatibility_type_hint); const auto it = registry.find(type); if (it == registry.end()) diff --git a/src/Disks/ObjectStorages/MetadataStorageFactory.h b/src/Disks/ObjectStorages/MetadataStorageFactory.h index 5f61125c599..467cd3cef98 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFactory.h +++ b/src/Disks/ObjectStorages/MetadataStorageFactory.h @@ -25,6 +25,13 @@ public: ObjectStoragePtr object_storage, const std::string & compatibility_type_hint) const; + static std::string getMetadataType( + const Poco::Util::AbstractConfiguration & config, + const std::string & config_prefix, + const std::string & compatibility_type_hint = ""); + + static std::string getCompatibilityMetadataTypeHint(const ObjectStorageType & type); + private: using Registry = std::unordered_map; Registry registry; diff --git a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp index b3626135177..6f6ff199902 100644 --- a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp +++ b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp @@ -16,8 +16,10 @@ #ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD #include #include +#include #include #endif +#include #include #include @@ -32,6 +34,28 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +namespace +{ + template + ObjectStoragePtr createObjectStorage( + const Poco::Util::AbstractConfiguration & config, + const std::string & config_prefix, + Args && ...args) + { + auto compatibility_hint = MetadataStorageFactory::getCompatibilityMetadataTypeHint(ObjectStorageType::S3); + auto metadata_type = MetadataStorageFactory::getMetadataType(config, config_prefix, compatibility_hint); + + if (metadataTypeFromString(metadata_type) == MetadataStorageType::Plain) + { + return std::make_shared>(std::forward(args)...); + } + else + { + return std::make_shared(std::forward(args)...); + } + } +} + ObjectStorageFactory & ObjectStorageFactory::instance() { static ObjectStorageFactory factory; @@ -129,12 +153,12 @@ void registerS3ObjectStorage(ObjectStorageFactory & factory) auto client = getClient(config, config_prefix, context, *settings); auto key_generator = getKeyGenerator(disk_type, uri, config, config_prefix); - auto object_storage = std::make_shared( - std::move(client), std::move(settings), uri, s3_capabilities, key_generator, name); + auto object_storage = createObjectStorage( + config, config_prefix, std::move(client), std::move(settings), uri, s3_capabilities, key_generator, name); /// NOTE: should we still perform this check for clickhouse-disks? 
if (!skip_access_check) - checkS3Capabilities(*object_storage, s3_capabilities, name, uri.key); + checkS3Capabilities(*dynamic_cast(object_storage.get()), s3_capabilities, name, uri.key); return object_storage; }); @@ -165,12 +189,12 @@ void registerS3PlainObjectStorage(ObjectStorageFactory & factory) auto client = getClient(config, config_prefix, context, *settings); auto key_generator = getKeyGenerator(disk_type, uri, config, config_prefix); - auto object_storage = std::make_shared( + auto object_storage = std::make_shared>( std::move(client), std::move(settings), uri, s3_capabilities, key_generator, name); /// NOTE: should we still perform this check for clickhouse-disks? if (!skip_access_check) - checkS3Capabilities(*object_storage, s3_capabilities, name, uri.key); + checkS3Capabilities(*dynamic_cast(object_storage.get()), s3_capabilities, name, uri.key); return object_storage; }); @@ -198,7 +222,7 @@ void registerHDFSObjectStorage(ObjectStorageFactory & factory) context->getSettingsRef().hdfs_replication ); - return std::make_unique(uri, std::move(settings), config); + return createObjectStorage(config, config_prefix, uri, std::move(settings), config); }); } #endif @@ -214,7 +238,8 @@ void registerAzureObjectStorage(ObjectStorageFactory & factory) bool /* skip_access_check */) -> ObjectStoragePtr { String container_name = config.getString(config_prefix + ".container_name", "default-container"); - return std::make_unique( + return createObjectStorage( + config, config_prefix, name, getAzureBlobContainerClient(config, config_prefix), getAzureBlobStorageSettings(config, config_prefix, context), @@ -248,7 +273,7 @@ void registerWebObjectStorage(ObjectStorageFactory & factory) ErrorCodes::BAD_ARGUMENTS, "Bad URI: `{}`. Error: {}", uri, e.what()); } - return std::make_shared(uri, context); + return createObjectStorage(config, config_prefix, uri, context); }); } @@ -266,7 +291,7 @@ void registerLocalObjectStorage(ObjectStorageFactory & factory) loadDiskLocalConfig(name, config, config_prefix, context, object_key_prefix, keep_free_space_bytes); /// keys are mapped to the fs, object_key_prefix is a directory also fs::create_directories(object_key_prefix); - return std::make_shared(object_key_prefix); + return createObjectStorage(config, config_prefix, object_key_prefix); }); } #endif diff --git a/src/Disks/ObjectStorages/PlainObjectStorage.h b/src/Disks/ObjectStorages/PlainObjectStorage.h new file mode 100644 index 00000000000..3a81b85c44b --- /dev/null +++ b/src/Disks/ObjectStorages/PlainObjectStorage.h @@ -0,0 +1,29 @@ +#pragma once +#include + +namespace DB +{ + +/// Do not encode keys, store as-is, and do not require separate disk for metadata. +/// But because of this does not support renames/hardlinks/attrs/... +/// +/// NOTE: This disk has excessive API calls. +template +class PlainObjectStorage : public BaseObjectStorage +{ +public: + template + explicit PlainObjectStorage(Args && ...args) + : BaseObjectStorage(std::forward(args)...) 
{} + + std::string getName() const override { return "" + BaseObjectStorage::getName(); } + + /// Notes: + /// - supports BACKUP to this disk + /// - does not support INSERT into MergeTree table on this disk + bool isWriteOnce() const override { return true; } + + bool isPlain() const override { return true; } +}; + +} diff --git a/src/Disks/ObjectStorages/RegisterDiskObjectStorage.cpp b/src/Disks/ObjectStorages/RegisterDiskObjectStorage.cpp index 383a0b079b5..669a0102951 100644 --- a/src/Disks/ObjectStorages/RegisterDiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/RegisterDiskObjectStorage.cpp @@ -10,25 +10,6 @@ namespace DB void registerObjectStorages(); void registerMetadataStorages(); -static std::string getCompatibilityMetadataTypeHint(const ObjectStorageType & type) -{ - switch (type) - { - case ObjectStorageType::S3: - case ObjectStorageType::HDFS: - case ObjectStorageType::Local: - case ObjectStorageType::Azure: - return "local"; - case ObjectStorageType::S3_Plain: - return "plain"; - case ObjectStorageType::Web: - return "web"; - case ObjectStorageType::None: - return ""; - } - UNREACHABLE(); -} - void registerDiskObjectStorage(DiskFactory & factory, bool global_skip_access_check) { registerObjectStorages(); @@ -47,7 +28,10 @@ void registerDiskObjectStorage(DiskFactory & factory, bool global_skip_access_ch std::string compatibility_metadata_type_hint; if (!config.has(config_prefix + ".metadata_type")) { - compatibility_metadata_type_hint = getCompatibilityMetadataTypeHint(object_storage->getType()); + if (object_storage->isPlain()) + compatibility_metadata_type_hint = "plain"; + else + compatibility_metadata_type_hint = MetadataStorageFactory::getCompatibilityMetadataTypeHint(object_storage->getType()); } auto metadata_storage = MetadataStorageFactory::instance().create( diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index ab0fa5bed68..4ece98c5ec4 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -182,27 +182,6 @@ private: LoggerPtr log; }; -/// Do not encode keys, store as-is, and do not require separate disk for metadata. -/// But because of this does not support renames/hardlinks/attrs/... -/// -/// NOTE: This disk has excessive API calls. -class S3PlainObjectStorage : public S3ObjectStorage -{ -public: - std::string getName() const override { return "S3PlainObjectStorage"; } - - template - explicit S3PlainObjectStorage(Args && ...args) - : S3ObjectStorage("S3PlainObjectStorage", std::forward(args)...) {} - - ObjectStorageType getType() const override { return ObjectStorageType::S3_Plain; } - - /// Notes: - /// - supports BACKUP to this disk - /// - does not support INSERT into MergeTree table on this disk - bool isWriteOnce() const override { return true; } -}; - } #endif diff --git a/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml b/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml index 779e4b6ae21..3166eea7ccb 100644 --- a/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml +++ b/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml @@ -8,9 +8,16 @@ minio minio123 + + object_storage + local + plain + local_plain/ +
backup_disk_s3_plain + backup_disk_local_plain diff --git a/tests/integration/test_attach_backup_from_s3_plain/test.py b/tests/integration/test_attach_backup_from_s3_plain/test.py index e575c487b7a..4a8da1e6d66 100644 --- a/tests/integration/test_attach_backup_from_s3_plain/test.py +++ b/tests/integration/test_attach_backup_from_s3_plain/test.py @@ -20,17 +20,27 @@ def start_cluster(): finally: cluster.shutdown() +s3_disk_def = """disk(type=s3_plain, + endpoint='http://minio1:9001/root/data/disks/disk_s3_plain/{backup_name}/', + access_key_id='minio', + secret_access_key='minio123');""" + +local_disk_def = "disk(type=object_storage, object_storage_type = 'local', metadata_type = 'plain'" @pytest.mark.parametrize( - "table_name,backup_name,storage_policy,min_bytes_for_wide_part", + "table_name,backup_name,storage_policy,disk_def,min_bytes_for_wide_part", [ pytest.param( - "compact", "backup_compact", "s3_backup_compact", int(1e9), id="compact" + "compact", "backup_compact_s3", "backup_disk_s3_plain", s3_disk_def, int(1e9), id="compact" ), - pytest.param("wide", "backup_wide", "s3_backup_wide", int(0), id="wide"), + pytest.param("wide", "backup_wide_s3", "backup_disk_s3_plain", s3_disk_def, int(0), id="wide"), + pytest.param( + "compact", "backup_compact_local", "backup_disk_local_plain", local_disk_def, int(1e9), id="compact" + ), + pytest.param("wide", "backup_wide_local", "backup_disk_local_plain", local_disk_def, int(0), id="wide"), ], ) -def test_attach_part(table_name, backup_name, storage_policy, min_bytes_for_wide_part): +def test_attach_part(table_name, backup_name, storage_policy, disk_def, min_bytes_for_wide_part): node.query( f""" -- Catch any errors (NOTE: warnings are ok) @@ -45,7 +55,7 @@ def test_attach_part(table_name, backup_name, storage_policy, min_bytes_for_wide settings min_bytes_for_wide_part={min_bytes_for_wide_part} as select number%5 part, number key from numbers(100); - backup table ordinary_db.{table_name} TO Disk('backup_disk_s3_plain', '{backup_name}') settings deduplicate_files=0; + backup table ordinary_db.{table_name} TO Disk('{storage_policy}', '{backup_name}') settings deduplicate_files=0; drop table ordinary_db.{table_name}; attach table ordinary_db.{table_name} (part UInt8, key UInt64) @@ -53,10 +63,7 @@ def test_attach_part(table_name, backup_name, storage_policy, min_bytes_for_wide order by key partition by part settings max_suspicious_broken_parts=0, - disk=disk(type=s3_plain, - endpoint='http://minio1:9001/root/data/disks/disk_s3_plain/{backup_name}/', - access_key_id='minio', - secret_access_key='minio123'); + disk={disk_def} """ ) From 69b5bd02a915ae044b4116de759d11ae80525dc5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 26 Feb 2024 09:37:17 +0000 Subject: [PATCH 020/374] Automatic style fix --- .../test_attach_backup_from_s3_plain/test.py | 42 ++++++++++++++++--- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/tests/integration/test_attach_backup_from_s3_plain/test.py b/tests/integration/test_attach_backup_from_s3_plain/test.py index 4a8da1e6d66..900366b2c9c 100644 --- a/tests/integration/test_attach_backup_from_s3_plain/test.py +++ b/tests/integration/test_attach_backup_from_s3_plain/test.py @@ -20,27 +20,57 @@ def start_cluster(): finally: cluster.shutdown() + s3_disk_def = """disk(type=s3_plain, endpoint='http://minio1:9001/root/data/disks/disk_s3_plain/{backup_name}/', access_key_id='minio', secret_access_key='minio123');""" -local_disk_def = "disk(type=object_storage, object_storage_type = 'local', metadata_type = 
'plain'" +local_disk_def = ( + "disk(type=object_storage, object_storage_type = 'local', metadata_type = 'plain'" +) + @pytest.mark.parametrize( "table_name,backup_name,storage_policy,disk_def,min_bytes_for_wide_part", [ pytest.param( - "compact", "backup_compact_s3", "backup_disk_s3_plain", s3_disk_def, int(1e9), id="compact" + "compact", + "backup_compact_s3", + "backup_disk_s3_plain", + s3_disk_def, + int(1e9), + id="compact", ), - pytest.param("wide", "backup_wide_s3", "backup_disk_s3_plain", s3_disk_def, int(0), id="wide"), pytest.param( - "compact", "backup_compact_local", "backup_disk_local_plain", local_disk_def, int(1e9), id="compact" + "wide", + "backup_wide_s3", + "backup_disk_s3_plain", + s3_disk_def, + int(0), + id="wide", + ), + pytest.param( + "compact", + "backup_compact_local", + "backup_disk_local_plain", + local_disk_def, + int(1e9), + id="compact", + ), + pytest.param( + "wide", + "backup_wide_local", + "backup_disk_local_plain", + local_disk_def, + int(0), + id="wide", ), - pytest.param("wide", "backup_wide_local", "backup_disk_local_plain", local_disk_def, int(0), id="wide"), ], ) -def test_attach_part(table_name, backup_name, storage_policy, disk_def, min_bytes_for_wide_part): +def test_attach_part( + table_name, backup_name, storage_policy, disk_def, min_bytes_for_wide_part +): node.query( f""" -- Catch any errors (NOTE: warnings are ok) From ac4af6a4ad3b67860eae79b2ed3320fc5981a954 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 26 Feb 2024 19:58:49 +0000 Subject: [PATCH 021/374] Don't allow to set max_parallel_replicas to 0 as it doesn't make sense --- src/Client/ConnectionPoolWithFailover.cpp | 9 +++++++++ src/Client/HedgedConnectionsFactory.cpp | 3 +++ src/Client/HedgedConnectionsFactory.h | 2 +- src/Interpreters/InterpreterSelectQuery.cpp | 4 ++-- src/Planner/PlannerJoinTree.cpp | 4 ++-- .../03001_max_parallel_replicas_zero_value.reference | 0 .../03001_max_parallel_replicas_zero_value.sql | 5 +++++ 7 files changed, 22 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.reference create mode 100644 tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index 492fd4ae9e2..46b9741c812 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -191,11 +191,20 @@ std::vector ConnectionPoolWithFailover::g max_entries = nested_pools.size(); } else if (pool_mode == PoolMode::GET_ONE) + { max_entries = 1; + } else if (pool_mode == PoolMode::GET_MANY) + { + if (settings.max_parallel_replicas == 0) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of the setting max_parallel_replicas must be greater than 0"); + max_entries = settings.max_parallel_replicas; + } else + { throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode"); + } if (!priority_func) priority_func = makeGetPriorityFunc(settings); diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index f5b074a0257..a4e5dbf04ac 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -82,6 +82,9 @@ std::vector HedgedConnectionsFactory::getManyConnections(PoolMode } case PoolMode::GET_MANY: { + if (max_parallel_replicas == 0) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of the setting max_parallel_replicas must be greater than 0"); + max_entries = max_parallel_replicas; 
break; } diff --git a/src/Client/HedgedConnectionsFactory.h b/src/Client/HedgedConnectionsFactory.h index ce7b553acdd..dd600d58e1e 100644 --- a/src/Client/HedgedConnectionsFactory.h +++ b/src/Client/HedgedConnectionsFactory.h @@ -158,7 +158,7 @@ private: /// checking the number of requested replicas that are still in process). size_t requested_connections_count = 0; - const size_t max_parallel_replicas = 0; + const size_t max_parallel_replicas = 1; const bool skip_unavailable_shards = 0; }; diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index d34294b4c4b..fe5e5dc69d1 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -871,7 +871,7 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() { /// The query could use trivial count if it didn't use parallel replicas, so let's disable it and reanalyze context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); - context->setSetting("max_parallel_replicas", UInt64{0}); + context->setSetting("max_parallel_replicas", UInt64{1}); LOG_DEBUG(log, "Disabling parallel replicas to be able to use a trivial count optimization"); return true; } @@ -909,7 +909,7 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() if (number_of_replicas_to_use <= 1) { context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); - context->setSetting("max_parallel_replicas", UInt64{0}); + context->setSetting("max_parallel_replicas", UInt64{1}); LOG_DEBUG(log, "Disabling parallel replicas because there aren't enough rows to read"); return true; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index e6a459d0e8a..2b1cd7fb353 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -295,7 +295,7 @@ bool applyTrivialCountIfPossible( /// The query could use trivial count if it didn't use parallel replicas, so let's disable it query_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); - query_context->setSetting("max_parallel_replicas", UInt64{0}); + query_context->setSetting("max_parallel_replicas", UInt64{1}); LOG_TRACE(getLogger("Planner"), "Disabling parallel replicas to be able to use a trivial count optimization"); } @@ -756,7 +756,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres { planner_context->getMutableQueryContext()->setSetting( "allow_experimental_parallel_reading_from_replicas", Field(0)); - planner_context->getMutableQueryContext()->setSetting("max_parallel_replicas", UInt64{0}); + planner_context->getMutableQueryContext()->setSetting("max_parallel_replicas", UInt64{1}); LOG_DEBUG(getLogger("Planner"), "Disabling parallel replicas because there aren't enough rows to read"); } else if (number_of_replicas_to_use < settings.max_parallel_replicas) diff --git a/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.reference b/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql b/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql new file mode 100644 index 00000000000..611aa4777ba --- /dev/null +++ b/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql @@ -0,0 +1,5 @@ +drop table if exists test_d; +create table test_d 
engine=Distributed(test_cluster_two_shard_three_replicas_localhost, system, numbers); +select * from test_d limit 10 settings max_parallel_replicas = 0, prefer_localhost_replica = 0; --{serverError BAD_ARGUMENTS} +drop table test_d; + From 8aa9f36484bbe814a1e3edccc608e71b73915857 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 26 Feb 2024 22:05:54 +0100 Subject: [PATCH 022/374] Fix style --- src/Client/ConnectionPoolWithFailover.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index 46b9741c812..ad8ed0067d8 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -21,6 +21,7 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int ALL_CONNECTION_TRIES_FAILED; + extern const int BAD_ARGUMENTS; } From f264f0a0360baf1413ec38d3f3f30c70595064f4 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 26 Feb 2024 22:06:10 +0100 Subject: [PATCH 023/374] Fix style --- src/Client/HedgedConnectionsFactory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index a4e5dbf04ac..16a03a696bd 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -19,6 +19,7 @@ namespace ErrorCodes extern const int ALL_CONNECTION_TRIES_FAILED; extern const int ALL_REPLICAS_ARE_STALE; extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } HedgedConnectionsFactory::HedgedConnectionsFactory( From f53f43b78d3cf2da6219ea4bdea7018d9811ae54 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Feb 2024 17:33:47 +0800 Subject: [PATCH 024/374] Fixes for LocalObjectStorage and plain metadata --- .../Local/LocalObjectStorage.cpp | 37 +++++++++++++++++-- .../ObjectStorages/Local/LocalObjectStorage.h | 4 ++ .../MetadataStorageFromPlainObjectStorage.cpp | 5 +-- .../ObjectStorages/ObjectStorageFactory.cpp | 31 ++++++++++------ src/Disks/ObjectStorages/PlainObjectStorage.h | 6 +++ src/Disks/ObjectStorages/S3/DiskS3Utils.cpp | 6 --- src/Disks/ObjectStorages/S3/DiskS3Utils.h | 1 - .../ObjectStorages/S3/S3ObjectStorage.cpp | 2 + .../configs/disk_s3.xml | 4 +- .../test_attach_backup_from_s3_plain/test.py | 7 ++-- 10 files changed, 71 insertions(+), 32 deletions(-) diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 02700b358e0..51c260cc270 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -31,6 +31,8 @@ LocalObjectStorage::LocalObjectStorage(String key_prefix_) description = *block_device_id; else description = "/"; + + fs::create_directories(getCommonKeyPrefix()); } bool LocalObjectStorage::exists(const StoredObject & object) const @@ -53,6 +55,7 @@ std::unique_ptr LocalObjectStorage::readObjects( /// NOL return createReadBufferFromFileBase(file_path, modified_settings, read_hint, file_size); }; + LOG_TEST(log, "Read object: {}", objects[0].remote_path); switch (read_settings.remote_fs_method) { case RemoteFSReadMethod::read: @@ -111,8 +114,8 @@ std::unique_ptr LocalObjectStorage::readObject( /// NOLI if (!file_size) file_size = tryGetSizeFromFilePath(path); - LOG_TEST(log, "Read object: {}", path); - return createReadBufferFromFileBase(path, patchSettings(read_settings), read_hint, file_size); + 
LOG_TEST(log, "Read object: {}", object.remote_path); + return createReadBufferFromFileBase(object.remote_path, patchSettings(read_settings), read_hint, file_size); } std::unique_ptr LocalObjectStorage::writeObject( /// NOLINT @@ -126,6 +129,7 @@ std::unique_ptr LocalObjectStorage::writeObject( /// NO throw Exception(ErrorCodes::BAD_ARGUMENTS, "LocalObjectStorage doesn't support append to files"); LOG_TEST(log, "Write object: {}", object.remote_path); + fs::create_directories(fs::path(object.remote_path).parent_path()); return std::make_unique(object.remote_path, buf_size); } @@ -157,9 +161,34 @@ void LocalObjectStorage::removeObjectsIfExist(const StoredObjects & objects) removeObjectIfExists(object); } -ObjectMetadata LocalObjectStorage::getObjectMetadata(const std::string & /* path */) const +ObjectMetadata LocalObjectStorage::getObjectMetadata(const std::string & path) const { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Metadata is not supported for LocalObjectStorage"); + ObjectMetadata object_metadata; + LOG_TEST(log, "Getting metadata for path: {}", path); + object_metadata.size_bytes = fs::file_size(path); + object_metadata.last_modified = Poco::Timestamp::fromEpochTime( + std::chrono::duration_cast(fs::last_write_time(path).time_since_epoch()).count()); + return object_metadata; +} + +void LocalObjectStorage::listObjects(const std::string & path, RelativePathsWithMetadata & children, int /* max_keys */) const +{ + for (const auto & entry : fs::directory_iterator(path)) + { + if (entry.is_directory()) + { + listObjects(entry.path(), children, 0); + continue; + } + + auto metadata = getObjectMetadata(entry.path()); + children.emplace_back(entry.path(), std::move(metadata)); + } +} + +bool LocalObjectStorage::existsOrHasAnyChild(const std::string & path) const +{ + return exists(StoredObject(path)); } void LocalObjectStorage::copyObject( // NOLINT diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h index ed5f8c1f537..22429a99c76 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h @@ -58,6 +58,10 @@ public: ObjectMetadata getObjectMetadata(const std::string & path) const override; + void listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const override; + + bool existsOrHasAnyChild(const std::string & path) const override; + void copyObject( /// NOLINT const StoredObject & object_from, const StoredObject & object_to, diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index b03809f5b39..4b8fc74e956 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -48,10 +48,7 @@ bool MetadataStorageFromPlainObjectStorage::isDirectory(const std::string & path std::string directory = object_key.serialize(); if (!directory.ends_with('/')) directory += '/'; - - RelativePathsWithMetadata files; - object_storage->listObjects(directory, files, 1); - return !files.empty(); + return object_storage->existsOrHasAnyChild(directory); } uint64_t MetadataStorageFromPlainObjectStorage::getFileSize(const String & path) const diff --git a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp index 6f6ff199902..f64c42c1403 100644 --- a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp +++ 
b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp @@ -36,16 +36,24 @@ namespace ErrorCodes namespace { + bool isPlainStorage( + ObjectStorageType type, + const Poco::Util::AbstractConfiguration & config, + const std::string & config_prefix) + { + auto compatibility_hint = MetadataStorageFactory::getCompatibilityMetadataTypeHint(type); + auto metadata_type = MetadataStorageFactory::getMetadataType(config, config_prefix, compatibility_hint); + return metadataTypeFromString(metadata_type) == MetadataStorageType::Plain; + } + template ObjectStoragePtr createObjectStorage( + ObjectStorageType type, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, Args && ...args) { - auto compatibility_hint = MetadataStorageFactory::getCompatibilityMetadataTypeHint(ObjectStorageType::S3); - auto metadata_type = MetadataStorageFactory::getMetadataType(config, config_prefix, compatibility_hint); - - if (metadataTypeFromString(metadata_type) == MetadataStorageType::Plain) + if (isPlainStorage(type, config, config_prefix)) { return std::make_shared>(std::forward(args)...); } @@ -151,10 +159,10 @@ void registerS3ObjectStorage(ObjectStorageFactory & factory) auto s3_capabilities = getCapabilitiesFromConfig(config, config_prefix); auto settings = getSettings(config, config_prefix, context); auto client = getClient(config, config_prefix, context, *settings); - auto key_generator = getKeyGenerator(disk_type, uri, config, config_prefix); + auto key_generator = getKeyGenerator(uri, config, config_prefix); auto object_storage = createObjectStorage( - config, config_prefix, std::move(client), std::move(settings), uri, s3_capabilities, key_generator, name); + ObjectStorageType::S3, config, config_prefix, std::move(client), std::move(settings), uri, s3_capabilities, key_generator, name); /// NOTE: should we still perform this check for clickhouse-disks? if (!skip_access_check) @@ -187,7 +195,7 @@ void registerS3PlainObjectStorage(ObjectStorageFactory & factory) auto s3_capabilities = getCapabilitiesFromConfig(config, config_prefix); auto settings = getSettings(config, config_prefix, context); auto client = getClient(config, config_prefix, context, *settings); - auto key_generator = getKeyGenerator(disk_type, uri, config, config_prefix); + auto key_generator = getKeyGenerator(uri, config, config_prefix); auto object_storage = std::make_shared>( std::move(client), std::move(settings), uri, s3_capabilities, key_generator, name); @@ -222,7 +230,7 @@ void registerHDFSObjectStorage(ObjectStorageFactory & factory) context->getSettingsRef().hdfs_replication ); - return createObjectStorage(config, config_prefix, uri, std::move(settings), config); + return createObjectStorage(ObjectStorageType::HDFS, config, config_prefix, uri, std::move(settings), config); }); } #endif @@ -239,8 +247,7 @@ void registerAzureObjectStorage(ObjectStorageFactory & factory) { String container_name = config.getString(config_prefix + ".container_name", "default-container"); return createObjectStorage( - config, config_prefix, - name, + ObjectStorageType::Azure, config, config_prefix, name, getAzureBlobContainerClient(config, config_prefix), getAzureBlobStorageSettings(config, config_prefix, context), container_name); @@ -273,7 +280,7 @@ void registerWebObjectStorage(ObjectStorageFactory & factory) ErrorCodes::BAD_ARGUMENTS, "Bad URI: `{}`. 
Error: {}", uri, e.what()); } - return createObjectStorage(config, config_prefix, uri, context); + return createObjectStorage(ObjectStorageType::Web, config, config_prefix, uri, context); }); } @@ -291,7 +298,7 @@ void registerLocalObjectStorage(ObjectStorageFactory & factory) loadDiskLocalConfig(name, config, config_prefix, context, object_key_prefix, keep_free_space_bytes); /// keys are mapped to the fs, object_key_prefix is a directory also fs::create_directories(object_key_prefix); - return createObjectStorage(config, config_prefix, object_key_prefix); + return createObjectStorage(ObjectStorageType::Local, config, config_prefix, object_key_prefix); }); } #endif diff --git a/src/Disks/ObjectStorages/PlainObjectStorage.h b/src/Disks/ObjectStorages/PlainObjectStorage.h index 3a81b85c44b..e0907d0b4d8 100644 --- a/src/Disks/ObjectStorages/PlainObjectStorage.h +++ b/src/Disks/ObjectStorages/PlainObjectStorage.h @@ -1,5 +1,6 @@ #pragma once #include +#include namespace DB { @@ -24,6 +25,11 @@ public: bool isWriteOnce() const override { return true; } bool isPlain() const override { return true; } + + ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override + { + return ObjectStorageKey::createAsRelative(BaseObjectStorage::getCommonKeyPrefix(), path); + } }; } diff --git a/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp b/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp index bb7b53b2d22..4b889f89f90 100644 --- a/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp +++ b/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp @@ -15,16 +15,10 @@ namespace ErrorCodes } ObjectStorageKeysGeneratorPtr getKeyGenerator( - String type, const S3::URI & uri, const Poco::Util::AbstractConfiguration & config, const String & config_prefix) { - if (type == "s3_plain") - return createObjectStorageKeysGeneratorAsIsWithPrefix(uri.key); - - chassert(type == "s3"); - bool storage_metadata_write_full_object_key = DiskObjectStorageMetadata::getWriteFullObjectKeySetting(); bool send_metadata = config.getBool(config_prefix + ".send_metadata", false); diff --git a/src/Disks/ObjectStorages/S3/DiskS3Utils.h b/src/Disks/ObjectStorages/S3/DiskS3Utils.h index 29e39d4bc1b..8524a9ccac3 100644 --- a/src/Disks/ObjectStorages/S3/DiskS3Utils.h +++ b/src/Disks/ObjectStorages/S3/DiskS3Utils.h @@ -12,7 +12,6 @@ namespace DB namespace S3 { struct URI; } ObjectStorageKeysGeneratorPtr getKeyGenerator( - String type, const S3::URI & uri, const Poco::Util::AbstractConfiguration & config, const String & config_prefix); diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 5771eb1ebe0..b2a9ab8fdc3 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -561,6 +561,8 @@ std::unique_ptr S3ObjectStorage::cloneObjectStorage( ObjectStorageKey S3ObjectStorage::generateObjectKeyForPath(const std::string & path) const { + if (!key_generator) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Key generator is not set"); return key_generator->generate(path); } diff --git a/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml b/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml index 3166eea7ccb..2edabc76c8b 100644 --- a/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml +++ b/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml @@ -10,9 +10,9 @@ object_storage - local + local_blob_storage plain - local_plain/ + /local_plain/ diff --git 
a/tests/integration/test_attach_backup_from_s3_plain/test.py b/tests/integration/test_attach_backup_from_s3_plain/test.py index 4a8da1e6d66..983275cc24f 100644 --- a/tests/integration/test_attach_backup_from_s3_plain/test.py +++ b/tests/integration/test_attach_backup_from_s3_plain/test.py @@ -21,11 +21,11 @@ def start_cluster(): cluster.shutdown() s3_disk_def = """disk(type=s3_plain, - endpoint='http://minio1:9001/root/data/disks/disk_s3_plain/{backup_name}/', + endpoint='http://minio1:9001/root/data/disks/disk_s3_plain/{}/', access_key_id='minio', secret_access_key='minio123');""" -local_disk_def = "disk(type=object_storage, object_storage_type = 'local', metadata_type = 'plain'" +local_disk_def = "disk(type=object_storage, object_storage_type = 'local_blob_storage', metadata_type = 'plain', path = '/local_plain/{}/')" @pytest.mark.parametrize( "table_name,backup_name,storage_policy,disk_def,min_bytes_for_wide_part", @@ -41,6 +41,7 @@ local_disk_def = "disk(type=object_storage, object_storage_type = 'local', metad ], ) def test_attach_part(table_name, backup_name, storage_policy, disk_def, min_bytes_for_wide_part): + disk_definition = disk_def.format(backup_name) node.query( f""" -- Catch any errors (NOTE: warnings are ok) @@ -63,7 +64,7 @@ def test_attach_part(table_name, backup_name, storage_policy, disk_def, min_byte order by key partition by part settings max_suspicious_broken_parts=0, - disk={disk_def} + disk={disk_definition} """ ) From fb38bd139c433ead685028f232e8c4fad5e566d2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Feb 2024 17:38:02 +0800 Subject: [PATCH 025/374] Remove debug logging --- src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 51c260cc270..4ec998a2bb0 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -55,7 +55,6 @@ std::unique_ptr LocalObjectStorage::readObjects( /// NOL return createReadBufferFromFileBase(file_path, modified_settings, read_hint, file_size); }; - LOG_TEST(log, "Read object: {}", objects[0].remote_path); switch (read_settings.remote_fs_method) { case RemoteFSReadMethod::read: @@ -109,10 +108,8 @@ std::unique_ptr LocalObjectStorage::readObject( /// NOLI std::optional read_hint, std::optional file_size) const { - const auto & path = object.remote_path; - if (!file_size) - file_size = tryGetSizeFromFilePath(path); + file_size = tryGetSizeFromFilePath(object.remote_path); LOG_TEST(log, "Read object: {}", object.remote_path); return createReadBufferFromFileBase(object.remote_path, patchSettings(read_settings), read_hint, file_size); From 978fe9fa1a069a231bb52c66b3898c6ce112a215 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Feb 2024 17:43:34 +0800 Subject: [PATCH 026/374] Add comments --- src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 4ec998a2bb0..7f34ca48f7f 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -126,7 +126,11 @@ std::unique_ptr LocalObjectStorage::writeObject( /// NO throw Exception(ErrorCodes::BAD_ARGUMENTS, "LocalObjectStorage doesn't support append to files"); LOG_TEST(log, "Write object: {}", 
object.remote_path); + + /// Unlike real blob storage, in local fs we cannot create a file with non-existing prefix. + /// So let's create it. fs::create_directories(fs::path(object.remote_path).parent_path()); + return std::make_unique(object.remote_path, buf_size); } @@ -185,6 +189,8 @@ void LocalObjectStorage::listObjects(const std::string & path, RelativePathsWith bool LocalObjectStorage::existsOrHasAnyChild(const std::string & path) const { + /// Unlike real object storage, existance of a prefix path can be checked by + /// just checking existence of this prefix directly, so simple exists is enough here. return exists(StoredObject(path)); } From 33788250b1f74384661cd241e2badef82c8fdbf6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Feb 2024 18:07:19 +0800 Subject: [PATCH 027/374] Update test.py --- tests/integration/test_attach_backup_from_s3_plain/test.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_attach_backup_from_s3_plain/test.py b/tests/integration/test_attach_backup_from_s3_plain/test.py index 3a0fa70a715..c2f8936b82c 100644 --- a/tests/integration/test_attach_backup_from_s3_plain/test.py +++ b/tests/integration/test_attach_backup_from_s3_plain/test.py @@ -26,9 +26,8 @@ def start_cluster(): cluster.shutdown() + s3_disk_def = """disk(type=s3_plain, access_key_id='minio', secret_access_key='minio123');""" -local_disk_def = ( - "disk(type=object_storage, object_storage_type = 'local', metadata_type = 'plain', path = '/local_plain/{}/'" -) +local_disk_def = "disk(type=object_storage, object_storage_type = 'local_blob_storage', metadata_type = 'plain', path = '/local_plain/{}/');" + @pytest.mark.parametrize( "table_name,backup_name,storage_policy,disk_def,min_bytes_for_wide_part", [ @@ -67,7 +66,6 @@ local_disk_def = ( ), ], ) -def test_attach_part( table_name, backup_name, storage_policy, disk_def, min_bytes_for_wide_part ): From 58a53b42acb3b25a41e8529186db9df0d4387f77 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 27 Feb 2024 14:31:35 +0100 Subject: [PATCH 028/374] Set max_entries to min(max_parallel_replicas, all available replicas) --- src/Client/HedgedConnectionsFactory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index 16a03a696bd..703cc1f8821 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -86,7 +86,7 @@ std::vector HedgedConnectionsFactory::getManyConnections(PoolMode if (max_parallel_replicas == 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of the setting max_parallel_replicas must be greater than 0"); - max_entries = max_parallel_replicas; + max_entries = std::min(max_parallel_replicas, shuffled_pools.size()); break; } } From 98b27fd45fbe1109442c2313181ca4e8435e2024 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Feb 2024 23:00:27 +0800 Subject: [PATCH 029/374] Fix style check --- src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp | 2 +- src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 7f34ca48f7f..eba57969580 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -189,7 +189,7 @@ void LocalObjectStorage::listObjects(const std::string & path, RelativePathsWith bool 
LocalObjectStorage::existsOrHasAnyChild(const std::string & path) const { - /// Unlike real object storage, existance of a prefix path can be checked by + /// Unlike real object storage, existence of a prefix path can be checked by /// just checking existence of this prefix directly, so simple exists is enough here. return exists(StoredObject(path)); } diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index b2a9ab8fdc3..eec3a5914fc 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -48,6 +48,7 @@ namespace ErrorCodes { extern const int S3_ERROR; extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; } namespace From 416638461fe832673252445d8fabb3fe554eed49 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 27 Feb 2024 15:02:13 +0000 Subject: [PATCH 030/374] Try to fix logical error 'Cannot capture column because it has incompatible type' in mapContainsKeyLike --- src/Functions/array/FunctionArrayMapped.h | 4 ++-- .../03002_map_array_functions_with_low_cardinality.reference | 1 + .../03002_map_array_functions_with_low_cardinality.sql | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.reference create mode 100644 tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql diff --git a/src/Functions/array/FunctionArrayMapped.h b/src/Functions/array/FunctionArrayMapped.h index 49ed9d495e2..136d3481771 100644 --- a/src/Functions/array/FunctionArrayMapped.h +++ b/src/Functions/array/FunctionArrayMapped.h @@ -355,7 +355,7 @@ public: { arrays.emplace_back( column_tuple->getColumnPtr(j), - recursiveRemoveLowCardinality(type_tuple.getElement(j)), + type_tuple.getElement(j), array_with_type_and_name.name + "." 
+ tuple_names[j]); } } @@ -363,7 +363,7 @@ public: { arrays.emplace_back( column_array->getDataPtr(), - recursiveRemoveLowCardinality(array_type->getNestedType()), + array_type->getNestedType(), array_with_type_and_name.name); } diff --git a/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.reference b/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql b/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql new file mode 100644 index 00000000000..8240a8f93f5 --- /dev/null +++ b/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql @@ -0,0 +1,2 @@ +SELECT mapContainsKeyLike(map('aa', toLowCardinality(1), 'bb', toLowCardinality(2)), toLowCardinality('a%')); + From 5771e739f0e65baae69f1e7abd42495d5fbc5488 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Tue, 27 Feb 2024 23:11:29 +0800 Subject: [PATCH 031/374] Update ReadSettings.h --- src/IO/ReadSettings.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 2c79735317d..846fcd668f0 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -98,7 +98,6 @@ struct ReadSettings bool enable_filesystem_cache = true; bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false; bool enable_filesystem_cache_log = false; - /// Don't populate cache when the read is not part of query execution (e.g. background thread). bool force_read_through_cache_merges = false; size_t filesystem_cache_segments_batch_size = 20; From 1eba06dc113881b2845d36a7d3a4703ad64659d7 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Tue, 27 Feb 2024 23:12:41 +0800 Subject: [PATCH 032/374] Update 02241_filesystem_cache_on_write_operations.reference --- .../02241_filesystem_cache_on_write_operations.reference | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference index c03b928684b..53566a18edc 100644 --- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference +++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference @@ -95,13 +95,7 @@ INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(300, 10000) SELECT count(), sum(size) FROM system.filesystem_cache 24 84045 SYSTEM START MERGES test_02241 -SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' -85146 -SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' OPTIMIZE TABLE test_02241 FINAL -SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' -251542 -SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' SELECT count(), sum(size) FROM system.filesystem_cache 32 167243 ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100 From ffd69e0e127f64cf90a41d7b710c375ced13f092 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Feb 2024 23:22:04 +0800 Subject: [PATCH 033/374] Move setting to merge-tree level --- src/Core/ServerSettings.h | 3 --- src/Interpreters/Context.cpp | 1 - 
src/Storages/MergeTree/MergeTreeSequentialSource.cpp | 3 ++- src/Storages/MergeTree/MergeTreeSettings.h | 1 + .../config.d/force_read_through_cache_for_merges.xml | 4 +++- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index 0283b98638f..0063b3a2bd6 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -102,9 +102,6 @@ namespace DB M(UInt64, tables_loader_background_pool_size, 0, "The maximum number of threads that will be used for background async loading of tables. Zero means use all CPUs.", 0) \ M(Bool, async_load_databases, false, "Enable asynchronous loading of databases and tables to speedup server startup. Queries to not yet loaded entity will be blocked until load is finished.", 0) \ M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \ - \ - M(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \ - \ M(Seconds, keep_alive_timeout, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ M(Seconds, replicated_fetches_http_connection_timeout, 0, "HTTP connection timeout for part fetch requests. Inherited from default profile `http_connection_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_send_timeout, 0, "HTTP send timeout for part fetch requests. Inherited from default profile `http_send_timeout` if not set explicitly.", 0) \ diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index a974eaca067..55a4df10206 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -5079,7 +5079,6 @@ ReadSettings Context::getReadSettings() const res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; res.filesystem_cache_segments_batch_size = settings.filesystem_cache_segments_batch_size; - res.force_read_through_cache_merges = getServerSettings().force_read_through_cache_for_merges; res.filesystem_cache_max_download_size = settings.filesystem_cache_max_download_size; res.skip_download_if_exceeds_query_cache = settings.skip_download_if_exceeds_query_cache; diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index e375e8b0a9f..6b0c5ccb59a 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -151,7 +151,8 @@ MergeTreeSequentialSource::MergeTreeSequentialSource( const auto & context = storage.getContext(); ReadSettings read_settings = context->getReadSettings(); - read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = !read_settings.force_read_through_cache_merges; + read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = !storage.getSettings()->force_read_through_cache_for_merges; + /// It does not make sense to use pthread_threadpool for background merges/mutations /// And also to preserve backward compatibility read_settings.local_fs_method = LocalFSReadMethod::pread; diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index b64632b6139..9cb74e76dd5 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ 
b/src/Storages/MergeTree/MergeTreeSettings.h @@ -191,6 +191,7 @@ struct Settings; M(String, remote_fs_zero_copy_zookeeper_path, "/clickhouse/zero_copy", "ZooKeeper path for zero-copy table-independent info.", 0) \ M(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", 0) \ M(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", 0) \ + M(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \ M(Bool, allow_experimental_block_number_column, false, "Enable persisting column _block_number for each row.", 0) \ M(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", 0) \ \ diff --git a/tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml b/tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml index bb2a6e850a4..23d3fdea800 100644 --- a/tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml +++ b/tests/integration/test_filesystem_cache/config.d/force_read_through_cache_for_merges.xml @@ -1,3 +1,5 @@ - 1 + + 1 + From cb8390e9c8672bcdead0108be75021d6c6f21331 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 28 Feb 2024 13:32:43 +0800 Subject: [PATCH 034/374] Fix build --- src/Disks/ObjectStorages/ObjectStorageFactory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp index f64c42c1403..d0c2c9ac4f4 100644 --- a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp +++ b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp @@ -16,10 +16,10 @@ #ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD #include #include -#include #include #endif #include +#include #include #include From d2ea882bd8105f5d2e173a6670bf23b2917b3190 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 28 Feb 2024 21:26:19 +0000 Subject: [PATCH 035/374] Fix deadlock in parallel parsing when lots of rows are skipped due to errors --- .../Formats/Impl/ParallelParsingInputFormat.cpp | 4 +++- .../03001_parallel_parsing_deadlock.reference | 0 .../0_stateless/03001_parallel_parsing_deadlock.sh | 12 ++++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03001_parallel_parsing_deadlock.reference create mode 100755 tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 8b6969bbfcc..447adb1ed48 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -224,7 +224,9 @@ Chunk ParallelParsingInputFormat::read() /// skipped all rows. For example, it can happen while using settings /// input_format_allow_errors_num/input_format_allow_errors_ratio /// and this segment contained only rows with errors. - /// Process the next unit. + /// Return this empty unit back to segmentator and process the next unit. 
+ unit->status = READY_TO_INSERT; + segmentator_condvar.notify_all(); ++reader_ticket_number; unit = &processing_units[reader_ticket_number % processing_units.size()]; } diff --git a/tests/queries/0_stateless/03001_parallel_parsing_deadlock.reference b/tests/queries/0_stateless/03001_parallel_parsing_deadlock.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh b/tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh new file mode 100755 index 00000000000..1bf21dfc53b --- /dev/null +++ b/tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-cpu-aarch64 + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +DATA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.csv +$CLICKHOUSE_LOCAL -q "select number > 1000000 ? 'error' : toString(number) from numbers(2000000) format CSV" > $DATA_FILE +$CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CSV, 'x UInt64') format Null settings input_format_allow_errors_ratio=1" +rm $DATA_FILE + From 6fbd298b3d7cc06b1f11727263a25bc613f7c295 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 29 Feb 2024 05:03:09 +0300 Subject: [PATCH 036/374] Revert "Revert "Use `MergeTree` as a default table engine"" --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.h | 1 + tests/queries/0_stateless/02184_default_table_engine.sql | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index ae6ea165cc9..5f52396d3bb 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -566,7 +566,7 @@ class IColumn; M(UInt64, min_free_disk_space_for_temporary_data, 0, "The minimum disk space to keep while writing temporary data used in external sorting and aggregation.", 0) \ \ M(DefaultTableEngine, default_temporary_table_engine, DefaultTableEngine::Memory, "Default table engine used when ENGINE is not set in CREATE TEMPORARY statement.",0) \ - M(DefaultTableEngine, default_table_engine, DefaultTableEngine::None, "Default table engine used when ENGINE is not set in CREATE statement.",0) \ + M(DefaultTableEngine, default_table_engine, DefaultTableEngine::MergeTree, "Default table engine used when ENGINE is not set in CREATE statement.",0) \ M(Bool, show_table_uuid_in_table_create_query_if_not_nil, false, "For tables in databases with Engine=Atomic show UUID of the table in its CREATE query.", 0) \ M(Bool, database_atomic_wait_for_drop_and_detach_synchronously, false, "When executing DROP or DETACH TABLE in Atomic database, wait for table data to be finally dropped or detached.", 0) \ M(Bool, enable_scalar_subquery_optimization, true, "If it is set to true, prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once.", 0) \ diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index e8d013d13ec..661e7cb80da 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -133,6 +133,7 @@ static std::map sett {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, + {"default_table_engine", "None", 
"MergeTree", "Set default table engine to MergeTree for better usability"}, {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, diff --git a/tests/queries/0_stateless/02184_default_table_engine.sql b/tests/queries/0_stateless/02184_default_table_engine.sql index a984ec1b6c9..aff30eeea98 100644 --- a/tests/queries/0_stateless/02184_default_table_engine.sql +++ b/tests/queries/0_stateless/02184_default_table_engine.sql @@ -1,3 +1,5 @@ +SET default_table_engine = 'None'; + CREATE TABLE table_02184 (x UInt8); --{serverError 119} SET default_table_engine = 'Log'; CREATE TABLE table_02184 (x UInt8); From 0d4648b535a61561d122c87cf181434215753b35 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 29 Feb 2024 10:30:17 +0800 Subject: [PATCH 037/374] Fix clang-tidy --- src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index eba57969580..c0b45e1d46a 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -32,7 +32,7 @@ LocalObjectStorage::LocalObjectStorage(String key_prefix_) else description = "/"; - fs::create_directories(getCommonKeyPrefix()); + fs::create_directories(key_prefix); } bool LocalObjectStorage::exists(const StoredObject & object) const From 3188c1ebdac52efbdadb8f64a13b0c4b6f4e1acc Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Wed, 28 Feb 2024 13:51:48 +0800 Subject: [PATCH 038/374] Update test.py --- tests/integration/test_filesystem_cache/test.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index f32fa4e9823..0cb1866f8e4 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -94,12 +94,21 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): cache_state = node.query( "SELECT key, file_segment_range_begin, size FROM system.filesystem_cache WHERE size > 0 ORDER BY key, file_segment_range_begin, size" ) + keys = ( + node.query( + "SELECT distinct(key) FROM system.filesystem_cache WHERE size > 0 ORDER BY key, file_segment_range_begin, size" + ) + .strip() + .splitlines() + ) node.restart_clickhouse() - assert cache_count == int(node.query("SELECT count() FROM system.filesystem_cache")) + # < because of additional files loaded into cache on server startup. 
+ assert cache_count <= int(node.query("SELECT count() FROM system.filesystem_cache")) + keys_set = ",".join(["'" + x + "'" for x in keys]) assert cache_state == node.query( - "SELECT key, file_segment_range_begin, size FROM system.filesystem_cache ORDER BY key, file_segment_range_begin, size" + f"SELECT key, file_segment_range_begin, size FROM system.filesystem_cache WHERE key in ({keys_set}) ORDER BY key, file_segment_range_begin, size" ) assert node.contains_in_log("Loading filesystem cache with 30 threads") From f8561b2265b924c64c60bdbc5305785c0f0b6f2e Mon Sep 17 00:00:00 2001 From: Sergei Trifonov Date: Thu, 29 Feb 2024 13:53:27 +0100 Subject: [PATCH 039/374] Revert "Revert "Support resource request canceling"" --- docs/en/operations/system-tables/scheduler.md | 4 + src/Common/Scheduler/ISchedulerNode.h | 2 + src/Common/Scheduler/ISchedulerQueue.h | 6 ++ src/Common/Scheduler/Nodes/FairPolicy.h | 99 ++++++++++--------- src/Common/Scheduler/Nodes/FifoQueue.h | 31 ++++-- src/Common/Scheduler/Nodes/PriorityPolicy.h | 38 ++++--- .../tests/gtest_dynamic_resource_manager.cpp | 1 - .../Nodes/tests/gtest_resource_scheduler.cpp | 63 ++++++++++++ src/Common/Scheduler/ResourceGuard.h | 9 +- src/Common/Scheduler/ResourceRequest.cpp | 13 +++ src/Common/Scheduler/ResourceRequest.h | 30 +++--- src/Common/Scheduler/SchedulerRoot.h | 32 +++--- .../System/StorageSystemScheduler.cpp | 4 + 13 files changed, 224 insertions(+), 108 deletions(-) create mode 100644 src/Common/Scheduler/ResourceRequest.cpp diff --git a/docs/en/operations/system-tables/scheduler.md b/docs/en/operations/system-tables/scheduler.md index 953db4c28f2..c4de7f76fdc 100644 --- a/docs/en/operations/system-tables/scheduler.md +++ b/docs/en/operations/system-tables/scheduler.md @@ -26,7 +26,9 @@ priority: 0 is_active: 0 active_children: 0 dequeued_requests: 67 +canceled_requests: 0 dequeued_cost: 4692272 +canceled_cost: 0 busy_periods: 63 vruntime: 938454.1999999989 system_vruntime: ᴺᵁᴸᴸ @@ -54,7 +56,9 @@ Columns: - `is_active` (`UInt8`) - Whether this node is currently active - has resource requests to be dequeued and constraints satisfied. - `active_children` (`UInt64`) - The number of children in active state. - `dequeued_requests` (`UInt64`) - The total number of resource requests dequeued from this node. +- `canceled_requests` (`UInt64`) - The total number of resource requests canceled from this node. - `dequeued_cost` (`UInt64`) - The sum of costs (e.g. size in bytes) of all requests dequeued from this node. +- `canceled_cost` (`UInt64`) - The sum of costs (e.g. size in bytes) of all requests canceled from this node. - `busy_periods` (`UInt64`) - The total number of deactivations of this node. - `vruntime` (`Nullable(Float64)`) - For children of `fair` nodes only. Virtual runtime of a node used by SFQ algorithm to select the next child to process in a max-min fair manner. - `system_vruntime` (`Nullable(Float64)`) - For `fair` nodes only. Virtual runtime showing `vruntime` of the last processed resource request. Used during child activation as the new value of `vruntime`. 
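The `canceled_requests` and `canceled_cost` counters documented above can be inspected at runtime. A minimal sketch, assuming the usual `resource` and `path` identification columns of `system.scheduler` in addition to the fields added by this patch:

```sql
-- watch how many resource requests were canceled per scheduler node
SELECT resource, path, dequeued_requests, canceled_requests, dequeued_cost, canceled_cost, busy_periods
FROM system.scheduler
ORDER BY resource, path
FORMAT Vertical;
```

A non-zero `canceled_requests` means requests were removed from a queue by `cancelRequest()` before ever being executed, while `dequeued_requests` keeps counting only the requests that actually reached execution.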
diff --git a/src/Common/Scheduler/ISchedulerNode.h b/src/Common/Scheduler/ISchedulerNode.h index 804026d7bf4..20c1f4332da 100644 --- a/src/Common/Scheduler/ISchedulerNode.h +++ b/src/Common/Scheduler/ISchedulerNode.h @@ -387,7 +387,9 @@ public: /// Introspection std::atomic dequeued_requests{0}; + std::atomic canceled_requests{0}; std::atomic dequeued_cost{0}; + std::atomic canceled_cost{0}; std::atomic busy_periods{0}; }; diff --git a/src/Common/Scheduler/ISchedulerQueue.h b/src/Common/Scheduler/ISchedulerQueue.h index cbe63bd304a..532f4bf6c63 100644 --- a/src/Common/Scheduler/ISchedulerQueue.h +++ b/src/Common/Scheduler/ISchedulerQueue.h @@ -50,6 +50,12 @@ public: /// Should be called outside of scheduling subsystem, implementation must be thread-safe. virtual void enqueueRequest(ResourceRequest * request) = 0; + /// Cancel previously enqueued request. + /// Returns `false` and does nothing given unknown or already executed request. + /// Returns `true` if requests has been found and canceled. + /// Should be called outside of scheduling subsystem, implementation must be thread-safe. + virtual bool cancelRequest(ResourceRequest * request) = 0; + /// For introspection ResourceCost getBudget() const { diff --git a/src/Common/Scheduler/Nodes/FairPolicy.h b/src/Common/Scheduler/Nodes/FairPolicy.h index c0e187e6fa9..ce2bf729a04 100644 --- a/src/Common/Scheduler/Nodes/FairPolicy.h +++ b/src/Common/Scheduler/Nodes/FairPolicy.h @@ -134,56 +134,65 @@ public: std::pair dequeueRequest() override { - if (heap_size == 0) - return {nullptr, false}; - - // Recursively pull request from child - auto [request, child_active] = items.front().child->dequeueRequest(); - assert(request != nullptr); - std::pop_heap(items.begin(), items.begin() + heap_size); - Item & current = items[heap_size - 1]; - - // SFQ fairness invariant: system vruntime equals last served request start-time - assert(current.vruntime >= system_vruntime); - system_vruntime = current.vruntime; - - // By definition vruntime is amount of consumed resource (cost) divided by weight - current.vruntime += double(request->cost) / current.child->info.weight; - max_vruntime = std::max(max_vruntime, current.vruntime); - - if (child_active) // Put active child back in heap after vruntime update + // Cycle is required to do deactivations in the case of canceled requests, when dequeueRequest returns `nullptr` + while (true) { - std::push_heap(items.begin(), items.begin() + heap_size); - } - else // Deactivate child if it is empty, but remember it's vruntime for latter activations - { - heap_size--; + if (heap_size == 0) + return {nullptr, false}; - // Store index of this inactive child in `parent.idx` - // This enables O(1) search of inactive children instead of O(n) - current.child->info.parent.idx = heap_size; - } + // Recursively pull request from child + auto [request, child_active] = items.front().child->dequeueRequest(); + std::pop_heap(items.begin(), items.begin() + heap_size); + Item & current = items[heap_size - 1]; - // Reset any difference between children on busy period end - if (heap_size == 0) - { - // Reset vtime to zero to avoid floating-point error accumulation, - // but do not reset too often, because it's O(N) - UInt64 ns = clock_gettime_ns(); - if (last_reset_ns + 1000000000 < ns) + if (request) { - last_reset_ns = ns; - for (Item & item : items) - item.vruntime = 0; - max_vruntime = 0; - } - system_vruntime = max_vruntime; - busy_periods++; - } + // SFQ fairness invariant: system vruntime equals last served request start-time + 
assert(current.vruntime >= system_vruntime); + system_vruntime = current.vruntime; - dequeued_requests++; - dequeued_cost += request->cost; - return {request, heap_size > 0}; + // By definition vruntime is amount of consumed resource (cost) divided by weight + current.vruntime += double(request->cost) / current.child->info.weight; + max_vruntime = std::max(max_vruntime, current.vruntime); + } + + if (child_active) // Put active child back in heap after vruntime update + { + std::push_heap(items.begin(), items.begin() + heap_size); + } + else // Deactivate child if it is empty, but remember it's vruntime for latter activations + { + heap_size--; + + // Store index of this inactive child in `parent.idx` + // This enables O(1) search of inactive children instead of O(n) + current.child->info.parent.idx = heap_size; + } + + // Reset any difference between children on busy period end + if (heap_size == 0) + { + // Reset vtime to zero to avoid floating-point error accumulation, + // but do not reset too often, because it's O(N) + UInt64 ns = clock_gettime_ns(); + if (last_reset_ns + 1000000000 < ns) + { + last_reset_ns = ns; + for (Item & item : items) + item.vruntime = 0; + max_vruntime = 0; + } + system_vruntime = max_vruntime; + busy_periods++; + } + + if (request) + { + dequeued_requests++; + dequeued_cost += request->cost; + return {request, heap_size > 0}; + } + } } bool isActive() override diff --git a/src/Common/Scheduler/Nodes/FifoQueue.h b/src/Common/Scheduler/Nodes/FifoQueue.h index 38ae902bc2f..45ed32343ff 100644 --- a/src/Common/Scheduler/Nodes/FifoQueue.h +++ b/src/Common/Scheduler/Nodes/FifoQueue.h @@ -39,8 +39,7 @@ public: void enqueueRequest(ResourceRequest * request) override { - std::unique_lock lock(mutex); - request->enqueue_ns = clock_gettime_ns(); + std::lock_guard lock(mutex); queue_cost += request->cost; bool was_empty = requests.empty(); requests.push_back(request); @@ -50,7 +49,7 @@ public: std::pair dequeueRequest() override { - std::unique_lock lock(mutex); + std::lock_guard lock(mutex); if (requests.empty()) return {nullptr, false}; ResourceRequest * result = requests.front(); @@ -63,9 +62,29 @@ public: return {result, !requests.empty()}; } + bool cancelRequest(ResourceRequest * request) override + { + std::lock_guard lock(mutex); + // TODO(serxa): reimplement queue as intrusive list of ResourceRequest to make this O(1) instead of O(N) + for (auto i = requests.begin(), e = requests.end(); i != e; ++i) + { + if (*i == request) + { + requests.erase(i); + if (requests.empty()) + busy_periods++; + queue_cost -= request->cost; + canceled_requests++; + canceled_cost += request->cost; + return true; + } + } + return false; + } + bool isActive() override { - std::unique_lock lock(mutex); + std::lock_guard lock(mutex); return !requests.empty(); } @@ -98,14 +117,14 @@ public: std::pair getQueueLengthAndCost() { - std::unique_lock lock(mutex); + std::lock_guard lock(mutex); return {requests.size(), queue_cost}; } private: std::mutex mutex; Int64 queue_cost = 0; - std::deque requests; + std::deque requests; // TODO(serxa): reimplement it using intrusive list to avoid allocations/deallocations and O(N) during cancel }; } diff --git a/src/Common/Scheduler/Nodes/PriorityPolicy.h b/src/Common/Scheduler/Nodes/PriorityPolicy.h index 6d6b15bd063..9b4cfc37f8c 100644 --- a/src/Common/Scheduler/Nodes/PriorityPolicy.h +++ b/src/Common/Scheduler/Nodes/PriorityPolicy.h @@ -102,25 +102,31 @@ public: std::pair dequeueRequest() override { - if (items.empty()) - return {nullptr, false}; - - // 
Recursively pull request from child - auto [request, child_active] = items.front().child->dequeueRequest(); - assert(request != nullptr); - - // Deactivate child if it is empty - if (!child_active) + // Cycle is required to do deactivations in the case of canceled requests, when dequeueRequest returns `nullptr` + while (true) { - std::pop_heap(items.begin(), items.end()); - items.pop_back(); if (items.empty()) - busy_periods++; - } + return {nullptr, false}; - dequeued_requests++; - dequeued_cost += request->cost; - return {request, !items.empty()}; + // Recursively pull request from child + auto [request, child_active] = items.front().child->dequeueRequest(); + + // Deactivate child if it is empty + if (!child_active) + { + std::pop_heap(items.begin(), items.end()); + items.pop_back(); + if (items.empty()) + busy_periods++; + } + + if (request) + { + dequeued_requests++; + dequeued_cost += request->cost; + return {request, !items.empty()}; + } + } } bool isActive() override diff --git a/src/Common/Scheduler/Nodes/tests/gtest_dynamic_resource_manager.cpp b/src/Common/Scheduler/Nodes/tests/gtest_dynamic_resource_manager.cpp index 961a3b6f713..cdf09776077 100644 --- a/src/Common/Scheduler/Nodes/tests/gtest_dynamic_resource_manager.cpp +++ b/src/Common/Scheduler/Nodes/tests/gtest_dynamic_resource_manager.cpp @@ -38,7 +38,6 @@ TEST(SchedulerDynamicResourceManager, Smoke) { ResourceGuard gA(cA->get("res1"), ResourceGuard::PostponeLocking); gA.lock(); - gA.setFailure(); gA.unlock(); ResourceGuard gB(cB->get("res1")); diff --git a/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp b/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp index 9fefbc02cbd..e76639a4b01 100644 --- a/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp +++ b/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp @@ -4,6 +4,7 @@ #include +#include #include using namespace DB; @@ -73,6 +74,22 @@ struct ResourceHolder } }; +struct MyRequest : public ResourceRequest +{ + std::function on_execute; + + explicit MyRequest(ResourceCost cost_, std::function on_execute_) + : ResourceRequest(cost_) + , on_execute(on_execute_) + {} + + void execute() override + { + if (on_execute) + on_execute(); + } +}; + TEST(SchedulerRoot, Smoke) { ResourceTest t; @@ -111,3 +128,49 @@ TEST(SchedulerRoot, Smoke) EXPECT_TRUE(fc2->requests.contains(&rg.request)); } } + +TEST(SchedulerRoot, Cancel) +{ + ResourceTest t; + + ResourceHolder r1(t); + auto * fc1 = r1.add("/", "1"); + r1.add("/prio"); + auto a = r1.addQueue("/prio/A", "1"); + auto b = r1.addQueue("/prio/B", "2"); + r1.registerResource(); + + std::barrier sync(2); + std::thread consumer1([&] + { + std::barrier destruct_sync(2); + MyRequest request(1,[&] + { + sync.arrive_and_wait(); // (A) + EXPECT_TRUE(fc1->requests.contains(&request)); + sync.arrive_and_wait(); // (B) + request.finish(); + destruct_sync.arrive_and_wait(); // (C) + }); + a.queue->enqueueRequest(&request); + destruct_sync.arrive_and_wait(); // (C) + }); + + std::thread consumer2([&] + { + MyRequest request(1,[&] + { + FAIL() << "This request must be canceled, but instead executes"; + }); + sync.arrive_and_wait(); // (A) wait for request of consumer1 to be inside execute, so that constraint is in violated state and our request will not be executed immediately + b.queue->enqueueRequest(&request); + bool canceled = b.queue->cancelRequest(&request); + EXPECT_TRUE(canceled); + sync.arrive_and_wait(); // (B) release request of consumer1 to be finished + }); + + consumer1.join(); + 
consumer2.join(); + + EXPECT_TRUE(fc1->requests.empty()); +} diff --git a/src/Common/Scheduler/ResourceGuard.h b/src/Common/Scheduler/ResourceGuard.h index dca4041b176..50f665a384b 100644 --- a/src/Common/Scheduler/ResourceGuard.h +++ b/src/Common/Scheduler/ResourceGuard.h @@ -71,8 +71,7 @@ public: // lock(mutex) is not required because `Dequeued` request cannot be used by the scheduler thread chassert(state == Dequeued); state = Finished; - if (constraint) - constraint->finishRequest(this); + ResourceRequest::finish(); } static Request & local() @@ -126,12 +125,6 @@ public: } } - /// Mark request as unsuccessful; by default request is considered to be successful - void setFailure() - { - request.successful = false; - } - ResourceLink link; Request & request; }; diff --git a/src/Common/Scheduler/ResourceRequest.cpp b/src/Common/Scheduler/ResourceRequest.cpp new file mode 100644 index 00000000000..26e8084cdfa --- /dev/null +++ b/src/Common/Scheduler/ResourceRequest.cpp @@ -0,0 +1,13 @@ +#include +#include + +namespace DB +{ + +void ResourceRequest::finish() +{ + if (constraint) + constraint->finishRequest(this); +} + +} diff --git a/src/Common/Scheduler/ResourceRequest.h b/src/Common/Scheduler/ResourceRequest.h index 3d2230746f9..f3153ad382c 100644 --- a/src/Common/Scheduler/ResourceRequest.h +++ b/src/Common/Scheduler/ResourceRequest.h @@ -14,9 +14,6 @@ class ISchedulerConstraint; using ResourceCost = Int64; constexpr ResourceCost ResourceCostMax = std::numeric_limits::max(); -/// Timestamps (nanoseconds since epoch) -using ResourceNs = UInt64; - /* * Request for a resource consumption. The main moving part of the scheduling subsystem. * Resource requests processing workflow: @@ -31,7 +28,7 @@ using ResourceNs = UInt64; * 3) Scheduler calls ISchedulerNode::dequeueRequest() that returns the request. * 4) Callback ResourceRequest::execute() is called to provide access to the resource. * 5) The resource consumption is happening outside of the scheduling subsystem. - * 6) request->constraint->finishRequest() is called when consumption is finished. + * 6) ResourceRequest::finish() is called when consumption is finished. * * Steps (5) and (6) can be omitted if constraint is not used by the resource. * @@ -39,7 +36,10 @@ using ResourceNs = UInt64; * Request ownership is done outside of the scheduling subsystem. * After (6) request can be destructed safely. * - * Request cancelling is not supported yet. + * Request can also be canceled before (3) using ISchedulerQueue::cancelRequest(). + * Returning false means it is too late for request to be canceled. It should be processed in a regular way. + * Returning true means successful cancel and therefore steps (4) and (5) are not going to happen + * and step (6) MUST be omitted. 
*/ class ResourceRequest { @@ -48,32 +48,20 @@ public: /// NOTE: If cost is not known in advance, ResourceBudget should be used (note that every ISchedulerQueue has it) ResourceCost cost; - /// Request outcome - /// Should be filled during resource consumption - bool successful; - /// Scheduler node to be notified on consumption finish /// Auto-filled during request enqueue/dequeue ISchedulerConstraint * constraint; - /// Timestamps for introspection - ResourceNs enqueue_ns; - ResourceNs execute_ns; - ResourceNs finish_ns; - explicit ResourceRequest(ResourceCost cost_ = 1) { reset(cost_); } + /// ResourceRequest object may be reused again after reset() void reset(ResourceCost cost_) { cost = cost_; - successful = true; constraint = nullptr; - enqueue_ns = 0; - execute_ns = 0; - finish_ns = 0; } virtual ~ResourceRequest() = default; @@ -83,6 +71,12 @@ public: /// just triggering start of a consumption, not doing the consumption itself /// (e.g. setting an std::promise or creating a job in a thread pool) virtual void execute() = 0; + + /// Stop resource consumption and notify resource scheduler. + /// Should be called when resource consumption is finished by consumer. + /// ResourceRequest should not be destructed or reset before calling to `finish()`. + /// WARNING: this function MUST not be called if request was canceled. + void finish(); }; } diff --git a/src/Common/Scheduler/SchedulerRoot.h b/src/Common/Scheduler/SchedulerRoot.h index 3a23a8df834..ab3f702a422 100644 --- a/src/Common/Scheduler/SchedulerRoot.h +++ b/src/Common/Scheduler/SchedulerRoot.h @@ -145,22 +145,27 @@ public: std::pair dequeueRequest() override { - if (current == nullptr) // No active resources - return {nullptr, false}; + while (true) + { + if (current == nullptr) // No active resources + return {nullptr, false}; - // Dequeue request from current resource - auto [request, resource_active] = current->root->dequeueRequest(); - assert(request != nullptr); + // Dequeue request from current resource + auto [request, resource_active] = current->root->dequeueRequest(); - // Deactivate resource if required - if (!resource_active) - deactivate(current); - else - current = current->next; // Just move round-robin pointer + // Deactivate resource if required + if (!resource_active) + deactivate(current); + else + current = current->next; // Just move round-robin pointer - dequeued_requests++; - dequeued_cost += request->cost; - return {request, current != nullptr}; + if (request == nullptr) // Possible in case of request cancel, just retry + continue; + + dequeued_requests++; + dequeued_cost += request->cost; + return {request, current != nullptr}; + } } bool isActive() override @@ -245,7 +250,6 @@ private: void execute(ResourceRequest * request) { - request->execute_ns = clock_gettime_ns(); request->execute(); } diff --git a/src/Storages/System/StorageSystemScheduler.cpp b/src/Storages/System/StorageSystemScheduler.cpp index ba07d44dbf9..633bac5d285 100644 --- a/src/Storages/System/StorageSystemScheduler.cpp +++ b/src/Storages/System/StorageSystemScheduler.cpp @@ -30,7 +30,9 @@ ColumnsDescription StorageSystemScheduler::getColumnsDescription() {"is_active", std::make_shared(), "Whether this node is currently active - has resource requests to be dequeued and constraints satisfied."}, {"active_children", std::make_shared(), "The number of children in active state."}, {"dequeued_requests", std::make_shared(), "The total number of resource requests dequeued from this node."}, + {"canceled_requests", std::make_shared(), "The total 
number of resource requests canceled from this node."}, {"dequeued_cost", std::make_shared(), "The sum of costs (e.g. size in bytes) of all requests dequeued from this node."}, + {"canceled_cost", std::make_shared(), "The sum of costs (e.g. size in bytes) of all requests canceled from this node."}, {"busy_periods", std::make_shared(), "The total number of deactivations of this node."}, {"vruntime", std::make_shared(std::make_shared()), "For children of `fair` nodes only. Virtual runtime of a node used by SFQ algorithm to select the next child to process in a max-min fair manner."}, @@ -93,7 +95,9 @@ void StorageSystemScheduler::fillData(MutableColumns & res_columns, ContextPtr c res_columns[i++]->insert(node->isActive()); res_columns[i++]->insert(node->activeChildren()); res_columns[i++]->insert(node->dequeued_requests.load()); + res_columns[i++]->insert(node->canceled_requests.load()); res_columns[i++]->insert(node->dequeued_cost.load()); + res_columns[i++]->insert(node->canceled_cost.load()); res_columns[i++]->insert(node->busy_periods.load()); Field vruntime; From 7632c2c33f357c1c616f734c7bf2502ccbfbd496 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 29 Feb 2024 15:17:12 +0000 Subject: [PATCH 040/374] Remove non-deterministic functions in virtual columns filter --- src/Storages/MergeTree/MergeTreeData.cpp | 2 ++ src/Storages/VirtualColumnUtils.cpp | 21 +++++++++++++++++++ ...with_non_deterministic_functions.reference | 11 ++++++++++ ...lumns_with_non_deterministic_functions.sql | 6 ++++++ 4 files changed, 40 insertions(+) create mode 100644 tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.reference create mode 100644 tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 8aa188cfe5c..6494ed5d844 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1082,6 +1082,8 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( Block virtual_columns_block = getBlockWithVirtualPartColumns(parts, true /* one_part */); auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr); + if (!filter_dag) + return {}; // Generate valid expressions for filtering bool valid = true; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 33ff6e7104f..3e0ef1d7990 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -467,6 +467,23 @@ static bool canEvaluateSubtree(const ActionsDAG::Node * node, const Block & allo return true; } +static bool isDeterministic(const ActionsDAG::Node * node) +{ + if (node->type != ActionsDAG::ActionType::FUNCTION) + return true; + + if (!node->function_base->isDeterministic()) + return false; + + for (const auto * child : node->children) + { + if (!isDeterministic(child)) + return false; + } + + return true; +} + static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( const ActionsDAG::Node * node, const Block * allowed_inputs, @@ -542,6 +559,10 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( } } } + else if (!isDeterministic(node)) + { + return nullptr; + } } if (allowed_inputs && !canEvaluateSubtree(node, *allowed_inputs)) diff --git a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.reference 
b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.reference new file mode 100644 index 00000000000..4c9646d6ffa --- /dev/null +++ b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.reference @@ -0,0 +1,11 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 diff --git a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql new file mode 100644 index 00000000000..9f8bc6bd3d7 --- /dev/null +++ b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql @@ -0,0 +1,6 @@ +create table test (number UInt64) engine=MergeTree order by number; +insert into test select * from numbers(100000000); +select ignore(number) from test where RAND() > 4292390314 limit 10; +select count() > 0 from test where RAND() > 4292390314; +drop table test; + From 09a392772d75b38e1b19ad6bd2a863168ea0de5c Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 29 Feb 2024 15:34:45 +0000 Subject: [PATCH 041/374] Use isDeterministicInScopeOfQuery --- src/Storages/VirtualColumnUtils.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 3e0ef1d7990..6d66453442e 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -467,17 +467,17 @@ static bool canEvaluateSubtree(const ActionsDAG::Node * node, const Block & allo return true; } -static bool isDeterministic(const ActionsDAG::Node * node) +static bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) { if (node->type != ActionsDAG::ActionType::FUNCTION) return true; - if (!node->function_base->isDeterministic()) + if (!node->function_base->isDeterministicInScopeOfQuery()) return false; for (const auto * child : node->children) { - if (!isDeterministic(child)) + if (!isDeterministicInScopeOfQuery(child)) return false; } @@ -559,7 +559,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( } } } - else if (!isDeterministic(node)) + else if (!isDeterministicInScopeOfQuery(node)) { return nullptr; } From 3825cb3ad0d7f2296cf075648d022ef26f1e0cef Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 2 Mar 2024 15:28:45 +0000 Subject: [PATCH 042/374] expand CTE in alter modify query --- src/Interpreters/InterpreterAlterQuery.cpp | 11 +++++++++++ .../0_stateless/03002_modify_query_cte.reference | 2 ++ .../0_stateless/03002_modify_query_cte.sql | 15 +++++++++++++++ 3 files changed, 28 insertions(+) create mode 100644 tests/queries/0_stateless/03002_modify_query_cte.reference create mode 100644 tests/queries/0_stateless/03002_modify_query_cte.sql diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index b768593da98..7acaf95becc 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -71,11 +72,15 @@ BlockIO InterpreterAlterQuery::execute() BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) { + ASTSelectWithUnionQuery * modify_query = nullptr; + for (auto & child : alter.command_list->children) { auto * command_ast = child->as(); if (command_ast->sql_security) InterpreterCreateQuery::processSQLSecurityOption(getContext(), command_ast->sql_security->as()); + else if (command_ast->type == 
ASTAlterCommand::MODIFY_QUERY) + modify_query = command_ast->select->as(); } BlockIO res; @@ -123,6 +128,12 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only"); auto table_lock = table->lockForShare(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); + if (modify_query) + { + // Expand CTE before filling default database + ApplyWithSubqueryVisitor().visit(*modify_query); + } + /// Add default database to table identifiers that we can encounter in e.g. default expressions, mutation expression, etc. AddDefaultDatabaseVisitor visitor(getContext(), table_id.getDatabaseName()); ASTPtr command_list_ptr = alter.command_list->ptr(); diff --git a/tests/queries/0_stateless/03002_modify_query_cte.reference b/tests/queries/0_stateless/03002_modify_query_cte.reference new file mode 100644 index 00000000000..a3d66f70f8f --- /dev/null +++ b/tests/queries/0_stateless/03002_modify_query_cte.reference @@ -0,0 +1,2 @@ +CREATE MATERIALIZED VIEW default.mv_03002 TO default.table_03002\n(\n `ts` DateTime\n)\nAS SELECT ts\nFROM default.table_03002 +CREATE MATERIALIZED VIEW default.mv_03002 TO default.table_03002\n(\n `ts` DateTime\n)\nAS WITH MY_CTE AS\n (\n SELECT ts\n FROM default.table_03002\n )\nSELECT *\nFROM\nMY_CTE diff --git a/tests/queries/0_stateless/03002_modify_query_cte.sql b/tests/queries/0_stateless/03002_modify_query_cte.sql new file mode 100644 index 00000000000..3a36ce7e7fd --- /dev/null +++ b/tests/queries/0_stateless/03002_modify_query_cte.sql @@ -0,0 +1,15 @@ + +CREATE TABLE table_03002 (ts DateTime, event_type String) ENGINE = MergeTree ORDER BY (event_type, ts); + +CREATE MATERIALIZED VIEW mv_03002 TO table_03002 AS SELECT ts FROM table_03002; + +SHOW CREATE TABLE mv_03002; + +ALTER TABLE mv_03002 MODIFY QUERY +WITH MY_CTE AS (SELECT ts FROM table_03002) +SELECT * FROM MY_CTE; + +SHOW CREATE TABLE mv_03002; + +DROP TABLE mv_03002; +DROP TABLE table_03002; From 17413ded759ebcef809e03a80284f6f805507560 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Sat, 2 Mar 2024 11:11:44 -0500 Subject: [PATCH 043/374] Update 03002_modify_query_cte.reference --- tests/queries/0_stateless/03002_modify_query_cte.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03002_modify_query_cte.reference b/tests/queries/0_stateless/03002_modify_query_cte.reference index a3d66f70f8f..50e4a7c6a07 100644 --- a/tests/queries/0_stateless/03002_modify_query_cte.reference +++ b/tests/queries/0_stateless/03002_modify_query_cte.reference @@ -1,2 +1,2 @@ CREATE MATERIALIZED VIEW default.mv_03002 TO default.table_03002\n(\n `ts` DateTime\n)\nAS SELECT ts\nFROM default.table_03002 -CREATE MATERIALIZED VIEW default.mv_03002 TO default.table_03002\n(\n `ts` DateTime\n)\nAS WITH MY_CTE AS\n (\n SELECT ts\n FROM default.table_03002\n )\nSELECT *\nFROM\nMY_CTE +CREATE MATERIALIZED VIEW default.mv_03002 TO default.table_03002\n(\n `ts` DateTime\n)\nAS WITH MY_CTE AS\n (\n SELECT ts\n FROM default.table_03002\n )\nSELECT *\nFROM MY_CTE From a6cb302ab54082db5650263d6417052f81f30710 Mon Sep 17 00:00:00 2001 From: serxa Date: Sun, 3 Mar 2024 15:48:49 +0000 Subject: [PATCH 044/374] fix 'AddressSanitizer: stack-use-after-return' --- src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp b/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp index e76639a4b01..f8196d15819 100644 --- a/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp +++ b/src/Common/Scheduler/Nodes/tests/gtest_resource_scheduler.cpp @@ -140,10 +140,10 @@ TEST(SchedulerRoot, Cancel) auto b = r1.addQueue("/prio/B", "2"); r1.registerResource(); + std::barrier destruct_sync(2); std::barrier sync(2); std::thread consumer1([&] { - std::barrier destruct_sync(2); MyRequest request(1,[&] { sync.arrive_and_wait(); // (A) From 77fe221665ac8610e5ae42f547771e1877793ad0 Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Sun, 3 Mar 2024 14:25:25 -0700 Subject: [PATCH 045/374] Adds undocumented rand functions. Prettifies markdown. --- .../functions/random-functions.md | 277 +++++++++++++----- 1 file changed, 206 insertions(+), 71 deletions(-) diff --git a/docs/en/sql-reference/functions/random-functions.md b/docs/en/sql-reference/functions/random-functions.md index 6fd31e8d25c..2ce9c75eae4 100644 --- a/docs/en/sql-reference/functions/random-functions.md +++ b/docs/en/sql-reference/functions/random-functions.md @@ -11,79 +11,213 @@ elimination](../../sql-reference/functions/index.md#common-subexpression-elimina function return different random values. Related content + - Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) :::note The random numbers are generated by non-cryptographic algorithms. ::: -## rand, rand32 +## rand -Returns a random UInt32 number, evenly distributed across the range of all possible UInt32 numbers. +Returns a random UInt32 number. + +### Syntax + +```sql +rand() +``` + +### Parameters + +None. + +### Output + +Returns a number of type UInt32. + +### Example + +```sql +SELECT rand() +``` + +```response +1569354847 +``` + +### Implementation details Uses a linear congruential generator. +## rand32 + +Returns a random 32-bit unsigned integer (UInt32) number. + +### Syntax + +```sql +rand32() +``` + +### Parameters + +None. + +### Output + +Returns a number of type UInt32, evenly distributed across the range of all possible UInt32 values. + +### Example + +```sql +SELECT rand32(); +``` + +```response +2754546224 +``` + +**Note:** The actual output will be a random number, not the specific number shown in the example. + ## rand64 -Returns a random UInt64 number, evenly distributed across the range of all possible UInt64 numbers. +Returns a random 64-bit unsigned integer (UInt64) number. -Uses a linear congruential generator. +### Syntax + +```sql +rand64() +``` + +### Parameters + +None. + +### Implementation details + +The `rand64` function uses a linear congruential generator, which means that while it appears random, it's not truly random and can be predictable if the initial state is known. + +For scenarios where true randomness is crucial, consider using alternative methods like system-level calls or integrating with external libraries. + +### Output + +Returns a number of type UInt64, evenly distributed across the range of all possible UInt64 values. + +### Example + +```sql +SELECT rand64(); +``` + +```response +15030268859237645412 +``` + +**Note:** The actual output will be a random number, not the specific number shown in the example. ## randCanonical -Returns a random Float64 value, evenly distributed in interval [0, 1). 
+Returns a random floating-point number of type Float64, evenly distributed within the closed interval. + +### Syntax + +```sql +randCanonical() +``` + +### Parameters + +None. + +### Output + +Returns a Float64 value between 0 (inclusive) and 1 (exclusive). + +### Example + +```sql +SELECT randCanonical(); +``` + +```response +0.3452178901234567 +``` + +**Note:** The actual output will be a random decimal number between 0 and 1, not the specific number shown in the example. ## randConstant -Like `rand` but produces a constant column with a random value. +Generates a single constant column filled with a random value. Unlike `rand`, `randConstant` ensures the same random value appears in every row of the generated column, making it useful for scenarios requiring a consistent random seed across rows in a single query. -**Example** +### Syntax -``` sql -SELECT rand(), rand(1), rand(number), randConstant(), randConstant(1), randConstant(number) -FROM numbers(3) +```sql +randConstant([x]); ``` -Result: +### Parameters -``` result -┌─────rand()─┬────rand(1)─┬─rand(number)─┬─randConstant()─┬─randConstant(1)─┬─randConstant(number)─┐ -│ 3047369878 │ 4132449925 │ 4044508545 │ 2740811946 │ 4229401477 │ 1924032898 │ -│ 2938880146 │ 1267722397 │ 4154983056 │ 2740811946 │ 4229401477 │ 1924032898 │ -│ 956619638 │ 4238287282 │ 1104342490 │ 2740811946 │ 4229401477 │ 1924032898 │ -└────────────┴────────────┴──────────────┴────────────────┴─────────────────┴──────────────────────┘ +- **[x] (Optional):** An optional expression that influences the generated random value. Even if provided, the resulting value will still be constant within the same query execution. Different queries using the same expression will likely generate different constant values. + +### Implementation details + +The actual output will be different for each query execution, even with the same optional expression. + +The optional parameter may not significantly change the generated value compared to using `randConstant` alone. + +### Output + +Returns a column of type UInt32 containing the same random value in each row. + +### Examples + +```sql +SELECT randConstant() AS random_value; +``` + +```response +| random_value | +|--------------| +| 1234567890 | +``` + +```sql +SELECT randConstant(10) AS random_value; +``` + +```response +| random_value | +|--------------| +| 9876543210 | ``` ## randUniform -Returns a random Float64 drawn uniformly from interval [`min`, `max`) ([continuous uniform distribution](https://en.wikipedia.org/wiki/Continuous_uniform_distribution)). +Returns a random Float64 drawn uniformly from interval [`min`, `max`]. -**Syntax** +### Syntax -``` sql +```sql randUniform(min, max) ``` -**Arguments** +### Parameters - `min` - `Float64` - left boundary of the range, - `max` - `Float64` - right boundary of the range. -**Returned value** +### Output -- Random number. +A random number of type [Float64](/docs/en/sql-reference/data-types/float.md). -Type: [Float64](/docs/en/sql-reference/data-types/float.md). +### Example -**Example** - -``` sql +```sql SELECT randUniform(5.5, 10) FROM numbers(5) ``` -Result: - -``` result +```response ┌─randUniform(5.5, 10)─┐ │ 8.094978491443102 │ │ 7.3181248914450885 │ @@ -99,7 +233,7 @@ Returns a random Float64 drawn from a [normal distribution](https://en.wikipedia **Syntax** -``` sql +```sql randNormal(mean, variance) ``` @@ -116,13 +250,13 @@ Type: [Float64](/docs/en/sql-reference/data-types/float.md). 
**Example** -``` sql +```sql SELECT randNormal(10, 2) FROM numbers(5) ``` Result: -``` result +```result ┌──randNormal(10, 2)─┐ │ 13.389228911709653 │ │ 8.622949707401295 │ @@ -138,7 +272,7 @@ Returns a random Float64 drawn from a [log-normal distribution](https://en.wikip **Syntax** -``` sql +```sql randLogNormal(mean, variance) ``` @@ -155,13 +289,13 @@ Type: [Float64](/docs/en/sql-reference/data-types/float.md). **Example** -``` sql +```sql SELECT randLogNormal(100, 5) FROM numbers(5) ``` Result: -``` result +```result ┌─randLogNormal(100, 5)─┐ │ 1.295699673937363e48 │ │ 9.719869109186684e39 │ @@ -177,7 +311,7 @@ Returns a random UInt64 drawn from a [binomial distribution](https://en.wikipedi **Syntax** -``` sql +```sql randBinomial(experiments, probability) ``` @@ -194,13 +328,13 @@ Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md). **Example** -``` sql +```sql SELECT randBinomial(100, .75) FROM numbers(5) ``` Result: -``` result +```result ┌─randBinomial(100, 0.75)─┐ │ 74 │ │ 78 │ @@ -216,7 +350,7 @@ Returns a random UInt64 drawn from a [negative binomial distribution](https://en **Syntax** -``` sql +```sql randNegativeBinomial(experiments, probability) ``` @@ -233,13 +367,13 @@ Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md). **Example** -``` sql +```sql SELECT randNegativeBinomial(100, .75) FROM numbers(5) ``` Result: -``` result +```result ┌─randNegativeBinomial(100, 0.75)─┐ │ 33 │ │ 32 │ @@ -255,7 +389,7 @@ Returns a random UInt64 drawn from a [Poisson distribution](https://en.wikipedia **Syntax** -``` sql +```sql randPoisson(n) ``` @@ -271,13 +405,13 @@ Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md). **Example** -``` sql +```sql SELECT randPoisson(10) FROM numbers(5) ``` Result: -``` result +```result ┌─randPoisson(10)─┐ │ 8 │ │ 8 │ @@ -293,7 +427,7 @@ Returns a random UInt64 drawn from a [Bernoulli distribution](https://en.wikiped **Syntax** -``` sql +```sql randBernoulli(probability) ``` @@ -309,13 +443,13 @@ Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md). **Example** -``` sql +```sql SELECT randBernoulli(.75) FROM numbers(5) ``` Result: -``` result +```result ┌─randBernoulli(0.75)─┐ │ 1 │ │ 1 │ @@ -331,7 +465,7 @@ Returns a random Float64 drawn from a [exponential distribution](https://en.wiki **Syntax** -``` sql +```sql randExponential(lambda) ``` @@ -347,13 +481,13 @@ Type: [Float64](/docs/en/sql-reference/data-types/float.md). **Example** -``` sql +```sql SELECT randExponential(1/10) FROM numbers(5) ``` Result: -``` result +```result ┌─randExponential(divide(1, 10))─┐ │ 44.71628934340778 │ │ 4.211013337903262 │ @@ -369,7 +503,7 @@ Returns a random Float64 drawn from a [Chi-square distribution](https://en.wikip **Syntax** -``` sql +```sql randChiSquared(degree_of_freedom) ``` @@ -385,13 +519,13 @@ Type: [Float64](/docs/en/sql-reference/data-types/float.md). **Example** -``` sql +```sql SELECT randChiSquared(10) FROM numbers(5) ``` Result: -``` result +```result ┌─randChiSquared(10)─┐ │ 10.015463656521543 │ │ 9.621799919882768 │ @@ -407,7 +541,7 @@ Returns a random Float64 drawn from a [Student's t-distribution](https://en.wiki **Syntax** -``` sql +```sql randStudentT(degree_of_freedom) ``` @@ -423,13 +557,13 @@ Type: [Float64](/docs/en/sql-reference/data-types/float.md). 
**Example** -``` sql +```sql SELECT randStudentT(10) FROM numbers(5) ``` Result: -``` result +```result ┌─────randStudentT(10)─┐ │ 1.2217309938538725 │ │ 1.7941971681200541 │ @@ -445,7 +579,7 @@ Returns a random Float64 drawn from a [F-distribution](https://en.wikipedia.org/ **Syntax** -``` sql +```sql randFisherF(d1, d2) ``` @@ -462,13 +596,13 @@ Type: [Float64](/docs/en/sql-reference/data-types/float.md). **Example** -``` sql +```sql SELECT randFisherF(10, 3) FROM numbers(5) ``` Result: -``` result +```result ┌──randFisherF(10, 3)─┐ │ 7.286287504216609 │ │ 0.26590779413050386 │ @@ -484,7 +618,7 @@ Generates a string of the specified length filled with random bytes (including z **Syntax** -``` sql +```sql randomString(length) ``` @@ -502,13 +636,13 @@ Type: [String](../../sql-reference/data-types/string.md). Query: -``` sql +```sql SELECT randomString(30) AS str, length(str) AS len FROM numbers(2) FORMAT Vertical; ``` Result: -``` text +```text Row 1: ────── str: 3 G : pT ?w тi k aV f6 @@ -526,7 +660,7 @@ Generates a binary string of the specified length filled with random bytes (incl **Syntax** -``` sql +```sql randomFixedString(length); ``` @@ -563,7 +697,7 @@ If you pass `length < 0`, the behavior of the function is undefined. **Syntax** -``` sql +```sql randomPrintableASCII(length) ``` @@ -579,11 +713,11 @@ Type: [String](../../sql-reference/data-types/string.md) **Example** -``` sql +```sql SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers LIMIT 3 ``` -``` text +```text ┌─number─┬─str────────────────────────────┬─length(randomPrintableASCII(30))─┐ │ 0 │ SuiCOSTvC0csfABSw=UcSzp2.`rv8x │ 30 │ │ 1 │ 1Ag NlJ &RCN:*>HVPG;PE-nO"SUFD │ 30 │ @@ -597,7 +731,7 @@ Generates a random string of a specified length. Result string contains valid UT **Syntax** -``` sql +```sql randomStringUTF8(length); ``` @@ -635,11 +769,12 @@ Flips the bits of String or FixedString `s`, each with probability `prob`. **Syntax** -``` sql +```sql fuzzBits(s, prob) ``` **Arguments** + - `s` - `String` or `FixedString`, - `prob` - constant `Float32/64` between 0.0 and 1.0. @@ -649,14 +784,14 @@ Fuzzed string with same type as `s`. **Example** -``` sql +```sql SELECT fuzzBits(materialize('abacaba'), 0.1) FROM numbers(3) ``` Result: -``` result +```result ┌─fuzzBits(materialize('abacaba'), 0.1)─┐ │ abaaaja │ │ a*cjab+ │ From e98c30c161303d91c483e7928326f0d8efc1f9df Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Sun, 3 Mar 2024 14:38:59 -0700 Subject: [PATCH 046/374] Reorganizes rand docs page. --- .../functions/random-functions.md | 30 +++++++++---------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/docs/en/sql-reference/functions/random-functions.md b/docs/en/sql-reference/functions/random-functions.md index 2ce9c75eae4..b745d2833d3 100644 --- a/docs/en/sql-reference/functions/random-functions.md +++ b/docs/en/sql-reference/functions/random-functions.md @@ -36,19 +36,21 @@ None. Returns a number of type UInt32. +### Implementation details + +Uses a linear congruential generator. + ### Example ```sql -SELECT rand() +SELECT rand(); ``` ```response 1569354847 ``` -### Implementation details - -Uses a linear congruential generator. +**Note:** The actual output will be a random number, not the specific number shown in the example. ## rand32 @@ -94,16 +96,14 @@ rand64() None. 
-### Implementation details - -The `rand64` function uses a linear congruential generator, which means that while it appears random, it's not truly random and can be predictable if the initial state is known. - -For scenarios where true randomness is crucial, consider using alternative methods like system-level calls or integrating with external libraries. - ### Output Returns a number of type UInt64, evenly distributed across the range of all possible UInt64 values. +### Implementation details + +The `rand64` function uses a linear congruential generator, which means that while it appears random, it's not truly random and can be predictable if the initial state is known. For scenarios where true randomness is crucial, consider using alternative methods like system-level calls or integrating with external libraries. + ### Example ```sql @@ -160,16 +160,14 @@ randConstant([x]); - **[x] (Optional):** An optional expression that influences the generated random value. Even if provided, the resulting value will still be constant within the same query execution. Different queries using the same expression will likely generate different constant values. -### Implementation details - -The actual output will be different for each query execution, even with the same optional expression. - -The optional parameter may not significantly change the generated value compared to using `randConstant` alone. - ### Output Returns a column of type UInt32 containing the same random value in each row. +### Implementation details + +The actual output will be different for each query execution, even with the same optional expression. The optional parameter may not significantly change the generated value compared to using `randConstant` alone. + ### Examples ```sql From 671b0f678afcdcb354a85aa141920bff09e2bcb2 Mon Sep 17 00:00:00 2001 From: M1eyu2018 <857037797@qq.com> Date: Mon, 4 Mar 2024 10:12:27 +0800 Subject: [PATCH 047/374] Add positional read in libhdfs3 Signed-off-by: M1eyu2018 <857037797@qq.com> --- contrib/libhdfs3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libhdfs3 b/contrib/libhdfs3 index b9598e60167..0d04201c453 160000 --- a/contrib/libhdfs3 +++ b/contrib/libhdfs3 @@ -1 +1 @@ -Subproject commit b9598e6016720a7c088bfe85ce1fa0410f9d2103 +Subproject commit 0d04201c45359f0d0701fb1e8297d25eff7cfecf From c435d5894f48d37478454b1934d000fb967e2973 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 4 Mar 2024 14:23:59 +0800 Subject: [PATCH 048/374] remove wrong assertion n quantileGK --- .../AggregateFunctionGroupArray.cpp | 13 ++++++++----- .../AggregateFunctionQuantileGK.cpp | 12 ++++-------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index d72ddb42d9e..6af8b1018dd 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -182,11 +182,14 @@ public: if constexpr (Trait::sampler == Sampler::NONE) { - if (limit_num_elems && cur_elems.value.size() >= max_elems) + if constexpr (limit_num_elems) { - if constexpr (Trait::last) - cur_elems.value[(cur_elems.total_values - 1) % max_elems] = row_value; - return; + if (cur_elems.value.size() >= max_elems) + { + if constexpr (Trait::last) + cur_elems.value[(cur_elems.total_values - 1) % max_elems] = row_value; + return; + } } cur_elems.value.push_back(row_value, arena); @@ -236,7 +239,7 @@ public: void 
mergeNoSampler(Data & cur_elems, const Data & rhs_elems, Arena * arena) const { - if (!limit_num_elems) + if constexpr (!limit_num_elems) { if (rhs_elems.value.size()) cur_elems.value.insertByOffsets(rhs_elems.value, 0, rhs_elems.value.size(), arena); diff --git a/src/AggregateFunctions/AggregateFunctionQuantileGK.cpp b/src/AggregateFunctions/AggregateFunctionQuantileGK.cpp index 2e8ccb2e5e4..26737e43eef 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantileGK.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantileGK.cpp @@ -144,7 +144,7 @@ public: count = other.count; compressed = other.compressed; - sampled.resize(other.sampled.size()); + sampled.resize_exact(other.sampled.size()); memcpy(sampled.data(), other.sampled.data(), sizeof(Stats) * other.sampled.size()); return; } @@ -180,7 +180,7 @@ public: compress(); backup_sampled.clear(); - backup_sampled.reserve(sampled.size() + other.sampled.size()); + backup_sampled.reserve_exact(sampled.size() + other.sampled.size()); double merged_relative_error = std::max(relative_error, other.relative_error); size_t merged_count = count + other.count; Int64 additional_self_delta = static_cast(std::floor(2 * other.relative_error * other.count)); @@ -268,11 +268,7 @@ public: size_t sampled_len = 0; readBinaryLittleEndian(sampled_len, buf); - if (sampled_len > compress_threshold) - throw Exception( - ErrorCodes::INCORRECT_DATA, "The number of elements {} for quantileGK exceeds {}", sampled_len, compress_threshold); - - sampled.resize(sampled_len); + sampled.resize_exact(sampled_len); for (size_t i = 0; i < sampled_len; ++i) { @@ -317,7 +313,7 @@ private: ::sort(head_sampled.begin(), head_sampled.end()); backup_sampled.clear(); - backup_sampled.reserve(sampled.size() + head_sampled.size()); + backup_sampled.reserve_exact(sampled.size() + head_sampled.size()); size_t sample_idx = 0; size_t ops_idx = 0; From 6fbfd42a0522fe4161d367e3d923f2480c1df21a Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:13:44 +0800 Subject: [PATCH 049/374] Update 02241_filesystem_cache_on_write_operations.reference --- .../02241_filesystem_cache_on_write_operations.reference | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference index 53566a18edc..186dcc1eeb2 100644 --- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference +++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference @@ -205,13 +205,7 @@ INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(300, 10000) SELECT count(), sum(size) FROM system.filesystem_cache 24 84045 SYSTEM START MERGES test_02241 -SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' -81715476 -SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' OPTIMIZE TABLE test_02241 FINAL -SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes' -81881872 -SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes' SELECT count(), sum(size) FROM system.filesystem_cache 32 167243 ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100 From a7db6688edb50f894457c414b207c25548bb18d3 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 4 Mar 2024 18:24:24 +0800 Subject: [PATCH 050/374] Update ObjectStorageFactory.cpp 
--- src/Disks/ObjectStorages/ObjectStorageFactory.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp index 47c02f87b23..a0578ac4454 100644 --- a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp +++ b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp @@ -246,12 +246,11 @@ void registerAzureObjectStorage(ObjectStorageFactory & factory) bool /* skip_access_check */) -> ObjectStoragePtr { AzureBlobStorageEndpoint endpoint = processAzureBlobStorageEndpoint(config, config_prefix); - return std::make_unique( + return createObjectStorage( ObjectStorageType::Azure, config, config_prefix, name, getAzureBlobContainerClient(config, config_prefix), getAzureBlobStorageSettings(config, config_prefix, context), endpoint.prefix.empty() ? endpoint.container_name : endpoint.container_name + "/" + endpoint.prefix); - }); } #endif From b0050566e22d10ca621a33c1b4fedb987ad2620c Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Tue, 5 Mar 2024 12:14:56 +0800 Subject: [PATCH 051/374] Fix style check --- src/Disks/IO/ReadBufferFromRemoteFSGather.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index fe8d63b053d..298000ac015 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -21,7 +21,7 @@ namespace { return settings.remote_fs_cache && settings.enable_filesystem_cache; } - + bool withPageCache(const ReadSettings & settings, bool with_file_cache) { return settings.page_cache && !with_file_cache && settings.use_page_cache_for_disks_without_file_cache; From 2ee846b393d79f3f0d9710ddf910552ba1e040cd Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:07:56 +0800 Subject: [PATCH 052/374] Fix build --- src/Disks/IO/ReadBufferFromRemoteFSGather.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 298000ac015..f72e6634465 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -17,7 +17,7 @@ using namespace DB; namespace { - bool withCache(const ReadSettings & settings) + bool withFileCache(const ReadSettings & settings) { return settings.remote_fs_cache && settings.enable_filesystem_cache; } From 47ad21dd257ff1a5751d191dfd311a7950a93111 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 5 Mar 2024 12:17:04 +0100 Subject: [PATCH 053/374] Remove extra empty line --- .../03002_map_array_functions_with_low_cardinality.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql b/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql index 8240a8f93f5..8820a433da8 100644 --- a/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql +++ b/tests/queries/0_stateless/03002_map_array_functions_with_low_cardinality.sql @@ -1,2 +1 @@ SELECT mapContainsKeyLike(map('aa', toLowCardinality(1), 'bb', toLowCardinality(2)), toLowCardinality('a%')); - From bfb703b579fa192dc58e51ea842067e7e379e949 Mon Sep 17 00:00:00 2001 From: Blargian Date: Tue, 5 Mar 2024 15:38:42 +0100 Subject: [PATCH 054/374] Add 
mortonEncode and mortonDecode to documentation --- .../functions/encoding-functions.md | 203 ++++++++++++++++++ 1 file changed, 203 insertions(+) diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 618dd3f4b4f..7fd77ce3a6a 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -433,3 +433,206 @@ Result: │ [0,1,2,3,4,5,6,7] │ └───────────────────┘ ``` + +## mortonEncode + +Calculates the Morton encoding (ZCurve) for a list of unsigned integers. + +The function has two modes of operation: +- Simple +- Expanded + +### Simple mode + +Accepts up to 8 unsigned integers as arguments and produces a UInt64 code. + +**Syntax** + +```sql +mortonEncode(args) +``` + +**Parameters** + +- `args`: up to 8 [unsigned integers](../../sql-reference/data-types/int-uint.md) or columns of the aforementioned type. + +**Returned value** + +- A UInt64 code + +Type: [UInt64](../../sql-reference/data-types/int-uint.md) + +**Example** + +Query: + +```sql +SELECT mortonEncode(1, 2, 3); +``` + +```response +53 +``` + +### Expanded mode + +Accepts a range mask ([tuple](../../sql-reference/data-types/tuple.md)) as a first argument and up to 8 [unsigned integers](../../sql-reference/data-types/int-uint.md) as other arguments. + +Each number in the mask configures the amount of range expansion: +1 - no expansion +2 - 2x expansion +3 - 3x expansion +... +Up to 8x expansion. + +**Syntax** + +```sql +mortonEncode(range_mask, args) +``` + +**Parameters** +- `range_mask`: 1-8. +- `args`: up to 8 [unsigned integers](../../sql-reference/data-types/int-uint.md) or columns of the aforementioned type. + +Note: when using columns for `args` the provided `range_mask` tuple should still be a constant. + +**Returned value** + +- A UInt64 code + +Type: [UInt64](../../sql-reference/data-types/int-uint.md) + + +**Example** + +Range expansion can be beneficial when you need a similar distribution for arguments with wildly different ranges (or cardinality) +For example: 'IP Address' (0...FFFFFFFF) and 'Country code' (0...FF). + +Query: + +```sql +SELECT mortonEncode((1,2), 1024, 16); +``` + +```response +1572864 +``` + +Note: tuple size must be equal to the number of the other arguments. + +**Example** + +Morton encoding for one argument is always the argument itself: + +Query: + +```sql +SELECT mortonEncode(1); +``` + +```response +1 +``` + +**Example** + +It is also possible to expand one argument too: + +Query: + +```sql +SELECT mortonEncode(tuple(2), 128); +``` + +```response +32768 +``` + +**implementation details** + +Please note that you can fit only so much bits of information into Morton code as [UInt64](../../sql-reference/data-types/int-uint.md) has. Two arguments will have a range of maximum 2^32 (64/2) each, three arguments a range of max 2^21 (64/3) each and so on. All overflow will be clamped to zero. + +## mortonDecode + +Decodes a Morton encoding (ZCurve) into the corresponding unsigned integer tuple. + +As with the `mortonEncode` function, this function has two modes of operation: +- Simple +- Expanded + +### Simple mode + +Accepts a resulting tuple size as the first argument and the code as the second argument. + +**Syntax** + +```sql +mortonDecode(tuple_size, code) +``` + +**Parameters** +- `tuple_size`: integer value no more than 8. +- `code`: [UInt64](../../sql-reference/data-types/int-uint.md) code. 
+ +**Returned value** + +- [tuple](../../sql-reference/data-types/tuple.md) of the specified size. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md) + +**Example** + +Query: + +```sql +SELECT mortonDecode(3, 53); +``` + +```response +["1","2","3"] +``` + +### Expanded mode + +Accepts a range mask (tuple) as a first argument and the code as the second argument. +Each number in the mask configures the amount of range shrink +1 - no shrink +2 - 2x shrink +3 - 3x shrink +... +Up to 8x shrink. + +Range expansion can be beneficial when you need a similar distribution for arguments with wildly different ranges (or cardinality) +For example: 'IP Address' (0...FFFFFFFF) and 'Country code' (0...FF). +As with the encode function, this is limited to 8 numbers at most. + +**Example** + +Query: + +```sql +SELECT mortonDecode(1, 1); +``` + +```response +["1"] +``` + +**Example** + +It is also possible to shrink one argument: + +Query: + +```sql +SELECT mortonDecode(tuple(2), 32768); +``` + +```response +["128"] +``` + + + + From 7930a26df136c8a2e10f839ea4738a338dbb6c9e Mon Sep 17 00:00:00 2001 From: Blargian Date: Tue, 5 Mar 2024 15:47:45 +0100 Subject: [PATCH 055/374] Fix formatting of compression/expansion levels --- .../functions/encoding-functions.md | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 7fd77ce3a6a..0cb459b8e07 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -478,12 +478,12 @@ SELECT mortonEncode(1, 2, 3); Accepts a range mask ([tuple](../../sql-reference/data-types/tuple.md)) as a first argument and up to 8 [unsigned integers](../../sql-reference/data-types/int-uint.md) as other arguments. -Each number in the mask configures the amount of range expansion: -1 - no expansion -2 - 2x expansion -3 - 3x expansion -... -Up to 8x expansion. +Each number in the mask configures the amount of range expansion:
+1 - no expansion
+2 - 2x expansion
+3 - 3x expansion
+...
+Up to 8x expansion.
**Syntax** @@ -596,12 +596,12 @@ SELECT mortonDecode(3, 53); ### Expanded mode Accepts a range mask (tuple) as a first argument and the code as the second argument. -Each number in the mask configures the amount of range shrink -1 - no shrink -2 - 2x shrink -3 - 3x shrink -... -Up to 8x shrink. +Each number in the mask configures the amount of range shrink:
+1 - no shrink
+2 - 2x shrink
+3 - 3x shrink
+...
+Up to 8x shrink.
Range expansion can be beneficial when you need a similar distribution for arguments with wildly different ranges (or cardinality) For example: 'IP Address' (0...FFFFFFFF) and 'Country code' (0...FF). From 45509607ad139c099c5a4d5fea07ac34149dcf2d Mon Sep 17 00:00:00 2001 From: Blargian Date: Tue, 5 Mar 2024 15:50:30 +0100 Subject: [PATCH 056/374] Fix spelling mistake --- docs/en/sql-reference/functions/encoding-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 0cb459b8e07..28431c84add 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -551,7 +551,7 @@ SELECT mortonEncode(tuple(2), 128); **implementation details** -Please note that you can fit only so much bits of information into Morton code as [UInt64](../../sql-reference/data-types/int-uint.md) has. Two arguments will have a range of maximum 2^32 (64/2) each, three arguments a range of max 2^21 (64/3) each and so on. All overflow will be clamped to zero. +Please note that you can fit only so many bits of information into Morton code as [UInt64](../../sql-reference/data-types/int-uint.md) has. Two arguments will have a range of maximum 2^32 (64/2) each, three arguments a range of max 2^21 (64/3) each and so on. All overflow will be clamped to zero. ## mortonDecode From aa6b70e5f2187be71b6bce835ecff0aa0c0bfca7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 5 Mar 2024 16:55:08 +0000 Subject: [PATCH 057/374] Add documentation to `simpleJSON` functions --- .../sql-reference/functions/json-functions.md | 392 +++++++++++++++--- 1 file changed, 342 insertions(+), 50 deletions(-) diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index 2c837ff4a42..246cb8972fb 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -5,80 +5,372 @@ sidebar_label: JSON --- There are two sets of functions to parse JSON. - - `visitParam*` (`simpleJSON*`) is made to parse a special very limited subset of a JSON, but these functions are extremely fast. + - `simpleJSON*` (`visitParam*`) is made to parse a special very limited subset of a JSON, but these functions are extremely fast. - `JSONExtract*` is made to parse normal JSON. -# visitParam functions +# simpleJSON/visitParam functions ClickHouse has special functions for working with simplified JSON. All these JSON functions are based on strong assumptions about what the JSON can be, but they try to do as little as possible to get the job done. The following assumptions are made: 1. The field name (function argument) must be a constant. -2. The field name is somehow canonically encoded in JSON. For example: `visitParamHas('{"abc":"def"}', 'abc') = 1`, but `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` +2. The field name is somehow canonically encoded in JSON. For example: `simpleJSONHas('{"abc":"def"}', 'abc') = 1`, but `simpleJSONHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` 3. Fields are searched for on any nesting level, indiscriminately. If there are multiple matching fields, the first occurrence is used. 4. The JSON does not have space characters outside of string literals. -## visitParamHas(params, name) +## simpleJSONHas -Checks whether there is a field with the `name` name. 
+Checks whether there is a field named `field_name`. The result is `UInt8`. -Alias: `simpleJSONHas`. +**Syntax** -## visitParamExtractUInt(params, name) - -Parses UInt64 from the value of the field named `name`. If this is a string field, it tries to parse a number from the beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns 0. - -Alias: `simpleJSONExtractUInt`. - -## visitParamExtractInt(params, name) - -The same as for Int64. - -Alias: `simpleJSONExtractInt`. - -## visitParamExtractFloat(params, name) - -The same as for Float64. - -Alias: `simpleJSONExtractFloat`. - -## visitParamExtractBool(params, name) - -Parses a true/false value. The result is UInt8. - -Alias: `simpleJSONExtractBool`. - -## visitParamExtractRaw(params, name) - -Returns the value of a field, including separators. - -Alias: `simpleJSONExtractRaw`. - -Examples: - -``` sql -visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'; -visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'; +```sql +simpleJSONHas(json, field_name) ``` -## visitParamExtractString(params, name) +**Parameters** -Parses the string in double quotes. The value is unescaped. If unescaping failed, it returns an empty string. +- `json`: The JSON in which the field is searched for. [String](../../sql-reference/data-types/string.md#string) +- `field_name`: The name of the field to search for. [String literal](../syntax#string) -Alias: `simpleJSONExtractString`. +**Returned value** -Examples: +It returns `1` if the field exists, `0` otherwise. -``` sql -visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'; -visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'; -visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''; -visitParamExtractString('{"abc":"hello}', 'abc') = ''; +**Example** + +Query: + +```sql +CREATE TABLE jsons +( + `json` String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"true","qux":1}'); + +SELECT simpleJSONHas(json, 'foo') FROM jsons; +SELECT simpleJSONHas(json, 'bar') FROM jsons; ``` +```response +1 +0 +``` +## simpleJSONExtractUInt + +Parses `UInt64` from the value of the field named `field_name`. If this is a string field, it tries to parse a number from the beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns `0`. + +**Syntax** + +```sql +simpleJSONExtractUInt(json, field_name) +``` + +**Parameters** + +- `json`: The JSON in which the field is searched for. [String](../../sql-reference/data-types/string.md#string) +- `field_name`: The name of the field to search for. [String literal](../syntax#string) + +**Returned value** + +It returns the number parsed from the field if the field exists and contains a number, `0` otherwise. + +**Example** + +Query: + +```sql +CREATE TABLE jsons +( + `json` String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"4e3"}'); +INSERT INTO jsons VALUES ('{"foo":3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":"not1number"}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractUInt(json, 'foo') FROM jsons ORDER BY json; +``` + +```response +0 +4 +0 +3 +5 +``` + +## simpleJSONExtractInt + +Parses `Int64` from the value of the field named `field_name`. If this is a string field, it tries to parse a number from the beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns `0`. 
+ +**Syntax** + +```sql +simpleJSONExtractInt(json, field_name) +``` + +**Parameters** + +- `json`: The JSON in which the field is searched for. [String](../../sql-reference/data-types/string.md#string) +- `field_name`: The name of the field to search for. [String literal](../syntax#string) + +**Returned value** + +It returns the number parsed from the field if the field exists and contains a number, `0` otherwise. + +**Example** + +Query: + +```sql +CREATE TABLE jsons +( + `json` String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"-4e3"}'); +INSERT INTO jsons VALUES ('{"foo":-3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":"not1number"}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractInt(json, 'foo') FROM jsons ORDER BY json; +``` + +```response +0 +-4 +0 +-3 +5 +``` + +## simpleJSONExtractFloat + +Parses `Float64` from the value of the field named `field_name`. If this is a string field, it tries to parse a number from the beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns `0`. + +**Syntax** + +```sql +simpleJSONExtractFloat(json, field_name) +``` + +**Parameters** + +- `json`: The JSON in which the field is searched for. [String](../../sql-reference/data-types/string.md#string) +- `field_name`: The name of the field to search for. [String literal](../syntax#string) + +**Returned value** + +It returns the number parsed from the field if the field exists and contains a number, `0` otherwise. + +**Example** + +Query: + +```sql +CREATE TABLE jsons +( + `json` String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"-4e3"}'); +INSERT INTO jsons VALUES ('{"foo":-3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":"not1number"}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractFloat(json, 'foo') FROM jsons ORDER BY json; +``` + +```response +0 +-4000 +0 +-3.4 +5 +``` + +## simpleJSONExtractBool + +Parses a true/false value from the value of the field named `field_name`. The result is `UInt8`. + +**Syntax** + +```sql +simpleJSONExtractBool(json, field_name) +``` + +**Parameters** + +- `json`: The JSON in which the field is searched for. [String](../../sql-reference/data-types/string.md#string) +- `field_name`: The name of the field to search for. [String literal](../syntax#string) + +**Returned value** + +It returns `1` if the value of the field is `true`, `0` otherwise. This means this function will return `0` including (and not only) in the following cases: + - If the field doesn't exists. + - If the field contains `true` as a string, e.g.: `{"field":"true"}`. + - If the field contains `1` as a numerical value. + +**Example** + +Query: + +```sql +CREATE TABLE jsons +( + `json` String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":false,"bar":true}'); +INSERT INTO jsons VALUES ('{"foo":"true","qux":1}'); + +SELECT simpleJSONExtractBool(json, 'bar') FROM jsons ORDER BY json; +SELECT simpleJSONExtractBool(json, 'foo') FROM jsons ORDER BY json; +``` + +```response +0 +1 +0 +0 +``` + +## simpleJSONExtractRaw + +Returns the value of the field named `field_name` as a `String`, including separators. + +**Syntax** + +```sql +simpleJSONExtractRaw(json, field_name) +``` + +**Parameters** + +- `json`: The JSON in which the field is searched for. [String](../../sql-reference/data-types/string.md#string) +- `field_name`: The name of the field to search for. 
[String literal](../syntax#string) + +**Returned value** + +It returns the value of the field as a [`String`](../../sql-reference/data-types/string.md#string), including separators if the field exists, or an emtpy `String` otherwise. + +**Example** + +Query: + +```sql +CREATE TABLE jsons +( + `json` String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"-4e3"}'); +INSERT INTO jsons VALUES ('{"foo":-3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":{"def":[1,2,3]}}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractRaw(json, 'foo') FROM jsons ORDER BY json; +``` + +```response + +"-4e3" +-3.4 +5 +{"def":[1,2,3]} +``` + +## simpleJSONExtractString + +Parses `String` in double quotes from the value of the field named `field_name`. + +**Syntax** + +```sql +simpleJSONExtractString(json, field_name) +``` + +**Parameters** + +- `json`: The JSON in which the field is searched for. [String](../../sql-reference/data-types/string.md#string) +- `field_name`: The name of the field to search for. [String literal](../syntax#string) + +**Returned value** + +It returns the value of a field as a [`String`](../../sql-reference/data-types/string.md#string), including separators. The value is unescaped. It returns an empty `String`: if the field doesn't contain a double quoted string, if unescaping fails or if the field doesn't exist. + +**Implementation details** + There is currently no support for code points in the format `\uXXXX\uYYYY` that are not from the basic multilingual plane (they are converted to CESU-8 instead of UTF-8). +**Example** + +Query: + +```sql +CREATE TABLE jsons +( + `json` String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"\\n\\u0000"}'); +INSERT INTO jsons VALUES ('{"foo":"\\u263"}'); +INSERT INTO jsons VALUES ('{"foo":"\\u263a"}'); +INSERT INTO jsons VALUES ('{"foo":"hello}'); + +SELECT simpleJSONExtractString(json, 'foo') FROM jsons ORDER BY json; +``` + +```response +\n\0 + +☺ + +``` + +## visitParamHas + +This function is [an alias of `simpleJSONHas`](./json-functions#simplejsonhas). + +## visitParamExtractUInt + +This function is [an alias of `simpleJSONExtractUInt`](./json-functions#simplejsonextractuint). + +## visitParamExtractInt + +This function is [an alias of `simpleJSONExtractInt`](./json-functions#simplejsonextractint). + +## visitParamExtractFloat + +This function is [an alias of `simpleJSONExtractFloat`](./json-functions#simplejsonextractfloat). + +## visitParamExtractBool + +This function is [an alias of `simpleJSONExtractBool`](./json-functions#simplejsonextractbool). + +## visitParamExtractRaw + +This function is [an alias of `simpleJSONExtractRaw`](./json-functions#simplejsonextractraw). + +## visitParamExtractString + +This function is [an alias of `simpleJSONExtractString`](./json-functions#simplejsonextractstring). + # JSONExtract functions The following functions are based on [simdjson](https://github.com/lemire/simdjson) designed for more complex JSON parsing requirements. From 981c507d8007a4f7761a83a2ecfa0956a364317d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 5 Mar 2024 17:01:54 +0000 Subject: [PATCH 058/374] Add example to `sin`. 
--- docs/en/sql-reference/functions/math-functions.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md index b27668caf0c..fc659891b5c 100644 --- a/docs/en/sql-reference/functions/math-functions.md +++ b/docs/en/sql-reference/functions/math-functions.md @@ -299,6 +299,18 @@ sin(x) Type: [Float*](../../sql-reference/data-types/float.md). +**Example** + +Query: + +```sql +SELECT sin(1.23); +``` + +```response +0.9424888019316975 +``` + ## cos Returns the cosine of the argument. From 57670a69be7aee37141aad13c3c9509ea2a40162 Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Wed, 6 Mar 2024 04:15:57 +0100 Subject: [PATCH 059/374] Add mortonEncode, mortonDecode and related to spelling exceptions --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index f61448b2f35..6257b2fcd95 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -260,6 +260,7 @@ ExactEdgeLengthRads ExecutablePool ExtType ExternalDistributed +FFFFFFFF FFFD FIPS FOSDEM @@ -546,6 +547,8 @@ MinIO MinMax MindsDB Mongodb +mortonDecode +mortonEncode MsgPack MultiPolygon Multiline @@ -2741,6 +2744,7 @@ xz yaml yandex youtube +ZCurve zLib zLinux zabbix From 68a3ca37c40db7f9b928d9f20bde6912ba6bd7da Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Wed, 6 Mar 2024 06:42:01 +0100 Subject: [PATCH 060/374] Add examples using columns --- .../functions/encoding-functions.md | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 28431c84add..c81b3e35317 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -515,6 +515,8 @@ Query: SELECT mortonEncode((1,2), 1024, 16); ``` +Result: + ```response 1572864 ``` @@ -531,6 +533,8 @@ Query: SELECT mortonEncode(1); ``` +Result: + ```response 1 ``` @@ -545,10 +549,49 @@ Query: SELECT mortonEncode(tuple(2), 128); ``` +Result: + ```response 32768 ``` +**Example** + +You can also use column names in the function. + +Query: + +First create the table and insert some data. + +```sql +create table morton_numbers( + n1 UInt32, + n2 UInt32, + n3 UInt16, + n4 UInt16, + n5 UInt8, + n6 UInt8, + n7 UInt8, + n8 UInt8 +) +Engine=MergeTree() +ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into morton_numbers (*) values(1,2,3,4,5,6,7,8); +``` +Use column names instead of constants as function arguments to `mortonEncode` + +Query: + +```sql +SELECT mortonEncode(n1, n2, n3, n4, n5, n6, n7, n8) FROM morton_numbers; +``` + +Result: + +```response +2155374165 +``` + **implementation details** Please note that you can fit only so many bits of information into Morton code as [UInt64](../../sql-reference/data-types/int-uint.md) has. Two arguments will have a range of maximum 2^32 (64/2) each, three arguments a range of max 2^21 (64/3) each and so on. All overflow will be clamped to zero. 
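For a quick feel for this bit budget, the following sketch computes the approximate bits available per argument for each argument count (illustrative only; it does not reproduce the exact interleaving):

```sql
-- Approximate per-argument bit budget of a 64-bit Morton code (illustrative).
SELECT
    number AS argument_count,
    intDiv(64, number) AS bits_per_argument,
    pow(2, intDiv(64, number)) AS distinct_values_per_argument
FROM numbers(1, 8);
```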
@@ -589,6 +632,8 @@ Query: SELECT mortonDecode(3, 53); ``` +Result: + ```response ["1","2","3"] ``` @@ -615,6 +660,8 @@ Query: SELECT mortonDecode(1, 1); ``` +Result: + ```response ["1"] ``` @@ -629,10 +676,48 @@ Query: SELECT mortonDecode(tuple(2), 32768); ``` +Result: + ```response ["128"] ``` +**Example** + +You can also use column names in the function. + +First create the table and insert some data. + +Query: +```sql +create table morton_numbers( + n1 UInt32, + n2 UInt32, + n3 UInt16, + n4 UInt16, + n5 UInt8, + n6 UInt8, + n7 UInt8, + n8 UInt8 +) +Engine=MergeTree() +ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into morton_numbers (*) values(1,2,3,4,5,6,7,8); +``` +Use column names instead of constants as function arguments to `mortonDecode` + +Query: + +```sql +select untuple(mortonDecode(8, mortonEncode(n1, n2, n3, n4, n5, n6, n7, n8))) from morton_numbers; +``` + +Result: + +```response +1 2 3 4 5 6 7 8 +``` + From 6d4514c045cc565919f9c8384710eee89354f0f3 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 6 Mar 2024 16:55:48 +0800 Subject: [PATCH 061/374] Fix test --- src/Storages/System/StorageSystemDisks.cpp | 10 +++++++++- tests/integration/test_backup_restore_s3/test.py | 12 ++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp index 30d64156b22..0f8a6640f2c 100644 --- a/src/Storages/System/StorageSystemDisks.cpp +++ b/src/Storages/System/StorageSystemDisks.cpp @@ -25,6 +25,8 @@ StorageSystemDisks::StorageSystemDisks(const StorageID & table_id_) {"unreserved_space", std::make_shared()}, {"keep_free_space", std::make_shared()}, {"type", std::make_shared()}, + {"object_storage_type", std::make_shared()}, + {"metadata_type", std::make_shared()}, {"is_encrypted", std::make_shared()}, {"is_read_only", std::make_shared()}, {"is_write_once", std::make_shared()}, @@ -53,6 +55,8 @@ Pipe StorageSystemDisks::read( MutableColumnPtr col_unreserved = ColumnUInt64::create(); MutableColumnPtr col_keep = ColumnUInt64::create(); MutableColumnPtr col_type = ColumnString::create(); + MutableColumnPtr col_object_storage_type = ColumnString::create(); + MutableColumnPtr col_metadata_type = ColumnString::create(); MutableColumnPtr col_is_encrypted = ColumnUInt8::create(); MutableColumnPtr col_is_read_only = ColumnUInt8::create(); MutableColumnPtr col_is_write_once = ColumnUInt8::create(); @@ -69,7 +73,9 @@ Pipe StorageSystemDisks::read( col_unreserved->insert(disk_ptr->getUnreservedSpace().value_or(std::numeric_limits::max())); col_keep->insert(disk_ptr->getKeepingFreeSpace()); auto data_source_description = disk_ptr->getDataSourceDescription(); - col_type->insert(data_source_description.toString()); + col_type->insert(data_source_description.type); + col_object_storage_type->insert(data_source_description.object_storage_type); + col_metadata_type->insert(data_source_description.metadata_type); col_is_encrypted->insert(data_source_description.is_encrypted); col_is_read_only->insert(disk_ptr->isReadOnly()); col_is_write_once->insert(disk_ptr->isWriteOnce()); @@ -91,6 +97,8 @@ Pipe StorageSystemDisks::read( res_columns.emplace_back(std::move(col_unreserved)); res_columns.emplace_back(std::move(col_keep)); res_columns.emplace_back(std::move(col_type)); + res_columns.emplace_back(std::move(col_object_storage_type)); + res_columns.emplace_back(std::move(col_metadata_type)); res_columns.emplace_back(std::move(col_is_encrypted)); 
res_columns.emplace_back(std::move(col_is_read_only)); res_columns.emplace_back(std::move(col_is_write_once)); diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index 4d3ee8200a3..95e264107e4 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -124,15 +124,15 @@ def check_backup_and_restore( def check_system_tables(backup_query_id=None): disks = [ tuple(disk.split("\t")) - for disk in node.query("SELECT name, type FROM system.disks").split("\n") + for disk in node.query("SELECT name, type, object_storage_type, metadata_type FROM system.disks").split("\n") if disk ] expected_disks = ( - ("default", "local"), - ("disk_s3", "s3"), - ("disk_s3_cache", "s3"), - ("disk_s3_other_bucket", "s3"), - ("disk_s3_plain", "s3_plain"), + ("default", "local", "", ""), + ("disk_s3", "object_storage", "s3", "local"), + ("disk_s3_cache", "object_storage", "s3", "local"), + ("disk_s3_other_bucket", "object_storage", "s3", "local"), + ("disk_s3_plain", "object_storage", "s3", "plain"), ) assert len(expected_disks) == len(disks) for expected_disk in expected_disks: From be98c95f586762cdf20a6375917e30f296175593 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 6 Mar 2024 09:12:26 +0000 Subject: [PATCH 062/374] Automatic style fix --- tests/integration/test_backup_restore_s3/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index 95e264107e4..452a9143067 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -124,7 +124,9 @@ def check_backup_and_restore( def check_system_tables(backup_query_id=None): disks = [ tuple(disk.split("\t")) - for disk in node.query("SELECT name, type, object_storage_type, metadata_type FROM system.disks").split("\n") + for disk in node.query( + "SELECT name, type, object_storage_type, metadata_type FROM system.disks" + ).split("\n") if disk ] expected_disks = ( From 56fb61e1866e81e9a00b9b98299ddc56a54f5394 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 6 Mar 2024 10:53:39 +0000 Subject: [PATCH 063/374] Do not duplicate the first category in case of multiple categories in `FunctionDocumentation` --- src/Common/FunctionDocumentation.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Common/FunctionDocumentation.cpp b/src/Common/FunctionDocumentation.cpp index 2aad23b90b7..0dc5b48f9d1 100644 --- a/src/Common/FunctionDocumentation.cpp +++ b/src/Common/FunctionDocumentation.cpp @@ -36,6 +36,7 @@ std::string FunctionDocumentation::categoriesAsString() const auto it = categories.begin(); std::string res = *it; + ++it; for (; it != categories.end(); ++it) res += ", " + *it; return res; From 6f726865baf3fea606e7ff46e5d8cd98bda94f5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 6 Mar 2024 11:10:02 +0000 Subject: [PATCH 064/374] Add inline docs to functions --- src/Functions/sin.cpp | 10 +++++- src/Functions/visitParamExtractBool.cpp | 30 +++++++++++++++++- src/Functions/visitParamExtractFloat.cpp | 31 ++++++++++++++++++- src/Functions/visitParamExtractInt.cpp | 31 ++++++++++++++++++- src/Functions/visitParamExtractRaw.cpp | 30 +++++++++++++++++- src/Functions/visitParamExtractString.cpp | 30 +++++++++++++++++- src/Functions/visitParamExtractUInt.cpp | 31 ++++++++++++++++++- 
src/Functions/visitParamHas.cpp | 23 +++++++++++++- ...new_functions_must_be_documented.reference | 8 ----- 9 files changed, 208 insertions(+), 16 deletions(-) diff --git a/src/Functions/sin.cpp b/src/Functions/sin.cpp index dc75f4800c0..914f431adb4 100644 --- a/src/Functions/sin.cpp +++ b/src/Functions/sin.cpp @@ -13,7 +13,15 @@ using FunctionSin = FunctionMathUnary>; REGISTER_FUNCTION(Sin) { - factory.registerFunction({}, FunctionFactory::CaseInsensitive); + factory.registerFunction( + FunctionDocumentation{ + .description = "Returns the sine of the argument.", + .syntax = "sin(x)", + .arguments = {{"x", "The number whose sine will be returned. (U)Int*, Float* or Decimal*."}}, + .returned_value = "The sine of x.", + .examples = {{.name = "simple", .query = "SELECT sin(1.23)", .result = "0.9424888019316975"}}, + .categories{"Mathematical", "Trigonometric"}}, + FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/visitParamExtractBool.cpp b/src/Functions/visitParamExtractBool.cpp index 31763fe54ce..2c413ec13bb 100644 --- a/src/Functions/visitParamExtractBool.cpp +++ b/src/Functions/visitParamExtractBool.cpp @@ -21,7 +21,35 @@ using FunctionSimpleJSONExtractBool = FunctionsStringSearch(); + factory.registerFunction(FunctionDocumentation{ + .description = "Parses a true/false value from the value of the field named field_name. The result is UInt8.", + .syntax = "simpleJSONExtractBool(json, field_name)", + .arguments + = {{"json", "The JSON in which the field is searched for. String."}, + {"field_name", "The name of the field to search for. String literal."}}, + .returned_value + = R"(It returns 1 if the value of the field is true, 0 otherwise. This means this function will return 0 including (and not only) in the following cases: + - If the field doesn't exists. + - If the field contains true as a string, e.g.: {"field":"true"}. + - If the field contains 1 as a numerical value.)", + .examples + = {{.name = "simple", + .query = R"(CREATE TABLE jsons +( + json String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":false,"bar":true}'); +INSERT INTO jsons VALUES ('{"foo":"true","qux":1}'); + +SELECT simpleJSONExtractBool(json, 'bar') FROM jsons ORDER BY json; +SELECT simpleJSONExtractBool(json, 'foo') FROM jsons ORDER BY json;)", + .result = R"(0 +1 +0 +0)"}}, + .categories{"JSON"}}); factory.registerAlias("visitParamExtractBool", "simpleJSONExtractBool"); } diff --git a/src/Functions/visitParamExtractFloat.cpp b/src/Functions/visitParamExtractFloat.cpp index 6f6d5274050..fc839142cc7 100644 --- a/src/Functions/visitParamExtractFloat.cpp +++ b/src/Functions/visitParamExtractFloat.cpp @@ -11,7 +11,36 @@ using FunctionSimpleJSONExtractFloat = FunctionsStringSearch(); + factory.registerFunction(FunctionDocumentation{ + .description + = "Parses Float64 from the value of the field named field_name. If this is a string field, it tries to parse a number from the " + "beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns 0.", + .syntax = "simpleJSONExtractFloat(json, field_name)", + .arguments + = {{"json", "The JSON in which the field is searched for. String."}, + {"field_name", "The name of the field to search for. 
String literal."}}, + .returned_value = "It returns the number parsed from the field if the field exists and contains a number, 0 otherwise.", + .examples + = {{.name = "simple", + .query = R"(CREATE TABLE jsons +( + json String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"-4e3"}'); +INSERT INTO jsons VALUES ('{"foo":-3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":"not1number"}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractFloat(json, 'foo') FROM jsons ORDER BY json;)", + .result = R"(0 +-4000 +0 +-3.4 +5)"}}, + .categories{"JSON"}}); factory.registerAlias("visitParamExtractFloat", "simpleJSONExtractFloat"); } diff --git a/src/Functions/visitParamExtractInt.cpp b/src/Functions/visitParamExtractInt.cpp index e020c43e8b4..4588fc55c52 100644 --- a/src/Functions/visitParamExtractInt.cpp +++ b/src/Functions/visitParamExtractInt.cpp @@ -11,7 +11,36 @@ using FunctionSimpleJSONExtractInt = FunctionsStringSearch(); + factory.registerFunction(FunctionDocumentation{ + .description + = "Parses Int64 from the value of the field named field_name. If this is a string field, it tries to parse a number from the " + "beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns 0.", + .syntax = "simpleJSONExtractInt(json, field_name)", + .arguments + = {{"json", "The JSON in which the field is searched for. String."}, + {"field_name", "The name of the field to search for. String literal."}}, + .returned_value = "It returns the number parsed from the field if the field exists and contains a number, 0 otherwise.", + .examples + = {{.name = "simple", + .query = R"(CREATE TABLE jsons +( + json String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"-4e3"}'); +INSERT INTO jsons VALUES ('{"foo":-3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":"not1number"}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractInt(json, 'foo') FROM jsons ORDER BY json;)", + .result = R"(0 +-4 +0 +-3 +5)"}}, + .categories{"JSON"}}); factory.registerAlias("visitParamExtractInt", "simpleJSONExtractInt"); } diff --git a/src/Functions/visitParamExtractRaw.cpp b/src/Functions/visitParamExtractRaw.cpp index 74a83170545..296429423fe 100644 --- a/src/Functions/visitParamExtractRaw.cpp +++ b/src/Functions/visitParamExtractRaw.cpp @@ -61,7 +61,35 @@ using FunctionSimpleJSONExtractRaw = FunctionsStringSearchToString(); + factory.registerFunction(FunctionDocumentation{ + .description = "Returns the value of the field named field_name as a String, including separators.", + .syntax = "simpleJSONExtractRaw(json, field_name)", + .arguments + = {{"json", "The JSON in which the field is searched for. String."}, + {"field_name", "The name of the field to search for. 
String literal."}}, + .returned_value + = "It returns the value of the field as a String including separators if the field exists, or an emtpy String otherwise.", + .examples + = {{.name = "simple", + .query = R"(CREATE TABLE jsons +( + json String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"-4e3"}'); +INSERT INTO jsons VALUES ('{"foo":-3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":{"def":[1,2,3]}}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractRaw(json, 'foo') FROM jsons ORDER BY json;)", + .result = R"( +"-4e3" +-3.4 +5 +{"def":[1,2,3]})"}}, + .categories{"JSON"}}); factory.registerAlias("visitParamExtractRaw", "simpleJSONExtractRaw"); } diff --git a/src/Functions/visitParamExtractString.cpp b/src/Functions/visitParamExtractString.cpp index 50d5f345189..8dae10638f8 100644 --- a/src/Functions/visitParamExtractString.cpp +++ b/src/Functions/visitParamExtractString.cpp @@ -22,7 +22,35 @@ using FunctionSimpleJSONExtractString = FunctionsStringSearchToString(); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Parses String in double quotes from the value of the field named field_name. + + There is currently no support for code points in the format \uXXXX\uYYYY that are not from the basic multilingual plane (they are converted to CESU-8 instead of UTF-8).)", + .syntax = "simpleJSONExtractString(json, field_name)", + .arguments + = {{"json", "The JSON in which the field is searched for. String."}, + {"field_name", "The name of the field to search for. String literal."}}, + .returned_value = "It returns the value of a field as a String, including separators. The value is unescaped. It returns an empty " + "String: if the field doesn't contain a double quoted string, if unescaping fails or if the field doesn't exist.", + .examples + = {{.name = "simple", + .query = R"(CREATE TABLE jsons +( + json String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"\\n\\u0000"}'); +INSERT INTO jsons VALUES ('{"foo":"\\u263"}'); +INSERT INTO jsons VALUES ('{"foo":"\\u263a"}'); +INSERT INTO jsons VALUES ('{"foo":"hello}'); + +SELECT simpleJSONExtractString(json, 'foo') FROM jsons ORDER BY json;)", + .result = R"(\n\0 + +☺ +)"}}, + .categories{"JSON"}}); factory.registerAlias("visitParamExtractString", "simpleJSONExtractString"); } diff --git a/src/Functions/visitParamExtractUInt.cpp b/src/Functions/visitParamExtractUInt.cpp index fb58e417f34..777df9fdd24 100644 --- a/src/Functions/visitParamExtractUInt.cpp +++ b/src/Functions/visitParamExtractUInt.cpp @@ -12,7 +12,36 @@ using FunctionSimpleJSONExtractUInt = FunctionsStringSearch(); + factory.registerFunction(FunctionDocumentation{ + .description + = "Parses UInt64 from the value of the field named field_name. If this is a string field, it tries to parse a number from the " + "beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns 0.", + .syntax = "simpleJSONExtractUInt(json, field_name)", + .arguments + = {{"json", "The JSON in which the field is searched for. String."}, + {"field_name", "The name of the field to search for. 
String literal."}}, + .returned_value = "It returns the number parsed from the field if the field exists and contains a number, 0 otherwise.", + .examples + = {{.name = "simple", + .query = R"(CREATE TABLE jsons +( + json String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"4e3"}'); +INSERT INTO jsons VALUES ('{"foo":3.4}'); +INSERT INTO jsons VALUES ('{"foo":5}'); +INSERT INTO jsons VALUES ('{"foo":"not1number"}'); +INSERT INTO jsons VALUES ('{"baz":2}'); + +SELECT simpleJSONExtractUInt(json, 'foo') FROM jsons ORDER BY json;)", + .result = R"(0 +4 +0 +3 +5)"}}, + .categories{"JSON"}}); factory.registerAlias("visitParamExtractUInt", "simpleJSONExtractUInt"); } diff --git a/src/Functions/visitParamHas.cpp b/src/Functions/visitParamHas.cpp index 1ed1f1d16e7..09fec782980 100644 --- a/src/Functions/visitParamHas.cpp +++ b/src/Functions/visitParamHas.cpp @@ -21,7 +21,28 @@ using FunctionSimpleJSONHas = FunctionsStringSearch(); + factory.registerFunction(FunctionDocumentation{ + .description = "Checks whether there is a field named field_name. The result is UInt8.", + .syntax = "simpleJSONHas(json, field_name)", + .arguments + = {{"json", "The JSON in which the field is searched for. String."}, + {"field_name", "The name of the field to search for. String literal."}}, + .returned_value = "It returns 1 if the field exists, 0 otherwise.", + .examples + = {{.name = "simple", + .query = R"(CREATE TABLE jsons +( + json String +) +ENGINE = Memory; + +INSERT INTO jsons VALUES ('{"foo":"true","qux":1}'); + +SELECT simpleJSONHas(json, 'foo') FROM jsons; +SELECT simpleJSONHas(json, 'bar') FROM jsons;)", + .result = R"(1 +0)"}}, + .categories{"JSON"}}); factory.registerAlias("visitParamHas", "simpleJSONHas"); } diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index 379eea4dbbb..0a11e8b5034 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -643,14 +643,6 @@ shardNum showCertificate sigmoid sign -simpleJSONExtractBool -simpleJSONExtractFloat -simpleJSONExtractInt -simpleJSONExtractRaw -simpleJSONExtractString -simpleJSONExtractUInt -simpleJSONHas -sin sinh sipHash128 sipHash128Keyed From 5b94f9b4115e3b7e03118b4a4f4999139e58511e Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 6 Mar 2024 15:31:19 +0100 Subject: [PATCH 065/374] Check children first --- src/Storages/VirtualColumnUtils.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 6d66453442e..e8441b96782 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -469,18 +469,18 @@ static bool canEvaluateSubtree(const ActionsDAG::Node * node, const Block & allo static bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) { - if (node->type != ActionsDAG::ActionType::FUNCTION) - return true; - - if (!node->function_base->isDeterministicInScopeOfQuery()) - return false; - for (const auto * child : node->children) { if (!isDeterministicInScopeOfQuery(child)) return false; } + if (node->type != ActionsDAG::ActionType::FUNCTION) + return true; + + if (!node->function_base->isDeterministicInScopeOfQuery()) + return false; + return true; } From 526f162082dfbb4ad2fb5d3d807dfd2ad9b54bdd Mon Sep 17 
00:00:00 2001 From: avogar Date: Thu, 29 Feb 2024 18:20:47 +0000 Subject: [PATCH 066/374] Fix logical error on bad compatibility setting value type --- src/Core/Settings.cpp | 4 ++++ .../03003_compatibility_setting_bad_value.reference | 0 .../0_stateless/03003_compatibility_setting_bad_value.sql | 2 ++ 3 files changed, 6 insertions(+) create mode 100644 tests/queries/0_stateless/03003_compatibility_setting_bad_value.reference create mode 100644 tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index a38197b9eeb..fb456b46d89 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -114,7 +114,11 @@ std::vector Settings::getAllRegisteredNames() const void Settings::set(std::string_view name, const Field & value) { if (name == "compatibility") + { + if (value.getType() != Field::Types::Which::String) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected type of value for setting 'compatibility'. Expected String, got {}", value.getTypeName()); applyCompatibilitySetting(value.get()); + } /// If we change setting that was changed by compatibility setting before /// we should remove it from settings_changed_by_compatibility_setting, /// otherwise the next time we will change compatibility setting diff --git a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.reference b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql new file mode 100644 index 00000000000..9a6f4e7944a --- /dev/null +++ b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql @@ -0,0 +1,2 @@ +select 42 settings compatibility=NULL; -- {clientError BAD_GET} + From bdb76d9dd4b42ab4f40db0d371165665171afb4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 6 Mar 2024 16:30:22 +0000 Subject: [PATCH 067/374] Fix aspell errors --- docs/en/sql-reference/functions/json-functions.md | 2 +- utils/check-style/aspell-ignore/en/aspell-dict.txt | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index 246cb8972fb..e920ab82988 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -264,7 +264,7 @@ simpleJSONExtractRaw(json, field_name) **Returned value** -It returns the value of the field as a [`String`](../../sql-reference/data-types/string.md#string), including separators if the field exists, or an emtpy `String` otherwise. +It returns the value of the field as a [`String`](../../sql-reference/data-types/string.md#string), including separators if the field exists, or an empty `String` otherwise. 
**Example** diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 3614bcb7452..917b2cdcc71 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1,4 +1,4 @@ -personal_ws-1.1 en 2724 +personal_ws-1.1 en 2758 AArch ACLs ALTERs @@ -843,7 +843,6 @@ SendScalars ShareAlike SharedMergeTree Shortkeys -Shortkeys SimHash Simhash SimpleAggregateFunction @@ -1703,7 +1702,6 @@ hyperscan hypot hyvor iTerm -iTerm icosahedron icudata idempotency @@ -2327,6 +2325,14 @@ shortcircuit shortkeys shoutout simdjson +simpleJSON +simpleJSONExtractBool +simpleJSONExtractFloat +simpleJSONExtractInt +simpleJSONExtractRaw +simpleJSONExtractString +simpleJSONExtractUInt +simpleJSONHas simpleLinearRegression simpleaggregatefunction simplelinearregression From 77a980373a1dab7c49e5713ba7050d218c1250c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 6 Mar 2024 16:31:27 +0000 Subject: [PATCH 068/374] Fix typo in inline doc --- src/Functions/visitParamExtractRaw.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/visitParamExtractRaw.cpp b/src/Functions/visitParamExtractRaw.cpp index 296429423fe..3cdc5001e13 100644 --- a/src/Functions/visitParamExtractRaw.cpp +++ b/src/Functions/visitParamExtractRaw.cpp @@ -68,7 +68,7 @@ REGISTER_FUNCTION(VisitParamExtractRaw) = {{"json", "The JSON in which the field is searched for. String."}, {"field_name", "The name of the field to search for. String literal."}}, .returned_value - = "It returns the value of the field as a String including separators if the field exists, or an emtpy String otherwise.", + = "It returns the value of the field as a String including separators if the field exists, or an empty String otherwise.", .examples = {{.name = "simple", .query = R"(CREATE TABLE jsons From 06a7665d0e780b52b4c1e2c1115ed41050d2d03a Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Wed, 6 Mar 2024 18:19:52 +0100 Subject: [PATCH 069/374] Update encoding-functions.md Add missing "Result:" after query --- docs/en/sql-reference/functions/encoding-functions.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index c81b3e35317..4f6da764b3c 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -469,6 +469,7 @@ Query: ```sql SELECT mortonEncode(1, 2, 3); ``` +Result: ```response 53 From f77b5963748c321975d8bd131e794dcc57002fc8 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Mar 2024 16:17:27 +0800 Subject: [PATCH 070/374] Fix test --- .../integration/test_filesystem_cache/test.py | 47 ++++++++++++------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index 0cb1866f8e4..63316aba57e 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -350,6 +350,20 @@ def test_custom_cached_disk(cluster): def test_force_filesystem_cache_on_merges(cluster): def test(node, forced_read_through_cache_on_merge): + def to_int(value): + if value == "": + return 0 + else: + return int(value) + + r_cache_count = to_int(node.query( + "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" 
+ )) + + w_cache_count = to_int(node.query( + "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" + )) + node.query( """ DROP TABLE IF EXISTS test SYNC; @@ -376,36 +390,33 @@ def test_force_filesystem_cache_on_merges(cluster): assert int(node.query("SELECT count() FROM system.filesystem_cache")) > 0 assert int(node.query("SELECT max(size) FROM system.filesystem_cache")) == 1024 - write_count = int( + w_cache_count_2 = int( node.query( "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" ) ) - assert write_count > 100000 - assert "" == node.query( - "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" + assert w_cache_count_2 > w_cache_count + + r_cache_count_2 = to_int( + node.query( + "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" + ) ) + assert r_cache_count_2 == r_cache_count node.query("SYSTEM DROP FILESYSTEM CACHE") node.query("OPTIMIZE TABLE test FINAL") - new_write_count = int( + r_cache_count_3 = to_int( node.query( - "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" - ) - ) - assert new_write_count >= write_count - - if forced_read_through_cache_on_merge: - assert 100000 < int( - node.query( - "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" - ) - ) - else: - assert "" == node.query( "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" ) + ) + + if forced_read_through_cache_on_merge: + assert r_cache_count_3 > r_cache_count + else: + assert r_cache_count_3 == r_cache_count node = cluster.instances["node_force_read_through_cache_on_merge"] test(node, True) From 50b84954e4810c94c1397504a64ca96e1a0fed55 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Mar 2024 16:29:38 +0800 Subject: [PATCH 071/374] Update .reference --- .../0_stateless/02117_show_create_table_system.reference | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 7382b24afbc..5081527ceef 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -195,6 +195,8 @@ CREATE TABLE system.disks `unreserved_space` UInt64, `keep_free_space` UInt64, `type` String, + `object_storage_type` String, + `metadata_type` String, `is_encrypted` UInt8, `is_read_only` UInt8, `is_write_once` UInt8, From c7f5b1631c359c61b6e4c74727092df73e956922 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 7 Mar 2024 08:30:34 +0000 Subject: [PATCH 072/374] Automatic style fix --- tests/integration/test_filesystem_cache/test.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index 63316aba57e..c44d817c57c 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -356,13 +356,17 @@ def test_force_filesystem_cache_on_merges(cluster): else: return int(value) - r_cache_count = to_int(node.query( - "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" - )) + r_cache_count = to_int( + node.query( + "SELECT value FROM system.events WHERE name = 'CachedReadBufferCacheWriteBytes'" + ) + ) - w_cache_count = to_int(node.query( - "SELECT value FROM system.events WHERE name = 
'CachedWriteBufferCacheWriteBytes'" - )) + w_cache_count = to_int( + node.query( + "SELECT value FROM system.events WHERE name = 'CachedWriteBufferCacheWriteBytes'" + ) + ) node.query( """ From 31ed1966e3c5388e601edd6e97c0497153bb7196 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Mar 2024 16:44:10 +0800 Subject: [PATCH 073/374] Fix build --- src/Disks/ObjectStorages/ObjectStorageFactory.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp index 9d7e714445a..46136ad7b12 100644 --- a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp +++ b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp @@ -166,7 +166,7 @@ void registerS3ObjectStorage(ObjectStorageFactory & factory) /// NOTE: should we still perform this check for clickhouse-disks? if (!skip_access_check) - checkS3Capabilities(*object_storage, s3_capabilities, name); + checkS3Capabilities(*dynamic_cast(object_storage.get()), s3_capabilities, name); return object_storage; }); @@ -202,7 +202,7 @@ void registerS3PlainObjectStorage(ObjectStorageFactory & factory) /// NOTE: should we still perform this check for clickhouse-disks? if (!skip_access_check) - checkS3Capabilities(*object_storage, s3_capabilities, name); + checkS3Capabilities(*dynamic_cast(object_storage.get()), s3_capabilities, name); return object_storage; }); From 6c69e7d4dcfdfa21cfcaa103fc1cc7c53dfe0291 Mon Sep 17 00:00:00 2001 From: HowePa <2873679104@qq.com> Date: Thu, 7 Mar 2024 20:29:04 +0800 Subject: [PATCH 074/374] detect output format by file extension in clickhouse-local --- programs/local/LocalServer.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 68f0e52ce08..20974dd9751 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -327,6 +327,14 @@ static bool checkIfStdinIsRegularFile() return fstat(STDIN_FILENO, &file_stat) == 0 && S_ISREG(file_stat.st_mode); } + +static bool checkIfStdoutIsRegularFile() +{ + struct stat file_stat; + return fstat(STDOUT_FILENO, &file_stat) == 0 && S_ISREG(file_stat.st_mode); +} + + std::string LocalServer::getInitialCreateTableQuery() { if (!config().has("table-structure") && !config().has("table-file") && !config().has("table-data-format") && (!checkIfStdinIsRegularFile() || queries.empty())) @@ -638,7 +646,14 @@ void LocalServer::processConfig() if (config().has("macros")) global_context->setMacros(std::make_unique(config(), "macros", log)); - format = config().getString("output-format", config().getString("format", is_interactive ? "PrettyCompact" : "TSV")); + if (!config().has("output-format") && !config().has("format") && checkIfStdoutIsRegularFile()) + { + std::optional format_from_file_name; + format_from_file_name = FormatFactory::instance().tryGetFormatFromFileDescriptor(STDOUT_FILENO); + format = format_from_file_name ? *format_from_file_name : "TSV"; + } + else + format = config().getString("output-format", config().getString("format", is_interactive ? 
"PrettyCompact" : "TSV")); insert_format = "Values"; /// Setting value from cmd arg overrides one from config From 6d5fd2857ed50047d8acf48766165aa815ca30b9 Mon Sep 17 00:00:00 2001 From: HowePa <2873679104@qq.com> Date: Thu, 7 Mar 2024 20:29:42 +0800 Subject: [PATCH 075/374] detect output format by file extension in clickhouse-client --- programs/client/Client.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index a2bd6b6016a..fac34003553 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -50,6 +50,7 @@ #include #include #include +#include namespace fs = std::filesystem; using namespace std::literals; @@ -1137,6 +1138,13 @@ void Client::processOptions(const OptionsDescription & options_description, } +static bool checkIfStdoutIsRegularFile() +{ + struct stat file_stat; + return fstat(STDOUT_FILENO, &file_stat) == 0 && S_ISREG(file_stat.st_mode); +} + + void Client::processConfig() { if (!queries.empty() && config().has("queries-file")) @@ -1173,7 +1181,14 @@ void Client::processConfig() pager = config().getString("pager", ""); is_default_format = !config().has("vertical") && !config().has("format"); - if (config().has("vertical")) + if (is_default_format && checkIfStdoutIsRegularFile()) + { + is_default_format = false; + std::optional format_from_file_name; + format_from_file_name = FormatFactory::instance().tryGetFormatFromFileDescriptor(STDOUT_FILENO); + format = format_from_file_name ? *format_from_file_name : "TabSeparated"; + } + else if (config().has("vertical")) format = config().getString("format", "Vertical"); else format = config().getString("format", is_interactive ? "PrettyCompact" : "TabSeparated"); From 112c1efb7da2619cb67a48ff7fbe65ecea8e44a9 Mon Sep 17 00:00:00 2001 From: HowePa <2873679104@qq.com> Date: Thu, 7 Mar 2024 20:30:24 +0800 Subject: [PATCH 076/374] test detect output format by file extension --- ..._output_format_by_file_extension.reference | 20 +++++++++++++++++++ ..._detect_output_format_by_file_extension.sh | 13 ++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 tests/queries/0_stateless/02181_detect_output_format_by_file_extension.reference create mode 100755 tests/queries/0_stateless/02181_detect_output_format_by_file_extension.sh diff --git a/tests/queries/0_stateless/02181_detect_output_format_by_file_extension.reference b/tests/queries/0_stateless/02181_detect_output_format_by_file_extension.reference new file mode 100644 index 00000000000..7b36cc96f5e --- /dev/null +++ b/tests/queries/0_stateless/02181_detect_output_format_by_file_extension.reference @@ -0,0 +1,20 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 diff --git a/tests/queries/0_stateless/02181_detect_output_format_by_file_extension.sh b/tests/queries/0_stateless/02181_detect_output_format_by_file_extension.sh new file mode 100755 index 00000000000..ec1edd710a1 --- /dev/null +++ b/tests/queries/0_stateless/02181_detect_output_format_by_file_extension.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +$CLICKHOUSE_LOCAL -q "select * from numbers(10)" > $CLICKHOUSE_TMP/data.parquet +$CLICKHOUSE_LOCAL -q "select * from table" < $CLICKHOUSE_TMP/data.parquet + +$CLICKHOUSE_CLIENT -q "select * from numbers(10)" > $CLICKHOUSE_TMP/data.parquet +$CLICKHOUSE_LOCAL -q "select * from table" < $CLICKHOUSE_TMP/data.parquet From bd194aab41401492c5d628269df53e68243a1211 Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Thu, 7 Mar 2024 12:55:21 -0400 Subject: [PATCH 077/374] Adds makeDateTime64 function. --- .../functions/date-time-functions.md | 137 ++++++++++++++---- .../functions/other-functions.md | 65 ++++++++- 2 files changed, 170 insertions(+), 32 deletions(-) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 41503abfa2f..12f0c996ce7 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -26,66 +26,115 @@ SELECT ## makeDate -Creates a [Date](../../sql-reference/data-types/date.md) -- from a year, month and day argument, or -- from a year and day of year argument. +Creates a [Date](../../sql-reference/data-types/date.md) from either one of the following sets of arguments: -**Syntax** +- a year, month, and day. +- a year and day of year. -``` sql -makeDate(year, month, day); -makeDate(year, day_of_year); +### Syntax + +Using a year, month, and day: + +```sql +makeDate(year, month, day) ``` -Alias: -- `MAKEDATE(year, month, day);` -- `MAKEDATE(year, day_of_year);` +Using a year and day of year: -**Arguments** +```sql +makeDate(year, day_of_year) +``` + +### Arguments - `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). - `month` — Month. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). - `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). - `day_of_year` — Day of the year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). -**Returned value** +### Returned values -- A date created from the arguments. +A date created from the arguments. Type: [Date](../../sql-reference/data-types/date.md). -**Example** +### Examples Create a Date from a year, month and day: -``` sql +```sql SELECT makeDate(2023, 2, 28) AS Date; ``` -Result: - -``` text -┌───────date─┐ -│ 2023-02-28 │ -└────────────┘ +```response +2023-02-28 ``` -Create a Date from a year and day of year argument: +Create a Date from a year and day of year: ``` sql SELECT makeDate(2023, 42) AS Date; ``` -Result: - -``` text -┌───────date─┐ -│ 2023-02-11 │ -└────────────┘ +```response +2023-02-11 ``` + ## makeDate32 -Like [makeDate](#makeDate) but produces a [Date32](../../sql-reference/data-types/date32.md). +Creates a date of type [Date32](../../sql-reference/data-types/date32.md) from either one of the following sets of arguments: + +- a year, month, and day. +- a year and day of year. 
+
+### Syntax
+
+Using a year, month, and day:
+
+```sql
+makeDate32(year, month, day)
+```
+
+Using a year and day of year:
+
+```sql
+makeDate32(year, day_of_year)
+```
+
+### Arguments
+
+- `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
+- `month` — Month. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
+- `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
+- `day_of_year` — Day of the year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
+
+### Returned values
+
+A date created from the arguments.
+
+Type: [Date32](../../sql-reference/data-types/date32.md).
+
+### Examples
+
+Create a date from a year, month, and day:
+
+```sql
+SELECT makeDate32(2024, 1, 1);
+```
+
+```response
+2024-01-01
+```
+
+Create a date from a year and day of year:
+
+```sql
+SELECT makeDate32(2024, 100);
+```
+
+```response
+2024-04-09
+```
 
 ## makeDateTime
 
@@ -129,12 +178,38 @@ Result:
 
 ## makeDateTime64
 
-Like [makeDateTime](#makedatetime) but produces a [DateTime64](../../sql-reference/data-types/datetime64.md).
+Creates a [DateTime64](../../sql-reference/data-types/datetime64.md) data type value from its components (year, month, day, hour, minute, second, and optionally, a sub-second fraction and its precision).
+
+The DateTime64 data type stores both the date and time components in a single 64-bit integer value. The precision of the time component is configurable, allowing you to store time values with subsecond precision up to nanoseconds.
 
 **Syntax**
 
+```sql
+makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision]])
+```
+
+**Arguments**
+
+- `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The year component (0-9999).
+- `month` — Month. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The month component (1-12).
+- `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The day component (1-31).
+- `hour` — Hour. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The hour component (0-23).
+- `minute` — Minute. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The minute component (0-59).
+- `second` — Second. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The second component (0-59).
+- `fraction` — Optional. The sub-second (fractional) part. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
+- `precision` — Optional. [Integer](../../sql-reference/data-types/int-uint.md). The precision of the sub-second component (0-9, where 0 means no sub-second precision, and 9 means nanosecond precision).
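+
+As a minimal sketch of how the optional arguments are expected to behave (assuming the sub-second part defaults to zero and the precision defaults to 3 when they are omitted — worth verifying against your server version):
+
+```sql
+-- no fraction or precision given: the sub-second part is expected to render as .000
+SELECT makeDateTime64(2023, 5, 15, 10, 30, 45);
+
+-- fraction = 779 rendered with precision = 5, i.e. 10:30:45.00779
+SELECT makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5);
+```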
+
+**Returned value**
+
+A date and time element of type [DateTime64](../../sql-reference/data-types/datetime64.md) created from the supplied arguments.
+
+**Example**
+
 ``` sql
-makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision[, timezone]]])
+SELECT makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5);
+```
+
+```response
+2023-05-15 10:30:45.00779
 ```
 
 ## timestamp
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index 739b688a0d2..10ceedad9aa 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -1866,7 +1866,7 @@ As you can see, `runningAccumulate` merges states for each group of rows separat
 
 ## joinGet
 
-The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md).
+Allows you to extract data from a specific column in a Join table, similar to how you would access a value from a dictionary.
 
 Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key.
 
@@ -1927,6 +1927,69 @@ Result:
 
 └──────────────────────────────────────────────────┘
 ```
 
+## joinGetOrNull
+
+Allows you to extract data from a specific column in a Join table, similar to how you would access a value from a dictionary.
+
+Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key.
+
+Only supports tables created with the `ENGINE = Join(ANY, LEFT, <join_keys>)` statement.
+
+### Syntax
+
+```sql
+joinGetOrNull(join_storage_table_name, `value_column`, join_keys)
+```
+
+### Parameters
+
+- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed. The identifier is searched in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example.
+- `value_column` — name of the column of the table that contains required data.
+- `join_keys` — list of keys.
+
+### Returned value
+
+Returns a list of values corresponding to the list of keys.
+
+If a certain key does not exist in the source table then `0` or `null` will be returned based on the [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting.
+
+More info about `join_use_nulls` can be found in the [Join operation](../../engines/table-engines/special/join.md) description.
+
+**Example**
+
+Input table:
+
+``` sql
+CREATE DATABASE db_test
+CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1
+INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13)
+```
+
+``` text
+┌─id─┬─val─┐
+│ 4 │ 13 │
+│ 2 │ 12 │
+│ 1 │ 11 │
+└────┴─────┘
+```
+
+Query:
+
+``` sql
+SELECT joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1
+```
+
+Result:
+
+``` text
+┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐
+│ 0 │
+│ 11 │
+│ 12 │
+│ 0 │
+└──────────────────────────────────────────────────┘
+```
+
 ## catboostEvaluate(path_to_model, feature_1, feature_2, …, feature_n)
 
 :::note
From a00a1fd7b4c9a4e83eeb746da781b27c74dcd0b2 Mon Sep 17 00:00:00 2001
From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com>
Date: Thu, 7 Mar 2024 14:45:15 -0400
Subject: [PATCH 078/374] Adds readWKT docs.
--- .../en/sql-reference/functions/geo/polygon.md | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/docs/en/sql-reference/functions/geo/polygon.md b/docs/en/sql-reference/functions/geo/polygon.md index 4a8653965c2..35e2280e5cc 100644 --- a/docs/en/sql-reference/functions/geo/polygon.md +++ b/docs/en/sql-reference/functions/geo/polygon.md @@ -53,6 +53,62 @@ String starting with `POLYGON` Polygon +## readWKTPoint + +The `readWKTPoint` function in ClickHouse parses a Well-Known Text (WKT) representation of a Point geometry and returns a point in the internal ClickHouse format. + +### Syntax + +```sql +readWKTPoint(wkt_string) +``` + +### Arguments + +- `wkt_string`: The input WKT string representing a Point geometry. + +### Returned value + +The function returns a ClickHouse internal representation of the Point geometry. + +### Example + +```sql +SELECT readWKTPoint('POINT (1.2 3.4)'); +``` + +```response +(1.2,3.4) +``` + +## readWKTRing + +Parses a Well-Known Text (WKT) representation of a Polygon geometry and returns a ring (closed linestring) in the internal ClickHouse format. + +### Syntax + +```sql +readWKTRing(wkt_string) +``` + +### Arguments + +- `wkt_string`: The input WKT string representing a Polygon geometry. + +### Returned value + +The function returns a ClickHouse internal representation of the ring (closed linestring) geometry. + +### Example + +```sql +SELECT readWKTRing('LINESTRING (1 1, 2 2, 3 3, 1 1)'); +``` + +```response +[(1,1),(2,2),(3,3),(1,1)] +``` + ## polygonsWithinSpherical Returns true or false depending on whether or not one polygon lies completely inside another polygon. Reference https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/within/within_2.html From 2bc4d27ac6f42af79120dac72b25db6e36ee4b42 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 7 Mar 2024 19:24:39 +0000 Subject: [PATCH 079/374] Bye bye --- docker/packager/README.md | 1 - docs/en/operations/backup.md | 6 +- .../operations/utilities/clickhouse-copier.md | 187 -- docs/en/operations/utilities/index.md | 2 - .../sql-reference/statements/alter/column.md | 2 +- docs/ru/getting-started/tutorial.md | 4 - docs/ru/operations/backup.md | 6 - .../operations/utilities/clickhouse-copier.md | 183 -- docs/ru/operations/utilities/index.md | 1 - .../sql-reference/statements/alter/column.md | 4 +- docs/zh/getting-started/tutorial.md | 2 - docs/zh/operations/backup.md | 6 - .../operations/utilities/clickhouse-copier.md | 172 -- docs/zh/operations/utilities/index.md | 1 - docs/zh/sql-reference/statements/alter.md | 2 +- packages/clickhouse-server.yaml | 2 - programs/CMakeLists.txt | 2 - programs/copier/Aliases.h | 15 - programs/copier/CMakeLists.txt | 28 - programs/copier/ClusterCopier.cpp | 2076 ----------------- programs/copier/ClusterCopier.h | 240 -- programs/copier/ClusterCopierApp.cpp | 252 -- programs/copier/ClusterCopierApp.h | 99 - programs/copier/ClusterPartition.h | 22 - programs/copier/Internals.cpp | 280 --- programs/copier/Internals.h | 198 -- programs/copier/ShardPartition.cpp | 70 - programs/copier/ShardPartition.h | 54 - programs/copier/ShardPartitionPiece.cpp | 64 - programs/copier/ShardPartitionPiece.h | 43 - programs/copier/StatusAccumulator.cpp | 48 - programs/copier/StatusAccumulator.h | 27 - programs/copier/TaskCluster.cpp | 74 - programs/copier/TaskCluster.h | 51 - programs/copier/TaskShard.cpp | 37 - programs/copier/TaskShard.h | 56 - programs/copier/TaskTable.cpp | 222 -- programs/copier/TaskTable.h | 173 -- 
programs/copier/ZooKeeperStaff.h | 221 -- programs/copier/clickhouse-copier.cpp | 1 - .../testdata/configs/xml/config.xml | 2 +- .../testdata/configs/yaml/config.yaml | 2 +- .../testdata/configs/yandex_xml/config.xml | 2 +- programs/install/Install.cpp | 1 - programs/main.cpp | 2 - programs/server/config.xml | 2 +- programs/server/config.yaml.example | 2 +- src/Storages/StorageDistributed.h | 1 - tests/integration/README.md | 2 +- .../test_cluster_copier/__init__.py | 0 .../configs/conf.d/clusters.xml | 73 - .../configs/conf.d/clusters_trivial.xml | 20 - .../configs/conf.d/ddl.xml | 5 - .../configs/conf.d/query_log.xml | 14 - .../configs/config-copier.xml | 11 - .../test_cluster_copier/configs/users.xml | 34 - .../configs_three_nodes/conf.d/clusters.xml | 27 - .../configs_three_nodes/conf.d/ddl.xml | 5 - .../configs_three_nodes/config-copier.xml | 27 - .../configs_three_nodes/users.xml | 32 - .../configs_two_nodes/conf.d/clusters.xml | 22 - .../configs_two_nodes/conf.d/ddl.xml | 5 - .../conf.d/storage_configuration.xml | 34 - .../configs_two_nodes/config-copier.xml | 19 - .../configs_two_nodes/users.xml | 32 - .../test_cluster_copier/task0_description.xml | 95 - .../task_drop_target_partition.xml | 41 - .../task_month_to_week_description.xml | 99 - .../test_cluster_copier/task_no_arg.xml | 39 - .../test_cluster_copier/task_no_index.xml | 109 - .../task_non_partitioned_table.xml | 39 - .../test_cluster_copier/task_self_copy.xml | 63 - .../test_cluster_copier/task_skip_index.xml | 39 - .../test_cluster_copier/task_taxi_data.xml | 42 - .../task_test_block_size.xml | 101 - .../test_cluster_copier/task_trivial.xml | 63 - .../task_trivial_without_arguments.xml | 63 - .../test_cluster_copier/task_ttl_columns.xml | 39 - .../task_ttl_move_to_volume.xml | 39 - .../task_with_different_schema.xml | 39 - tests/integration/test_cluster_copier/test.py | 653 ------ .../test_cluster_copier/test_three_nodes.py | 286 --- .../test_cluster_copier/test_trivial.py | 227 -- .../test_cluster_copier/test_two_nodes.py | 597 ----- .../test_config_xml_full/configs/config.xml | 2 +- 85 files changed, 12 insertions(+), 7973 deletions(-) delete mode 100644 docs/en/operations/utilities/clickhouse-copier.md delete mode 100644 docs/ru/operations/utilities/clickhouse-copier.md delete mode 100644 docs/zh/operations/utilities/clickhouse-copier.md delete mode 100644 programs/copier/Aliases.h delete mode 100644 programs/copier/CMakeLists.txt delete mode 100644 programs/copier/ClusterCopier.cpp delete mode 100644 programs/copier/ClusterCopier.h delete mode 100644 programs/copier/ClusterCopierApp.cpp delete mode 100644 programs/copier/ClusterCopierApp.h delete mode 100644 programs/copier/ClusterPartition.h delete mode 100644 programs/copier/Internals.cpp delete mode 100644 programs/copier/Internals.h delete mode 100644 programs/copier/ShardPartition.cpp delete mode 100644 programs/copier/ShardPartition.h delete mode 100644 programs/copier/ShardPartitionPiece.cpp delete mode 100644 programs/copier/ShardPartitionPiece.h delete mode 100644 programs/copier/StatusAccumulator.cpp delete mode 100644 programs/copier/StatusAccumulator.h delete mode 100644 programs/copier/TaskCluster.cpp delete mode 100644 programs/copier/TaskCluster.h delete mode 100644 programs/copier/TaskShard.cpp delete mode 100644 programs/copier/TaskShard.h delete mode 100644 programs/copier/TaskTable.cpp delete mode 100644 programs/copier/TaskTable.h delete mode 100644 programs/copier/ZooKeeperStaff.h delete mode 100644 programs/copier/clickhouse-copier.cpp delete 
mode 100644 tests/integration/test_cluster_copier/__init__.py delete mode 100644 tests/integration/test_cluster_copier/configs/conf.d/clusters.xml delete mode 100644 tests/integration/test_cluster_copier/configs/conf.d/clusters_trivial.xml delete mode 100644 tests/integration/test_cluster_copier/configs/conf.d/ddl.xml delete mode 100644 tests/integration/test_cluster_copier/configs/conf.d/query_log.xml delete mode 100644 tests/integration/test_cluster_copier/configs/config-copier.xml delete mode 100644 tests/integration/test_cluster_copier/configs/users.xml delete mode 100644 tests/integration/test_cluster_copier/configs_three_nodes/conf.d/clusters.xml delete mode 100644 tests/integration/test_cluster_copier/configs_three_nodes/conf.d/ddl.xml delete mode 100644 tests/integration/test_cluster_copier/configs_three_nodes/config-copier.xml delete mode 100644 tests/integration/test_cluster_copier/configs_three_nodes/users.xml delete mode 100644 tests/integration/test_cluster_copier/configs_two_nodes/conf.d/clusters.xml delete mode 100644 tests/integration/test_cluster_copier/configs_two_nodes/conf.d/ddl.xml delete mode 100644 tests/integration/test_cluster_copier/configs_two_nodes/conf.d/storage_configuration.xml delete mode 100644 tests/integration/test_cluster_copier/configs_two_nodes/config-copier.xml delete mode 100644 tests/integration/test_cluster_copier/configs_two_nodes/users.xml delete mode 100644 tests/integration/test_cluster_copier/task0_description.xml delete mode 100644 tests/integration/test_cluster_copier/task_drop_target_partition.xml delete mode 100644 tests/integration/test_cluster_copier/task_month_to_week_description.xml delete mode 100644 tests/integration/test_cluster_copier/task_no_arg.xml delete mode 100644 tests/integration/test_cluster_copier/task_no_index.xml delete mode 100644 tests/integration/test_cluster_copier/task_non_partitioned_table.xml delete mode 100644 tests/integration/test_cluster_copier/task_self_copy.xml delete mode 100644 tests/integration/test_cluster_copier/task_skip_index.xml delete mode 100644 tests/integration/test_cluster_copier/task_taxi_data.xml delete mode 100644 tests/integration/test_cluster_copier/task_test_block_size.xml delete mode 100644 tests/integration/test_cluster_copier/task_trivial.xml delete mode 100644 tests/integration/test_cluster_copier/task_trivial_without_arguments.xml delete mode 100644 tests/integration/test_cluster_copier/task_ttl_columns.xml delete mode 100644 tests/integration/test_cluster_copier/task_ttl_move_to_volume.xml delete mode 100644 tests/integration/test_cluster_copier/task_with_different_schema.xml delete mode 100644 tests/integration/test_cluster_copier/test.py delete mode 100644 tests/integration/test_cluster_copier/test_three_nodes.py delete mode 100644 tests/integration/test_cluster_copier/test_trivial.py delete mode 100644 tests/integration/test_cluster_copier/test_two_nodes.py diff --git a/docker/packager/README.md b/docker/packager/README.md index e0b7f38ea58..3604e8585a4 100644 --- a/docker/packager/README.md +++ b/docker/packager/README.md @@ -28,7 +28,6 @@ lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse lrwxrwxrwx 1 root root 10 clickhouse-clang -> clickhouse lrwxrwxrwx 1 root root 10 clickhouse-client -> clickhouse lrwxrwxrwx 1 root root 10 clickhouse-compressor -> clickhouse -lrwxrwxrwx 1 root root 10 clickhouse-copier -> clickhouse lrwxrwxrwx 1 root root 10 clickhouse-extract-from-config -> clickhouse lrwxrwxrwx 1 root root 10 clickhouse-format -> clickhouse lrwxrwxrwx 1 root root 
10 clickhouse-lld -> clickhouse diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index 2d9bf2a2ee8..8639af468c2 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -170,7 +170,7 @@ RESTORE TABLE test.table PARTITIONS '2', '3' ### Backups as tar archives -Backups can also be stored as tar archives. The functionality is the same as for zip, except that a password is not supported. +Backups can also be stored as tar archives. The functionality is the same as for zip, except that a password is not supported. Write a backup as a tar: ``` @@ -444,10 +444,6 @@ Often data that is ingested into ClickHouse is delivered through some sort of pe Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they might not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](../engines/table-engines/special/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data. As a bonus, these replicas might have special hardware configurations with more disks attached per server, which would be cost-effective. -### clickhouse-copier {#clickhouse-copier} - -[clickhouse-copier](../operations/utilities/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters. - For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well. ### Manipulations with Parts {#manipulations-with-parts} diff --git a/docs/en/operations/utilities/clickhouse-copier.md b/docs/en/operations/utilities/clickhouse-copier.md deleted file mode 100644 index 0d329487504..00000000000 --- a/docs/en/operations/utilities/clickhouse-copier.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -slug: /en/operations/utilities/clickhouse-copier -sidebar_position: 59 -sidebar_label: clickhouse-copier ---- - -# clickhouse-copier - -Copies data from the tables in one cluster to tables in another (or the same) cluster. - -:::note -To get a consistent copy, the data in the source tables and partitions should not change during the entire process. -::: - -You can run multiple `clickhouse-copier` instances on different servers to perform the same job. ClickHouse Keeper, or ZooKeeper, is used for syncing the processes. - -After starting, `clickhouse-copier`: - -- Connects to ClickHouse Keeper and receives: - - - Copying jobs. - - The state of the copying jobs. - -- It performs the jobs. - - Each running process chooses the “closest” shard of the source cluster and copies the data into the destination cluster, resharding the data if necessary. - -`clickhouse-copier` tracks the changes in ClickHouse Keeper and applies them on the fly. - -To reduce network traffic, we recommend running `clickhouse-copier` on the same server where the source data is located. - -## Running Clickhouse-copier {#running-clickhouse-copier} - -The utility should be run manually: - -``` bash -$ clickhouse-copier --daemon --config keeper.xml --task-path /task/path --base-dir /path/to/dir -``` - -Parameters: - -- `daemon` — Starts `clickhouse-copier` in daemon mode. -- `config` — The path to the `keeper.xml` file with the parameters for the connection to ClickHouse Keeper. 
-- `task-path` — The path to the ClickHouse Keeper node. This node is used for syncing `clickhouse-copier` processes and storing tasks. Tasks are stored in `$task-path/description`. -- `task-file` — Optional path to file with task configuration for initial upload to ClickHouse Keeper. -- `task-upload-force` — Force upload `task-file` even if node already exists. Default is false. -- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` creates `clickhouse-copier_YYYYMMHHSS_` subdirectories in `$base-dir`. If this parameter is omitted, the directories are created in the directory where `clickhouse-copier` was launched. - -## Format of keeper.xml {#format-of-zookeeper-xml} - -``` xml - - - trace - 100M - 3 - - - - - 127.0.0.1 - 2181 - - - -``` - -## Configuration of Copying Tasks {#configuration-of-copying-tasks} - -``` xml - - - - - - - false - - 127.0.0.1 - 9000 - - - - ... - - - - ... - - - - - 2 - - - - 1 - - - - - 0 - - - - - 3 - - 1 - - - - - - - - source_cluster - test - hits - - - destination_cluster - test - hits2 - - - - ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}') - PARTITION BY toMonday(date) - ORDER BY (CounterID, EventDate) - - - - jumpConsistentHash(intHash64(UserID), 2) - - - CounterID != 0 - - - - '2018-02-26' - '2018-03-05' - ... - - - - - - ... - - ... - - -``` - -`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For instance, if you change the value of `max_workers`, the number of processes running tasks will also change. diff --git a/docs/en/operations/utilities/index.md b/docs/en/operations/utilities/index.md index 8959073d00e..912a5b9ccb1 100644 --- a/docs/en/operations/utilities/index.md +++ b/docs/en/operations/utilities/index.md @@ -2,13 +2,11 @@ slug: /en/operations/utilities/ sidebar_position: 56 sidebar_label: List of tools and utilities -pagination_next: 'en/operations/utilities/clickhouse-copier' --- # List of tools and utilities - [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without starting the ClickHouse server, similar to how `awk` does this. -- [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster. - [clickhouse-benchmark](../../operations/utilities/clickhouse-benchmark.md) — Loads server with the custom queries and settings. - [clickhouse-format](../../operations/utilities/clickhouse-format.md) — Enables formatting input queries. - [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — Obfuscates data. diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 0989c151d18..a23710b12bd 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -335,7 +335,7 @@ The `ALTER` query lets you create and delete separate elements (columns) in nest There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`). 
-If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](/docs/en/sql-reference/statements/insert-into.md/#inserting-the-results-of-select) query, then switch the tables using the [RENAME](/docs/en/sql-reference/statements/rename.md/#rename-table) query and delete the old table. You can use the [clickhouse-copier](/docs/en/operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query. +If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](/docs/en/sql-reference/statements/insert-into.md/#inserting-the-results-of-select) query, then switch the tables using the [RENAME](/docs/en/sql-reference/statements/rename.md/#rename-table) query and delete the old table. The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running. diff --git a/docs/ru/getting-started/tutorial.md b/docs/ru/getting-started/tutorial.md index 34064b6cf2f..8c827137e6d 100644 --- a/docs/ru/getting-started/tutorial.md +++ b/docs/ru/getting-started/tutorial.md @@ -585,10 +585,6 @@ ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` -:::danger Внимание! -Этот подход не годится для сегментирования больших таблиц. Есть инструмент [clickhouse-copier](../operations/utilities/clickhouse-copier.md), специально предназначенный для перераспределения любых больших таблиц. -::: - Как и следовало ожидать, вычислительно сложные запросы работают втрое быстрее, если они выполняются на трёх серверах, а не на одном. В данном случае мы использовали кластер из трёх сегментов с одной репликой для каждого. diff --git a/docs/ru/operations/backup.md b/docs/ru/operations/backup.md index 9ff13bbc8a6..50ee6b45e09 100644 --- a/docs/ru/operations/backup.md +++ b/docs/ru/operations/backup.md @@ -24,12 +24,6 @@ sidebar_label: "Резервное копирование данных" Некоторые локальные файловые системы позволяют делать снимки (например, [ZFS](https://en.wikipedia.org/wiki/ZFS)), но они могут быть не лучшим выбором для обслуживания живых запросов. Возможным решением является создание дополнительных реплик с такой файловой системой и исключение их из [Distributed](../engines/table-engines/special/distributed.md) таблиц, используемых для запросов `SELECT`. Снимки на таких репликах будут недоступны для запросов, изменяющих данные. В качестве бонуса, эти реплики могут иметь особые конфигурации оборудования с большим количеством дисков, подключенных к серверу, что будет экономически эффективным. -## clickhouse-copier {#clickhouse-copier} - -[clickhouse-copier](utilities/clickhouse-copier.md) — это универсальный инструмент, который изначально был создан для перешардирования таблиц с петабайтами данных. Его также можно использовать для резервного копирования и восстановления, поскольку он надёжно копирует данные между таблицами и кластерами ClickHouse. - -Для небольших объёмов данных можно применять `INSERT INTO ... SELECT ...` в удалённые таблицы. - ## Манипуляции с партициями {#manipuliatsii-s-partitsiiami} ClickHouse позволяет использовать запрос `ALTER TABLE ... FREEZE PARTITION ...` для создания локальной копии партиций таблицы. 
Это реализуется с помощью жестких ссылок (hardlinks) на каталог `/var/lib/clickhouse/shadow/`, поэтому такая копия обычно не занимает дополнительное место на диске для старых данных. Созданные копии файлов не обрабатываются сервером ClickHouse, поэтому вы можете просто оставить их там: у вас будет простая резервная копия, которая не требует дополнительной внешней системы, однако при аппаратных проблемах вы можете утратить и актуальные данные и сохраненную копию. По этой причине, лучше удаленно скопировать их в другое место, а затем удалить локальную копию. Распределенные файловые системы и хранилища объектов по-прежнему являются хорошими вариантами для этого, однако можно использовать и обычные присоединенные файловые серверы с достаточно большой ёмкостью (в этом случае передача будет происходить через сетевую файловую систему или, возможно, [rsync](https://en.wikipedia.org/wiki/Rsync)). diff --git a/docs/ru/operations/utilities/clickhouse-copier.md b/docs/ru/operations/utilities/clickhouse-copier.md deleted file mode 100644 index da86ef2d35d..00000000000 --- a/docs/ru/operations/utilities/clickhouse-copier.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -slug: /ru/operations/utilities/clickhouse-copier -sidebar_position: 59 -sidebar_label: clickhouse-copier ---- - -# clickhouse-copier {#clickhouse-copier} - -Копирует данные из таблиц одного кластера в таблицы другого (или этого же) кластера. - -Можно запустить несколько `clickhouse-copier` для разных серверах для выполнения одного и того же задания. Для синхронизации между процессами используется ZooKeeper. - -После запуска, `clickhouse-copier`: - -- Соединяется с ZooKeeper и получает: - - - Задания на копирование. - - Состояние заданий на копирование. - -- Выполняет задания. - - Каждый запущенный процесс выбирает "ближайший" шард исходного кластера и копирует данные в кластер назначения, при необходимости перешардируя их. - -`clickhouse-copier` отслеживает изменения в ZooKeeper и применяет их «на лету». - -Для снижения сетевого трафика рекомендуем запускать `clickhouse-copier` на том же сервере, где находятся исходные данные. - -## Запуск Clickhouse-copier {#zapusk-clickhouse-copier} - -Утилиту следует запускать вручную следующим образом: - -``` bash -$ clickhouse-copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir -``` - -Параметры запуска: - -- `daemon` - запускает `clickhouse-copier` в режиме демона. -- `config` - путь к файлу `zookeeper.xml` с параметрами соединения с ZooKeeper. -- `task-path` - путь к ноде ZooKeeper. Нода используется для синхронизации между процессами `clickhouse-copier` и для хранения заданий. Задания хранятся в `$task-path/description`. -- `task-file` - необязательный путь к файлу с описанием конфигурация заданий для загрузки в ZooKeeper. -- `task-upload-force` - Загрузить `task-file` в ZooKeeper даже если уже было загружено. -- `base-dir` - путь к логам и вспомогательным файлам. При запуске `clickhouse-copier` создает в `$base-dir` подкаталоги `clickhouse-copier_YYYYMMHHSS_`. Если параметр не указан, то каталоги будут создаваться в каталоге, где `clickhouse-copier` был запущен. - -## Формат Zookeeper.xml {#format-zookeeper-xml} - -``` xml - - - trace - 100M - 3 - - - - - 127.0.0.1 - 2181 - - - -``` - -## Конфигурация заданий на копирование {#konfiguratsiia-zadanii-na-kopirovanie} - -``` xml - - - - - - - false - - 127.0.0.1 - 9000 - - - - ... - - - - ... 
- - - - - 2 - - - - 1 - - - - - 0 - - - - - 3 - - 1 - - - - - - - - source_cluster - test - hits - - - destination_cluster - test - hits2 - - - - ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}') - PARTITION BY toMonday(date) - ORDER BY (CounterID, EventDate) - - - - jumpConsistentHash(intHash64(UserID), 2) - - - CounterID != 0 - - - - '2018-02-26' - '2018-03-05' - ... - - - - - - ... - - ... - - -``` - -`clickhouse-copier` отслеживает изменения `/task/path/description` и применяет их «на лету». Если вы поменяете, например, значение `max_workers`, то количество процессов, выполняющих задания, также изменится. diff --git a/docs/ru/operations/utilities/index.md b/docs/ru/operations/utilities/index.md index 9eb90a3037c..e4b01a0276d 100644 --- a/docs/ru/operations/utilities/index.md +++ b/docs/ru/operations/utilities/index.md @@ -7,7 +7,6 @@ sidebar_position: 56 # Утилиты ClickHouse {#utility-clickhouse} - [clickhouse-local](clickhouse-local.md) - позволяет выполнять SQL-запросы над данными без остановки сервера ClickHouse, подобно утилите `awk`. -- [clickhouse-copier](clickhouse-copier.md) - копирует (и перешардирует) данные с одного кластера на другой. - [clickhouse-benchmark](../../operations/utilities/clickhouse-benchmark.md) — устанавливает соединение с сервером ClickHouse и запускает циклическое выполнение указанных запросов. - [clickhouse-format](../../operations/utilities/clickhouse-format.md) — позволяет форматировать входящие запросы. - [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — обфусцирует данные. diff --git a/docs/ru/sql-reference/statements/alter/column.md b/docs/ru/sql-reference/statements/alter/column.md index 385a9835eca..2ea045f4ae3 100644 --- a/docs/ru/sql-reference/statements/alter/column.md +++ b/docs/ru/sql-reference/statements/alter/column.md @@ -94,7 +94,7 @@ RENAME COLUMN [IF EXISTS] name to new_name Переименовывает столбец `name` в `new_name`. Если указано выражение `IF EXISTS`, то запрос не будет возвращать ошибку при условии, что столбец `name` не существует. Поскольку переименование не затрагивает физические данные колонки, запрос выполняется практически мгновенно. -**ЗАМЕЧЕНИЕ**: Столбцы, являющиеся частью основного ключа или ключа сортировки (заданные с помощью `ORDER BY` или `PRIMARY KEY`), не могут быть переименованы. Попытка переименовать эти слобцы приведет к `SQL Error [524]`. +**ЗАМЕЧЕНИЕ**: Столбцы, являющиеся частью основного ключа или ключа сортировки (заданные с помощью `ORDER BY` или `PRIMARY KEY`), не могут быть переименованы. Попытка переименовать эти слобцы приведет к `SQL Error [524]`. Пример: @@ -254,7 +254,7 @@ SELECT groupArray(x), groupArray(s) FROM tmp; Отсутствует возможность удалять столбцы, входящие в первичный ключ или ключ для сэмплирования (в общем, входящие в выражение `ENGINE`). Изменение типа у столбцов, входящих в первичный ключ возможно только в том случае, если это изменение не приводит к изменению данных (например, разрешено добавление значения в Enum или изменение типа с `DateTime` на `UInt32`). -Если возможностей запроса `ALTER` не хватает для нужного изменения таблицы, вы можете создать новую таблицу, скопировать туда данные с помощью запроса [INSERT SELECT](../insert-into.md#inserting-the-results-of-select), затем поменять таблицы местами с помощью запроса [RENAME](../rename.md#rename-table), и удалить старую таблицу. 
В качестве альтернативы для запроса `INSERT SELECT`, можно использовать инструмент [clickhouse-copier](../../../sql-reference/statements/alter/index.md). +Если возможностей запроса `ALTER` не хватает для нужного изменения таблицы, вы можете создать новую таблицу, скопировать туда данные с помощью запроса [INSERT SELECT](../insert-into.md#inserting-the-results-of-select), затем поменять таблицы местами с помощью запроса [RENAME](../rename.md#rename-table), и удалить старую таблицу. Запрос `ALTER` блокирует все чтения и записи для таблицы. То есть если на момент запроса `ALTER` выполнялся долгий `SELECT`, то запрос `ALTER` сначала дождётся его выполнения. И в это время все новые запросы к той же таблице будут ждать, пока завершится этот `ALTER`. diff --git a/docs/zh/getting-started/tutorial.md b/docs/zh/getting-started/tutorial.md index 989cf5f57d8..d0c9bda83ef 100644 --- a/docs/zh/getting-started/tutorial.md +++ b/docs/zh/getting-started/tutorial.md @@ -582,8 +582,6 @@ ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` -!!! warning "注意:" - 这种方法不适合大型表的分片。 有一个单独的工具 [clickhouse-copier](../operations/utilities/clickhouse-copier.md) 这可以重新分片任意大表。 正如您所期望的那样,如果计算量大的查询使用3台服务器而不是一个,则运行速度快N倍。 diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md index 6d491f9c2f7..48e852b4228 100644 --- a/docs/zh/operations/backup.md +++ b/docs/zh/operations/backup.md @@ -24,12 +24,6 @@ sidebar_label: "\u6570\u636E\u5907\u4EFD" 某些本地文件系统提供快照功能(例如, [ZFS](https://en.wikipedia.org/wiki/ZFS)),但它们可能不是提供实时查询的最佳选择。 一个可能的解决方案是使用这种文件系统创建额外的副本,并将它们与用于`SELECT` 查询的 [分布式](../engines/table-engines/special/distributed.md) 表分离。 任何修改数据的查询都无法访问此类副本上的快照。 作为回报,这些副本可能具有特殊的硬件配置,每个服务器附加更多的磁盘,这将是经济高效的。 -## clickhouse-copier {#clickhouse-copier} - -[clickhouse-copier](utilities/clickhouse-copier.md) 是一个多功能工具,最初创建它是为了用于重新切分pb大小的表。 因为它能够在ClickHouse表和集群之间可靠地复制数据,所以它也可用于备份和还原数据。 - -对于较小的数据量,一个简单的 `INSERT INTO ... SELECT ...` 到远程表也可以工作。 - ## part操作 {#manipulations-with-parts} ClickHouse允许使用 `ALTER TABLE ... FREEZE PARTITION ...` 查询以创建表分区的本地副本。 这是利用硬链接(hardlink)到 `/var/lib/clickhouse/shadow/` 文件夹中实现的,所以它通常不会因为旧数据而占用额外的磁盘空间。 创建的文件副本不由ClickHouse服务器处理,所以你可以把它们留在那里:你将有一个简单的备份,不需要任何额外的外部系统,但它仍然容易出现硬件问题。 出于这个原因,最好将它们远程复制到另一个位置,然后删除本地副本。 分布式文件系统和对象存储仍然是一个不错的选择,但是具有足够大容量的正常附加文件服务器也可以工作(在这种情况下,传输将通过网络文件系统或者也许是 [rsync](https://en.wikipedia.org/wiki/Rsync) 来进行). 
diff --git a/docs/zh/operations/utilities/clickhouse-copier.md b/docs/zh/operations/utilities/clickhouse-copier.md deleted file mode 100644 index b01edd9257c..00000000000 --- a/docs/zh/operations/utilities/clickhouse-copier.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -slug: /zh/operations/utilities/clickhouse-copier ---- -# clickhouse-copier {#clickhouse-copier} - -将数据从一个群集中的表复制到另一个(或相同)群集中的表。 - -您可以运行多个 `clickhouse-copier` 不同服务器上的实例执行相同的作业。 ZooKeeper用于同步进程。 - -开始后, `clickhouse-copier`: - -- 连接到ZooKeeper并且接收: - - - 复制作业。 - - 复制作业的状态。 - -- 它执行的工作。 - - 每个正在运行的进程都会选择源集群的“最接近”分片,然后将数据复制到目标集群,并在必要时重新分片数据。 - -`clickhouse-copier` 跟踪ZooKeeper中的更改,并实时应用它们。 - -为了减少网络流量,我们建议运行 `clickhouse-copier` 在源数据所在的同一服务器上。 - -## 运行Clickhouse-copier {#running-clickhouse-copier} - -该实用程序应手动运行: - -``` bash -clickhouse-copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir -``` - -参数: - -- `daemon` — 在守护进程模式下启动`clickhouse-copier`。 -- `config` — `zookeeper.xml`文件的路径,其中包含用于连接ZooKeeper的参数。 -- `task-path` — ZooKeeper节点的路径。 该节点用于同步`clickhouse-copier`进程和存储任务。 任务存储在`$task-path/description`中。 -- `task-file` — 可选的非必须参数, 指定一个包含任务配置的参数文件, 用于初始上传到ZooKeeper。 -- `task-upload-force` — 即使节点已经存在,也强制上载`task-file`。 -- `base-dir` — 日志和辅助文件的路径。 启动时,`clickhouse-copier`在`$base-dir`中创建`clickhouse-copier_YYYYMMHHSS_`子目录。 如果省略此参数,则会在启动`clickhouse-copier`的目录中创建目录。 - - - -## Zookeeper.xml格式 {#format-of-zookeeper-xml} - -``` xml - - - trace - 100M - 3 - - - - - 127.0.0.1 - 2181 - - - -``` - -## 复制任务的配置 {#configuration-of-copying-tasks} - -``` xml - - - - - - false - - 127.0.0.1 - 9000 - - - ... - - - - ... - - - - - 2 - - - - 1 - - - - - 0 - - - - - 3 - - 1 - - - - - - - - source_cluster - test - hits - - - destination_cluster - test - hits2 - - - - ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}') - PARTITION BY toMonday(date) - ORDER BY (CounterID, EventDate) - - - - jumpConsistentHash(intHash64(UserID), 2) - - - CounterID != 0 - - - - '2018-02-26' - '2018-03-05' - ... - - - - - - ... - - ... 
- - -``` - -`clickhouse-copier` 跟踪更改 `/task/path/description` 并在飞行中应用它们。 例如,如果你改变的值 `max_workers`,运行任务的进程数也会发生变化。 diff --git a/docs/zh/operations/utilities/index.md b/docs/zh/operations/utilities/index.md index af158baf275..cebe312450c 100644 --- a/docs/zh/operations/utilities/index.md +++ b/docs/zh/operations/utilities/index.md @@ -4,5 +4,4 @@ slug: /zh/operations/utilities/ # 实用工具 {#clickhouse-utility} - [本地查询](clickhouse-local.md) — 在不停止ClickHouse服务的情况下,对数据执行查询操作(类似于 `awk` 命令)。 -- [跨集群复制](clickhouse-copier.md) — 在不同集群间复制数据。 - [性能测试](clickhouse-benchmark.md) — 连接到Clickhouse服务器,执行性能测试。 diff --git a/docs/zh/sql-reference/statements/alter.md b/docs/zh/sql-reference/statements/alter.md index 002d5102fa3..48665ae04ab 100644 --- a/docs/zh/sql-reference/statements/alter.md +++ b/docs/zh/sql-reference/statements/alter.md @@ -150,7 +150,7 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String) 不支持对primary key或者sampling key中的列(在 `ENGINE` 表达式中用到的列)进行删除操作。改变包含在primary key中的列的类型时,如果操作不会导致数据的变化(例如,往Enum中添加一个值,或者将`DateTime` 类型改成 `UInt32`),那么这种操作是可行的。 -如果 `ALTER` 操作不足以完成你想要的表变动操作,你可以创建一张新的表,通过 [INSERT SELECT](../../sql-reference/statements/insert-into.md#inserting-the-results-of-select)将数据拷贝进去,然后通过 [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename)将新的表改成和原有表一样的名称,并删除原有的表。你可以使用 [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) 代替 `INSERT SELECT`。 +如果 `ALTER` 操作不足以完成你想要的表变动操作,你可以创建一张新的表,通过 [INSERT SELECT](../../sql-reference/statements/insert-into.md#inserting-the-results-of-select)将数据拷贝进去,然后通过 [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename)将新的表改成和原有表一样的名称,并删除原有的表。 `ALTER` 操作会阻塞对表的所有读写操作。换句话说,当一个大的 `SELECT` 语句和 `ALTER`同时执行时,`ALTER`会等待,直到 `SELECT` 执行结束。与此同时,当 `ALTER` 运行时,新的 sql 语句将会等待。 diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml index 7894129b8e3..dc183ead102 100644 --- a/packages/clickhouse-server.yaml +++ b/packages/clickhouse-server.yaml @@ -50,8 +50,6 @@ contents: dst: /etc/init.d/clickhouse-server - src: clickhouse-server.service dst: /lib/systemd/system/clickhouse-server.service -- src: root/usr/bin/clickhouse-copier - dst: /usr/bin/clickhouse-copier - src: root/usr/bin/clickhouse-server dst: /usr/bin/clickhouse-server # clickhouse-keeper part diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 62bcf068879..d945fdf4a6f 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -122,7 +122,6 @@ add_subdirectory (local) add_subdirectory (benchmark) add_subdirectory (extract-from-config) add_subdirectory (compressor) -add_subdirectory (copier) add_subdirectory (format) add_subdirectory (obfuscator) add_subdirectory (install) @@ -200,7 +199,6 @@ clickhouse_program_install(clickhouse-server server) clickhouse_program_install(clickhouse-client client chc) clickhouse_program_install(clickhouse-local local chl ch) clickhouse_program_install(clickhouse-benchmark benchmark) -clickhouse_program_install(clickhouse-copier copier) clickhouse_program_install(clickhouse-extract-from-config extract-from-config) clickhouse_program_install(clickhouse-compressor compressor) clickhouse_program_install(clickhouse-format format) diff --git a/programs/copier/Aliases.h b/programs/copier/Aliases.h deleted file mode 100644 index 02be3441acd..00000000000 --- a/programs/copier/Aliases.h +++ /dev/null @@ -1,15 +0,0 @@ -#pragma once - -#include - -#include - -#include - -namespace DB -{ - using ConfigurationPtr = Poco::AutoPtr; - - using DatabaseAndTableName = std::pair; - using 
ListOfDatabasesAndTableNames = std::vector; -} diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt deleted file mode 100644 index 2c17e70bc5e..00000000000 --- a/programs/copier/CMakeLists.txt +++ /dev/null @@ -1,28 +0,0 @@ -set(CLICKHOUSE_COPIER_SOURCES - "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartitionPiece.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/StatusAccumulator.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/TaskShard.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/TaskTable.cpp") - -set (CLICKHOUSE_COPIER_LINK - PRIVATE - clickhouse_common_zookeeper - clickhouse_common_config - clickhouse_parsers - clickhouse_functions - clickhouse_table_functions - clickhouse_aggregate_functions - string_utils - - PUBLIC - daemon -) - -set(CLICKHOUSE_COPIER_INCLUDE SYSTEM PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) - -clickhouse_program_add(copier) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp deleted file mode 100644 index 59505d08f5c..00000000000 --- a/programs/copier/ClusterCopier.cpp +++ /dev/null @@ -1,2076 +0,0 @@ -#include "ClusterCopier.h" - -#include "Internals.h" -#include "StatusAccumulator.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace CurrentMetrics -{ - extern const Metric LocalThread; - extern const Metric LocalThreadActive; - extern const Metric LocalThreadScheduled; -} - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; - extern const int LOGICAL_ERROR; - extern const int UNFINISHED; - extern const int BAD_ARGUMENTS; -} - - -void ClusterCopier::init() -{ - auto zookeeper = getContext()->getZooKeeper(); - - task_description_watch_callback = [this] (const Coordination::WatchResponse & response) - { - if (response.error != Coordination::Error::ZOK) - return; - UInt64 version = ++task_description_version; - LOG_INFO(log, "Task description should be updated, local version {}", version); - }; - - task_description_path = task_zookeeper_path + "/description"; - task_cluster = std::make_unique(task_zookeeper_path, working_database_name); - - reloadTaskDescription(); - - task_cluster->loadTasks(*task_cluster_current_config); - getContext()->setClustersConfig(task_cluster_current_config, false, task_cluster->clusters_prefix); - - /// Set up shards and their priority - task_cluster->random_engine.seed(randomSeed()); - for (auto & task_table : task_cluster->table_tasks) - { - task_table.cluster_pull = getContext()->getCluster(task_table.cluster_pull_name); - task_table.cluster_push = getContext()->getCluster(task_table.cluster_push_name); - task_table.initShards(task_cluster->random_engine); - } - - LOG_INFO(log, "Will process {} table tasks", task_cluster->table_tasks.size()); - - /// Do not initialize tables, will make deferred initialization in process() - - zookeeper->createAncestors(getWorkersPathVersion() + "/"); - zookeeper->createAncestors(getWorkersPath() + "/"); - /// Init status node - zookeeper->createIfNotExists(task_zookeeper_path + "/status", "{}"); -} - -template -decltype(auto) ClusterCopier::retry(T && func, UInt64 max_tries) -{ - std::exception_ptr exception; - - if (max_tries == 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot perform 
zero retries"); - - for (UInt64 try_number = 1; try_number <= max_tries; ++try_number) - { - try - { - return func(); - } - catch (...) - { - exception = std::current_exception(); - if (try_number < max_tries) - { - tryLogCurrentException(log, "Will retry"); - std::this_thread::sleep_for(retry_delay_ms); - } - } - } - - std::rethrow_exception(exception); -} - - -void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts, const TaskShardPtr & task_shard) -{ - TaskTable & task_table = task_shard->task_table; - - LOG_INFO(log, "Discover partitions of shard {}", task_shard->getDescription()); - - auto get_partitions = [&] () { return getShardPartitions(timeouts, *task_shard); }; - auto existing_partitions_names = retry(get_partitions, 60); - Strings filtered_partitions_names; - Strings missing_partitions; - - /// Check that user specified correct partition names - auto check_partition_format = [] (const DataTypePtr & type, const String & partition_text_quoted) - { - MutableColumnPtr column_dummy = type->createColumn(); - ReadBufferFromString rb(partition_text_quoted); - - try - { - type->getDefaultSerialization()->deserializeTextQuoted(*column_dummy, rb, FormatSettings()); - } - catch (Exception & e) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Partition {} has incorrect format. {}", partition_text_quoted, e.displayText()); - } - }; - - if (task_table.has_enabled_partitions) - { - /// Process partition in order specified by - for (const String & partition_name : task_table.enabled_partitions) - { - /// Check that user specified correct partition names - check_partition_format(task_shard->partition_key_column.type, partition_name); - - auto it = existing_partitions_names.find(partition_name); - - /// Do not process partition if it is not in enabled_partitions list - if (it == existing_partitions_names.end()) - { - missing_partitions.emplace_back(partition_name); - continue; - } - - filtered_partitions_names.emplace_back(*it); - } - - for (const String & partition_name : existing_partitions_names) - { - if (!task_table.enabled_partitions_set.contains(partition_name)) - { - LOG_INFO(log, "Partition {} will not be processed, since it is not in enabled_partitions of {}", partition_name, task_table.table_id); - } - } - } - else - { - for (const String & partition_name : existing_partitions_names) - filtered_partitions_names.emplace_back(partition_name); - } - - for (const String & partition_name : filtered_partitions_names) - { - const size_t number_of_splits = task_table.number_of_splits; - task_shard->partition_tasks.emplace(partition_name, ShardPartition(*task_shard, partition_name, number_of_splits)); - task_shard->checked_partitions.emplace(partition_name, true); - - auto shard_partition_it = task_shard->partition_tasks.find(partition_name); - PartitionPieces & shard_partition_pieces = shard_partition_it->second.pieces; - - for (size_t piece_number = 0; piece_number < number_of_splits; ++piece_number) - { - bool res = checkPresentPartitionPiecesOnCurrentShard(timeouts, *task_shard, partition_name, piece_number); - shard_partition_pieces.emplace_back(shard_partition_it->second, piece_number, res); - } - } - - if (!missing_partitions.empty()) - { - WriteBufferFromOwnString ss; - for (const String & missing_partition : missing_partitions) - ss << " " << missing_partition; - - LOG_WARNING(log, "There are no {} partitions from enabled_partitions in shard {} :{}", missing_partitions.size(), task_shard->getDescription(), ss.str()); - } - - LOG_INFO(log, "Will copy {} 
partitions from shard {}", task_shard->partition_tasks.size(), task_shard->getDescription()); -} - -void ClusterCopier::discoverTablePartitions(const ConnectionTimeouts & timeouts, TaskTable & task_table, UInt64 num_threads) -{ - /// Fetch partitions list from a shard - { - ThreadPool thread_pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads ? num_threads : 2 * getNumberOfPhysicalCPUCores()); - - for (const TaskShardPtr & task_shard : task_table.all_shards) - thread_pool.scheduleOrThrowOnError([this, timeouts, task_shard]() - { - setThreadName("DiscoverPartns"); - discoverShardPartitions(timeouts, task_shard); - }); - - LOG_INFO(log, "Waiting for {} setup jobs", thread_pool.active()); - thread_pool.wait(); - } -} - -void ClusterCopier::uploadTaskDescription(const std::string & task_path, const std::string & task_file, const bool force) -{ - auto local_task_description_path = task_path + "/description"; - - String task_config_str; - { - ReadBufferFromFile in(task_file); - readStringUntilEOF(task_config_str, in); - } - if (task_config_str.empty()) - return; - - auto zookeeper = getContext()->getZooKeeper(); - - zookeeper->createAncestors(local_task_description_path); - auto code = zookeeper->tryCreate(local_task_description_path, task_config_str, zkutil::CreateMode::Persistent); - if (code != Coordination::Error::ZOK && force) - zookeeper->createOrUpdate(local_task_description_path, task_config_str, zkutil::CreateMode::Persistent); - - LOG_INFO(log, "Task description {} uploaded to {} with result {} ({})", - ((code != Coordination::Error::ZOK && !force) ? "not " : ""), local_task_description_path, code, Coordination::errorMessage(code)); -} - -void ClusterCopier::reloadTaskDescription() -{ - auto zookeeper = getContext()->getZooKeeper(); - task_description_watch_zookeeper = zookeeper; - - Coordination::Stat stat{}; - - /// It will throw exception if such a node doesn't exist. 
- auto task_config_str = zookeeper->get(task_description_path, &stat); - - LOG_INFO(log, "Loading task description"); - task_cluster_current_config = getConfigurationFromXMLString(task_config_str); - - /// Setup settings - task_cluster->reloadSettings(*task_cluster_current_config); - getContext()->setSettings(task_cluster->settings_common); -} - -void ClusterCopier::updateConfigIfNeeded() -{ - UInt64 version_to_update = task_description_version; - bool is_outdated_version = task_description_current_version != version_to_update; - bool is_expired_session = !task_description_watch_zookeeper || task_description_watch_zookeeper->expired(); - - if (!is_outdated_version && !is_expired_session) - return; - - LOG_INFO(log, "Updating task description"); - reloadTaskDescription(); - - task_description_current_version = version_to_update; -} - -void ClusterCopier::process(const ConnectionTimeouts & timeouts) -{ - for (TaskTable & task_table : task_cluster->table_tasks) - { - LOG_INFO(log, "Process table task {} with {} shards, {} of them are local ones", task_table.table_id, task_table.all_shards.size(), task_table.local_shards.size()); - - if (task_table.all_shards.empty()) - continue; - - /// Discover partitions of each shard and total set of partitions - if (!task_table.has_enabled_partitions) - { - /// If there are no specified enabled_partitions, we must discover them manually - discoverTablePartitions(timeouts, task_table); - - /// After partitions of each shard are initialized, initialize cluster partitions - for (const TaskShardPtr & task_shard : task_table.all_shards) - { - for (const auto & partition_elem : task_shard->partition_tasks) - { - const String & partition_name = partition_elem.first; - task_table.cluster_partitions.emplace(partition_name, ClusterPartition{}); - } - } - - for (auto & partition_elem : task_table.cluster_partitions) - { - const String & partition_name = partition_elem.first; - - for (const TaskShardPtr & task_shard : task_table.all_shards) - task_shard->checked_partitions.emplace(partition_name); - - task_table.ordered_partition_names.emplace_back(partition_name); - } - } - else - { - /// If enabled_partitions are specified, assume that each shard has all partitions - /// We will refine partition set of each shard in future - - for (const String & partition_name : task_table.enabled_partitions) - { - task_table.cluster_partitions.emplace(partition_name, ClusterPartition{}); - task_table.ordered_partition_names.emplace_back(partition_name); - } - } - - task_table.watch.restart(); - - /// Retry table processing - bool table_is_done = false; - for (UInt64 num_table_tries = 1; num_table_tries <= max_table_tries; ++num_table_tries) - { - if (tryProcessTable(timeouts, task_table)) - { - table_is_done = true; - break; - } - } - - if (!table_is_done) - { - throw Exception(ErrorCodes::UNFINISHED, "Too many tries to process table {}. Abort remaining execution", - task_table.table_id); - } - } -} - -/// Protected section - - -/* - * Creates task worker node and checks maximum number of workers not to exceed the limit. - * To achieve this we have to check version of workers_version_path node and create current_worker_path - * node atomically. 
- * */ - -zkutil::EphemeralNodeHolder::Ptr ClusterCopier::createTaskWorkerNodeAndWaitIfNeed( - const zkutil::ZooKeeperPtr & zookeeper, - const String & description, - bool unprioritized) -{ - std::chrono::milliseconds current_sleep_time = retry_delay_ms; - static constexpr std::chrono::milliseconds max_sleep_time(30000); // 30 sec - - if (unprioritized) - std::this_thread::sleep_for(current_sleep_time); - - String workers_version_path = getWorkersPathVersion(); - String workers_path = getWorkersPath(); - String current_worker_path = getCurrentWorkerNodePath(); - - UInt64 num_bad_version_errors = 0; - - while (true) - { - updateConfigIfNeeded(); - - Coordination::Stat stat; - zookeeper->get(workers_version_path, &stat); - auto version = stat.version; - zookeeper->get(workers_path, &stat); - - if (static_cast(stat.numChildren) >= task_cluster->max_workers) - { - LOG_INFO(log, "Too many workers ({}, maximum {}). Postpone processing {}", stat.numChildren, task_cluster->max_workers, description); - - if (unprioritized) - current_sleep_time = std::min(max_sleep_time, current_sleep_time + retry_delay_ms); - - std::this_thread::sleep_for(current_sleep_time); - num_bad_version_errors = 0; - } - else - { - Coordination::Requests ops; - ops.emplace_back(zkutil::makeSetRequest(workers_version_path, description, version)); - ops.emplace_back(zkutil::makeCreateRequest(current_worker_path, description, zkutil::CreateMode::Ephemeral)); - Coordination::Responses responses; - auto code = zookeeper->tryMulti(ops, responses); - - if (code == Coordination::Error::ZOK || code == Coordination::Error::ZNODEEXISTS) - return zkutil::EphemeralNodeHolder::existing(current_worker_path, *zookeeper); - - if (code == Coordination::Error::ZBADVERSION) - { - ++num_bad_version_errors; - - /// Try to make fast retries - if (num_bad_version_errors > 3) - { - LOG_INFO(log, "A concurrent worker has just been added, will check free worker slots again"); - std::chrono::milliseconds random_sleep_time(std::uniform_int_distribution(1, 1000)(task_cluster->random_engine)); - std::this_thread::sleep_for(random_sleep_time); - num_bad_version_errors = 0; - } - } - else - throw Coordination::Exception(code); - } - } -} - - -bool ClusterCopier::checkPartitionPieceIsClean( - const zkutil::ZooKeeperPtr & zookeeper, - const CleanStateClock & clean_state_clock, - const String & task_status_path) -{ - LogicalClock task_start_clock; - - Coordination::Stat stat{}; - if (zookeeper->exists(task_status_path, &stat)) - task_start_clock = LogicalClock(stat.mzxid); - - return clean_state_clock.is_clean() && (!task_start_clock.hasHappened() || clean_state_clock.discovery_zxid <= task_start_clock); -} - - -bool ClusterCopier::checkAllPiecesInPartitionAreDone(const TaskTable & task_table, const String & partition_name, const TasksShard & shards_with_partition) -{ - bool answer = true; - for (size_t piece_number = 0; piece_number < task_table.number_of_splits; ++piece_number) - { - bool piece_is_done = checkPartitionPieceIsDone(task_table, partition_name, piece_number, shards_with_partition); - if (!piece_is_done) - LOG_INFO(log, "Partition {} piece {} is not already done.", partition_name, piece_number); - answer &= piece_is_done; - } - - return answer; -} - - -/* The same as function above - * Assume that we don't know on which shards do we have partition certain piece. - * We'll check them all (I mean shards that contain the whole partition) - * And shards that don't have certain piece MUST mark that piece is_done true. 
- * */ -bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, const String & partition_name, - size_t piece_number, const TasksShard & shards_with_partition) -{ - LOG_INFO(log, "Check that all shards processed partition {} piece {} successfully", partition_name, piece_number); - - auto zookeeper = getContext()->getZooKeeper(); - - /// Collect all shards that contain partition piece number piece_number. - Strings piece_status_paths; - for (const auto & shard : shards_with_partition) - { - ShardPartition & task_shard_partition = shard->partition_tasks.find(partition_name)->second; - ShardPartitionPiece & shard_partition_piece = task_shard_partition.pieces[piece_number]; - piece_status_paths.emplace_back(shard_partition_piece.getShardStatusPath()); - } - - std::vector zxid1, zxid2; - - try - { - std::vector get_futures; - for (const String & path : piece_status_paths) - get_futures.emplace_back(zookeeper->asyncGet(path)); - - // Check that state is Finished and remember zxid - for (auto & future : get_futures) - { - auto res = future.get(); - - TaskStateWithOwner status = TaskStateWithOwner::fromString(res.data); - if (status.state != TaskState::Finished) - { - LOG_INFO(log, "The task {} is being rewritten by {}. Partition piece will be rechecked", res.data, status.owner); - return false; - } - - zxid1.push_back(res.stat.pzxid); - } - - const String piece_is_dirty_flag_path = task_table.getCertainPartitionPieceIsDirtyPath(partition_name, piece_number); - const String piece_is_dirty_cleaned_path = task_table.getCertainPartitionPieceIsCleanedPath(partition_name, piece_number); - const String piece_task_status_path = task_table.getCertainPartitionPieceTaskStatusPath(partition_name, piece_number); - - CleanStateClock clean_state_clock (zookeeper, piece_is_dirty_flag_path, piece_is_dirty_cleaned_path); - - const bool is_clean = checkPartitionPieceIsClean(zookeeper, clean_state_clock, piece_task_status_path); - - - if (!is_clean) - { - LOG_INFO(log, "Partition {} become dirty", partition_name); - return false; - } - - get_futures.clear(); - for (const String & path : piece_status_paths) - get_futures.emplace_back(zookeeper->asyncGet(path)); - - // Remember zxid of states again - for (auto & future : get_futures) - { - auto res = future.get(); - zxid2.push_back(res.stat.pzxid); - } - } - catch (const Coordination::Exception & e) - { - LOG_INFO(log, "A ZooKeeper error occurred while checking partition {} piece number {}. Will recheck the partition. Error: {}", partition_name, toString(piece_number), e.displayText()); - return false; - } - - // If all task is finished and zxid is not changed then partition could not become dirty again - for (UInt64 shard_num = 0; shard_num < piece_status_paths.size(); ++shard_num) - { - if (zxid1[shard_num] != zxid2[shard_num]) - { - LOG_INFO(log, "The task {} is being modified now. 
Partition piece will be rechecked", piece_status_paths[shard_num]); - return false; - } - } - - LOG_INFO(log, "Partition {} piece number {} is copied successfully", partition_name, toString(piece_number)); - return true; -} - - -TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & task_table, const String & partition_name) -{ - bool inject_fault = false; - if (move_fault_probability > 0) - { - double value = std::uniform_real_distribution<>(0, 1)(task_table.task_cluster.random_engine); - inject_fault = value < move_fault_probability; - } - - LOG_INFO(log, "Try to move {} to destination table", partition_name); - - auto zookeeper = getContext()->getZooKeeper(); - - const auto current_partition_attach_is_active = task_table.getPartitionAttachIsActivePath(partition_name); - const auto current_partition_attach_is_done = task_table.getPartitionAttachIsDonePath(partition_name); - - /// Create ephemeral node to mark that we are active and process the partition - zookeeper->createAncestors(current_partition_attach_is_active); - zkutil::EphemeralNodeHolderPtr partition_attach_node_holder; - try - { - partition_attach_node_holder = zkutil::EphemeralNodeHolder::create(current_partition_attach_is_active, *zookeeper, host_id); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::Error::ZNODEEXISTS) - { - LOG_INFO(log, "Someone is already moving pieces {}", current_partition_attach_is_active); - return TaskStatus::Active; - } - - throw; - } - - - /// Exit if task has been already processed; - /// create blocking node to signal cleaning up if it is abandoned - { - String status_data; - if (zookeeper->tryGet(current_partition_attach_is_done, status_data)) - { - TaskStateWithOwner status = TaskStateWithOwner::fromString(status_data); - if (status.state == TaskState::Finished) - { - LOG_INFO(log, "All pieces for partition from this task {} has been successfully moved to destination table by {}", current_partition_attach_is_active, status.owner); - return TaskStatus::Finished; - } - - /// Task is abandoned, because previously we created ephemeral node, possibly in other copier's process. - /// Initialize DROP PARTITION - LOG_INFO(log, "Moving piece for partition {} has not been successfully finished by {}. Will try to move by myself.", current_partition_attach_is_active, status.owner); - - /// Remove is_done marker. - zookeeper->remove(current_partition_attach_is_done); - } - } - - - /// Try start processing, create node about it - { - String start_state = TaskStateWithOwner::getData(TaskState::Started, host_id); - zookeeper->create(current_partition_attach_is_done, start_state, zkutil::CreateMode::Persistent); - } - - - /// Try to drop destination partition in original table - if (task_table.allow_to_drop_target_partitions) - { - DatabaseAndTableName original_table = task_table.table_push; - - WriteBufferFromOwnString ss; - ss << "ALTER TABLE " << getQuotedTable(original_table) << ((partition_name == "'all'") ? 
" DROP PARTITION ID " : " DROP PARTITION ") << partition_name; - - UInt64 num_shards_drop_partition = executeQueryOnCluster(task_table.cluster_push, ss.str(), task_cluster->settings_push, ClusterExecutionMode::ON_EACH_SHARD); - if (num_shards_drop_partition != task_table.cluster_push->getShardCount()) - return TaskStatus::Error; - - LOG_INFO(log, "Drop partition {} in original table {} have been executed successfully on {} shards of {}", - partition_name, getQuotedTable(original_table), num_shards_drop_partition, task_table.cluster_push->getShardCount()); - } - - /// Move partition to original destination table. - for (size_t current_piece_number = 0; current_piece_number < task_table.number_of_splits; ++current_piece_number) - { - LOG_INFO(log, "Trying to move partition {} piece {} to original table", partition_name, toString(current_piece_number)); - - ASTPtr query_alter_ast; - String query_alter_ast_string; - - DatabaseAndTableName original_table = task_table.table_push; - DatabaseAndTableName helping_table = DatabaseAndTableName(original_table.first, - original_table.second + "_piece_" + - toString(current_piece_number)); - - Settings settings_push = task_cluster->settings_push; - ClusterExecutionMode execution_mode = ClusterExecutionMode::ON_EACH_NODE; - - if (settings_push.alter_sync == 1) - execution_mode = ClusterExecutionMode::ON_EACH_SHARD; - - query_alter_ast_string += " ALTER TABLE " + getQuotedTable(original_table) + - ((partition_name == "'all'") ? " ATTACH PARTITION ID " : " ATTACH PARTITION ") + partition_name + - " FROM " + getQuotedTable(helping_table); - - LOG_INFO(log, "Executing ALTER query: {}", query_alter_ast_string); - - try - { - /// Try attach partition on each shard - UInt64 num_nodes = executeQueryOnCluster( - task_table.cluster_push, - query_alter_ast_string, - task_cluster->settings_push, - execution_mode); - - if (settings_push.alter_sync == 1) - { - LOG_INFO( - log, - "Destination tables {} have been executed alter query successfully on {} shards of {}", - getQuotedTable(task_table.table_push), - num_nodes, - task_table.cluster_push->getShardCount()); - - if (num_nodes != task_table.cluster_push->getShardCount()) - return TaskStatus::Error; - } - else - { - LOG_INFO(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes)); - } - } - catch (...) - { - LOG_INFO(log, "Error while moving partition {} piece {} to original table", partition_name, toString(current_piece_number)); - LOG_WARNING(log, "In case of non-replicated tables it can cause duplicates."); - throw; - } - - if (inject_fault) - throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated"); - } - - /// Create node to signal that we finished moving - /// Also increment a counter of processed partitions - { - const auto state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id); - const auto task_status = task_zookeeper_path + "/status"; - - /// Try until success - while (true) - { - Coordination::Stat stat; - auto status_json = zookeeper->get(task_status, &stat); - auto statuses = StatusAccumulator::fromJSON(status_json); - - /// Increment status for table. 
- (*statuses)[task_table.name_in_config].processed_partitions_count += 1; - auto statuses_to_commit = StatusAccumulator::serializeToJSON(statuses); - - Coordination::Requests ops; - ops.emplace_back(zkutil::makeSetRequest(current_partition_attach_is_done, state_finished, 0)); - ops.emplace_back(zkutil::makeSetRequest(task_status, statuses_to_commit, stat.version)); - - Coordination::Responses responses; - Coordination::Error code = zookeeper->tryMulti(ops, responses); - - if (code == Coordination::Error::ZOK) - break; - } - } - - return TaskStatus::Finished; -} - -/// This is needed to create internal Distributed table -/// Removes column's TTL expression from `CREATE` query -/// Removes MATEREALIZED or ALIAS columns not to copy additional and useless data over the network. -/// Removes data skipping indices. -ASTPtr ClusterCopier::removeAliasMaterializedAndTTLColumnsFromCreateQuery(const ASTPtr & query_ast, bool allow_to_copy_alias_and_materialized_columns) -{ - const ASTs & column_asts = query_ast->as().columns_list->columns->children; - auto new_columns = std::make_shared(); - - for (const ASTPtr & column_ast : column_asts) - { - const auto & column = column_ast->as(); - - /// Skip this columns - if (!column.default_specifier.empty() && !allow_to_copy_alias_and_materialized_columns) - { - ColumnDefaultKind kind = columnDefaultKindFromString(column.default_specifier); - if (kind == ColumnDefaultKind::Materialized || kind == ColumnDefaultKind::Alias) - continue; - } - - /// Remove TTL on columns definition. - auto new_column_ast = column_ast->clone(); - auto & new_column = new_column_ast->as(); - if (new_column.ttl) - new_column.ttl.reset(); - - new_columns->children.emplace_back(new_column_ast); - } - - ASTPtr new_query_ast = query_ast->clone(); - auto & new_query = new_query_ast->as(); - - auto new_columns_list = std::make_shared(); - new_columns_list->set(new_columns_list->columns, new_columns); - - /// Skip indices and projections are not needed, because distributed table doesn't support it. 
- - new_query.replace(new_query.columns_list, new_columns_list); - - return new_query_ast; -} - -/// Replaces ENGINE and table name in a create query -std::shared_ptr rewriteCreateQueryStorage(const ASTPtr & create_query_ast, - const DatabaseAndTableName & new_table, - const ASTPtr & new_storage_ast) -{ - const auto & create = create_query_ast->as(); - auto res = std::make_shared(create); - - if (create.storage == nullptr || new_storage_ast == nullptr) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage is not specified"); - - res->setDatabase(new_table.first); - res->setTable(new_table.second); - - res->children.clear(); - res->set(res->columns_list, create.columns_list->clone()); - res->set(res->storage, new_storage_ast->clone()); - /// Just to make it better and don't store additional flag like `is_table_created` somewhere else - res->if_not_exists = true; - - return res; -} - - -bool ClusterCopier::tryDropPartitionPiece( - ShardPartition & task_partition, - const size_t current_piece_number, - const zkutil::ZooKeeperPtr & zookeeper, - const CleanStateClock & clean_state_clock) -{ - if (is_safe_mode) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DROP PARTITION is prohibited in safe mode"); - - TaskTable & task_table = task_partition.task_shard.task_table; - ShardPartitionPiece & partition_piece = task_partition.pieces[current_piece_number]; - - const String current_shards_path = partition_piece.getPartitionPieceShardsPath(); - const String current_partition_active_workers_dir = partition_piece.getPartitionPieceActiveWorkersPath(); - const String is_dirty_flag_path = partition_piece.getPartitionPieceIsDirtyPath(); - const String dirty_cleaner_path = partition_piece.getPartitionPieceCleanerPath(); - const String is_dirty_cleaned_path = partition_piece.getPartitionPieceIsCleanedPath(); - - zkutil::EphemeralNodeHolder::Ptr cleaner_holder; - try - { - cleaner_holder = zkutil::EphemeralNodeHolder::create(dirty_cleaner_path, *zookeeper, host_id); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::Error::ZNODEEXISTS) - { - LOG_INFO(log, "Partition {} piece {} is cleaning now by somebody, sleep", task_partition.name, toString(current_piece_number)); - std::this_thread::sleep_for(retry_delay_ms); - return false; - } - - throw; - } - - Coordination::Stat stat{}; - if (zookeeper->exists(current_partition_active_workers_dir, &stat)) - { - if (stat.numChildren != 0) - { - LOG_INFO(log, "Partition {} contains {} active workers while trying to drop it. 
Going to sleep.", task_partition.name, stat.numChildren); - std::this_thread::sleep_for(retry_delay_ms); - return false; - } - else - { - zookeeper->remove(current_partition_active_workers_dir); - } - } - - { - zkutil::EphemeralNodeHolder::Ptr active_workers_lock; - try - { - active_workers_lock = zkutil::EphemeralNodeHolder::create(current_partition_active_workers_dir, *zookeeper, host_id); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::Error::ZNODEEXISTS) - { - LOG_INFO(log, "Partition {} is being filled now by somebody, sleep", task_partition.name); - return false; - } - - throw; - } - - // Lock the dirty flag - zookeeper->set(is_dirty_flag_path, host_id, clean_state_clock.discovery_version.value()); - zookeeper->tryRemove(partition_piece.getPartitionPieceCleanStartPath()); - CleanStateClock my_clock(zookeeper, is_dirty_flag_path, is_dirty_cleaned_path); - - /// Remove all status nodes - { - Strings children; - if (zookeeper->tryGetChildren(current_shards_path, children) == Coordination::Error::ZOK) - for (const auto & child : children) - { - zookeeper->removeRecursive(current_shards_path + "/" + child); - } - } - - - DatabaseAndTableName original_table = task_table.table_push; - DatabaseAndTableName helping_table = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number)); - - String query = "ALTER TABLE " + getQuotedTable(helping_table); - query += ((task_partition.name == "'all'") ? " DROP PARTITION ID " : " DROP PARTITION ") + task_partition.name + ""; - - /// TODO: use this statement after servers will be updated up to 1.1.54310 - // query += " DROP PARTITION ID '" + task_partition.name + "'"; - - ClusterPtr & cluster_push = task_table.cluster_push; - Settings settings_push = task_cluster->settings_push; - - /// It is important, DROP PARTITION must be done synchronously - settings_push.alter_sync = 2; - - LOG_INFO(log, "Execute distributed DROP PARTITION: {}", query); - /// We have to drop partition_piece on each replica - size_t num_shards = executeQueryOnCluster( - cluster_push, query, - settings_push, - ClusterExecutionMode::ON_EACH_NODE); - - LOG_INFO(log, "DROP PARTITION was successfully executed on {} nodes of a cluster.", num_shards); - - /// Update the locking node - if (!my_clock.is_stale()) - { - zookeeper->set(is_dirty_flag_path, host_id, my_clock.discovery_version.value()); - if (my_clock.clean_state_version) - zookeeper->set(is_dirty_cleaned_path, host_id, my_clock.clean_state_version.value()); - else - zookeeper->create(is_dirty_cleaned_path, host_id, zkutil::CreateMode::Persistent); - } - else - { - LOG_INFO(log, "Clean state is altered when dropping the partition, cowardly bailing"); - /// clean state is stale - return false; - } - - LOG_INFO(log, "Partition {} piece {} was dropped on cluster {}", task_partition.name, toString(current_piece_number), task_table.cluster_push_name); - if (zookeeper->tryCreate(current_shards_path, host_id, zkutil::CreateMode::Persistent) == Coordination::Error::ZNODEEXISTS) - zookeeper->set(current_shards_path, host_id); - } - - LOG_INFO(log, "Partition {} piece {} is safe for work now.", task_partition.name, toString(current_piece_number)); - return true; -} - -bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) -{ - /// Create destination table - TaskStatus task_status = TaskStatus::Error; - - task_status = tryCreateDestinationTable(timeouts, task_table); - /// Exit if success - if (task_status != 
TaskStatus::Finished) - { - LOG_WARNING(log, "Create destination table failed "); - return false; - } - - /// Set all_partitions_count for table in Zookeeper - auto zookeeper = getContext()->getZooKeeper(); - while (true) - { - Coordination::Stat stat; - auto status_json = zookeeper->get(task_zookeeper_path + "/status", &stat); - auto statuses = StatusAccumulator::fromJSON(status_json); - - /// Exit if someone already set the initial value for this table. - if (statuses->find(task_table.name_in_config) != statuses->end()) - break; - (*statuses)[task_table.name_in_config] = StatusAccumulator::TableStatus - { - /*all_partitions_count=*/task_table.ordered_partition_names.size(), - /*processed_partition_count=*/0 - }; - - auto statuses_to_commit = StatusAccumulator::serializeToJSON(statuses); - auto error = zookeeper->trySet(task_zookeeper_path + "/status", statuses_to_commit, stat.version); - if (error == Coordination::Error::ZOK) - break; - } - - - /// An heuristic: if previous shard is already done, then check next one without sleeps due to max_workers constraint - bool previous_shard_is_instantly_finished = false; - - /// Process each partition that is present in cluster - for (const String & partition_name : task_table.ordered_partition_names) - { - if (!task_table.cluster_partitions.contains(partition_name)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no expected partition {}. It is a bug", partition_name); - - ClusterPartition & cluster_partition = task_table.cluster_partitions[partition_name]; - - Stopwatch watch; - /// We will check all the shards of the table and check if they contain current partition. - TasksShard expected_shards; - UInt64 num_failed_shards = 0; - - ++cluster_partition.total_tries; - - LOG_INFO(log, "Processing partition {} for the whole cluster", partition_name); - - /// Process each source shard having current partition and copy current partition - /// NOTE: shards are sorted by "distance" to current host - bool has_shard_to_process = false; - for (const TaskShardPtr & shard : task_table.all_shards) - { - /// Does shard have a node with current partition? - if (!shard->partition_tasks.contains(partition_name)) - { - /// If not, did we check existence of that partition previously? - if (!shard->checked_partitions.contains(partition_name)) - { - auto check_shard_has_partition = [&] () { return checkShardHasPartition(timeouts, *shard, partition_name); }; - bool has_partition = retry(check_shard_has_partition); - - shard->checked_partitions.emplace(partition_name); - - if (has_partition) - { - const size_t number_of_splits = task_table.number_of_splits; - shard->partition_tasks.emplace(partition_name, ShardPartition(*shard, partition_name, number_of_splits)); - LOG_INFO(log, "Discovered partition {} in shard {}", partition_name, shard->getDescription()); - /// To save references in the future. 
- auto shard_partition_it = shard->partition_tasks.find(partition_name); - PartitionPieces & shard_partition_pieces = shard_partition_it->second.pieces; - - for (size_t piece_number = 0; piece_number < number_of_splits; ++piece_number) - { - auto res = checkPresentPartitionPiecesOnCurrentShard(timeouts, *shard, partition_name, piece_number); - shard_partition_pieces.emplace_back(shard_partition_it->second, piece_number, res); - } - } - else - { - LOG_INFO(log, "Found that shard {} does not contain current partition {}", shard->getDescription(), partition_name); - continue; - } - } - else - { - /// We have already checked that partition, but did not discover it - previous_shard_is_instantly_finished = true; - continue; - } - } - - auto it_shard_partition = shard->partition_tasks.find(partition_name); - /// Previously when we discovered that shard does not contain current partition, we skipped it. - /// At this moment partition have to be present. - if (it_shard_partition == shard->partition_tasks.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no such partition in a shard. This is a bug."); - auto & partition = it_shard_partition->second; - - expected_shards.emplace_back(shard); - - /// Do not sleep if there is a sequence of already processed shards to increase startup - bool is_unprioritized_task = !previous_shard_is_instantly_finished && shard->priority.is_remote; - task_status = TaskStatus::Error; - bool was_error = false; - has_shard_to_process = true; - for (UInt64 try_num = 1; try_num <= max_shard_partition_tries; ++try_num) - { - task_status = tryProcessPartitionTask(timeouts, partition, is_unprioritized_task); - - /// Exit if success - if (task_status == TaskStatus::Finished) - break; - - was_error = true; - - /// Skip if the task is being processed by someone - if (task_status == TaskStatus::Active) - break; - - /// Repeat on errors - std::this_thread::sleep_for(retry_delay_ms); - } - - if (task_status == TaskStatus::Error) - ++num_failed_shards; - - previous_shard_is_instantly_finished = !was_error; - } - - cluster_partition.elapsed_time_seconds += watch.elapsedSeconds(); - - /// Check that whole cluster partition is done - /// Firstly check the number of failed partition tasks, then look into ZooKeeper and ensure that each partition is done - bool partition_copying_is_done = num_failed_shards == 0; - try - { - partition_copying_is_done = - !has_shard_to_process - || (partition_copying_is_done && checkAllPiecesInPartitionAreDone(task_table, partition_name, expected_shards)); - } - catch (...) - { - tryLogCurrentException(log); - partition_copying_is_done = false; - } - - - bool partition_moving_is_done = false; - /// Try to move only if all pieces were copied. - if (partition_copying_is_done) - { - for (UInt64 try_num = 0; try_num < max_shard_partition_piece_tries_for_alter; ++try_num) - { - try - { - auto res = tryMoveAllPiecesToDestinationTable(task_table, partition_name); - /// Exit and mark current task is done. - if (res == TaskStatus::Finished) - { - partition_moving_is_done = true; - break; - } - - /// Exit if this task is active. - if (res == TaskStatus::Active) - break; - - /// Repeat on errors. - std::this_thread::sleep_for(retry_delay_ms); - } - catch (...) 
- { - tryLogCurrentException(log, "Some error occurred while moving pieces to destination table for partition " + partition_name); - } - } - } - - if (partition_copying_is_done && partition_moving_is_done) - { - task_table.finished_cluster_partitions.emplace(partition_name); - - task_table.bytes_copied += cluster_partition.bytes_copied; - task_table.rows_copied += cluster_partition.rows_copied; - double elapsed = cluster_partition.elapsed_time_seconds; - - LOG_INFO(log, "It took {} seconds to copy partition {}: {} uncompressed bytes, {} rows and {} source blocks are copied", - elapsed, partition_name, - formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied), - formatReadableQuantity(cluster_partition.rows_copied), - cluster_partition.blocks_copied); - - if (cluster_partition.rows_copied) - { - LOG_INFO(log, "Average partition speed: {} per second.", formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied / elapsed)); - } - - if (task_table.rows_copied) - { - LOG_INFO(log, "Average table {} speed: {} per second.", task_table.table_id, formatReadableSizeWithDecimalSuffix(task_table.bytes_copied / elapsed)); - } - } - } - - UInt64 required_partitions = task_table.cluster_partitions.size(); - UInt64 finished_partitions = task_table.finished_cluster_partitions.size(); - bool table_is_done = finished_partitions >= required_partitions; - - if (!table_is_done) - { - LOG_INFO(log, "Table {} is not processed yet. Copied {} of {}, will retry", task_table.table_id, finished_partitions, required_partitions); - } - else - { - /// Delete helping tables in case that whole table is done - dropHelpingTables(task_table); - } - - return table_is_done; -} - -TaskStatus ClusterCopier::tryCreateDestinationTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) -{ - /// Try create original table (if not exists) on each shard - - //TaskTable & task_table = task_shard.task_table; - const TaskShardPtr task_shard = task_table.all_shards.at(0); - /// We need to update table definitions for each part, it could be changed after ALTER - task_shard->current_pull_table_create_query = getCreateTableForPullShard(timeouts, *task_shard); - try - { - auto create_query_push_ast - = rewriteCreateQueryStorage(task_shard->current_pull_table_create_query, task_table.table_push, task_table.engine_push_ast); - auto & create = create_query_push_ast->as(); - create.if_not_exists = true; - InterpreterCreateQuery::prepareOnClusterQuery(create, getContext(), task_table.cluster_push_name); - String query = queryToString(create_query_push_ast); - - LOG_INFO(log, "Create destination tables. Query: {}", query); - UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, ClusterExecutionMode::ON_EACH_NODE); - LOG_INFO( - log, - "Destination tables {} have been created on {} shards of {}", - getQuotedTable(task_table.table_push), - shards, - task_table.cluster_push->getShardCount()); - } - catch (...) - { - tryLogCurrentException(log, "Error while creating original table. Maybe we are not first."); - } - - return TaskStatus::Finished; -} - -/// Job for copying partition from particular shard. -TaskStatus ClusterCopier::tryProcessPartitionTask(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, bool is_unprioritized_task) -{ - TaskStatus res; - - try - { - res = iterateThroughAllPiecesInPartition(timeouts, task_partition, is_unprioritized_task); - } - catch (...) 
- { - tryLogCurrentException(log, "An error occurred while processing partition " + task_partition.name); - res = TaskStatus::Error; - } - - /// At the end of each task check if the config is updated - try - { - updateConfigIfNeeded(); - } - catch (...) - { - tryLogCurrentException(log, "An error occurred while updating the config"); - } - - return res; -} - -TaskStatus ClusterCopier::iterateThroughAllPiecesInPartition(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, - bool is_unprioritized_task) -{ - const size_t total_number_of_pieces = task_partition.task_shard.task_table.number_of_splits; - - TaskStatus res{TaskStatus::Finished}; - - bool was_failed_pieces = false; - bool was_active_pieces = false; - - for (size_t piece_number = 0; piece_number < total_number_of_pieces; piece_number++) - { - for (UInt64 try_num = 0; try_num < max_shard_partition_tries; ++try_num) - { - LOG_INFO(log, "Attempt number {} to process partition {} piece number {} on shard number {} with index {}.", - try_num, task_partition.name, piece_number, - task_partition.task_shard.numberInCluster(), - task_partition.task_shard.indexInCluster()); - - res = processPartitionPieceTaskImpl(timeouts, task_partition, piece_number, is_unprioritized_task); - - /// Exit if success - if (res == TaskStatus::Finished) - break; - - /// Skip if the task is being processed by someone - if (res == TaskStatus::Active) - break; - - /// Repeat on errors - std::this_thread::sleep_for(retry_delay_ms); - } - - was_active_pieces |= (res == TaskStatus::Active); - was_failed_pieces |= (res == TaskStatus::Error); - } - - if (was_failed_pieces) - return TaskStatus::Error; - - if (was_active_pieces) - return TaskStatus::Active; - - return TaskStatus::Finished; -} - - -TaskStatus ClusterCopier::processPartitionPieceTaskImpl( - const ConnectionTimeouts & timeouts, ShardPartition & task_partition, - const size_t current_piece_number, bool is_unprioritized_task) -{ - TaskShard & task_shard = task_partition.task_shard; - TaskTable & task_table = task_shard.task_table; - ClusterPartition & cluster_partition = task_table.getClusterPartition(task_partition.name); - ShardPartitionPiece & partition_piece = task_partition.pieces[current_piece_number]; - - const size_t number_of_splits = task_table.number_of_splits; - const String primary_key_comma_separated = task_table.primary_key_comma_separated; - - /// We need to update table definitions for each partition, it could be changed after ALTER - createShardInternalTables(timeouts, task_shard, true); - - auto split_table_for_current_piece = task_shard.list_of_split_tables_on_shard[current_piece_number]; - - auto zookeeper = getContext()->getZooKeeper(); - - const String piece_is_dirty_flag_path = partition_piece.getPartitionPieceIsDirtyPath(); - const String piece_is_dirty_cleaned_path = partition_piece.getPartitionPieceIsCleanedPath(); - const String current_task_piece_is_active_path = partition_piece.getActiveWorkerPath(); - const String current_task_piece_status_path = partition_piece.getShardStatusPath(); - - /// Auxiliary functions: - - /// Creates is_dirty node to initialize DROP PARTITION - auto create_is_dirty_node = [&] (const CleanStateClock & clock) - { - if (clock.is_stale()) - LOG_INFO(log, "Clean state clock is stale while setting dirty flag, cowardly bailing"); - else if (!clock.is_clean()) - LOG_INFO(log, "Thank you, Captain Obvious"); - else if (clock.discovery_version) - { - LOG_INFO(log, "Updating clean state clock"); - zookeeper->set(piece_is_dirty_flag_path, host_id, 
clock.discovery_version.value()); - } - else - { - LOG_INFO(log, "Creating clean state clock"); - zookeeper->create(piece_is_dirty_flag_path, host_id, zkutil::CreateMode::Persistent); - } - }; - - /// Returns SELECT query filtering current partition and applying user filter - auto get_select_query = [&] (const DatabaseAndTableName & from_table, const String & fields, bool enable_splitting, String limit = "") - { - String query; - query += "WITH " + task_partition.name + " AS partition_key "; - query += "SELECT " + fields + " FROM " + getQuotedTable(from_table); - - if (enable_splitting && experimental_use_sample_offset) - query += " SAMPLE 1/" + toString(number_of_splits) + " OFFSET " + toString(current_piece_number) + "/" + toString(number_of_splits); - - /// TODO: Bad, it is better to rewrite with ASTLiteral(partition_key_field) - query += " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) + " = partition_key)"; - - if (enable_splitting && !experimental_use_sample_offset) - query += " AND ( cityHash64(" + primary_key_comma_separated + ") %" + toString(number_of_splits) + " = " + toString(current_piece_number) + " )"; - - if (!task_table.where_condition_str.empty()) - query += " AND (" + task_table.where_condition_str + ")"; - - if (!limit.empty()) - query += " LIMIT " + limit; - - query += " FORMAT Native"; - - ParserQuery p_query(query.data() + query.size()); - - const auto & settings = getContext()->getSettingsRef(); - return parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); - }; - - /// Load balancing - auto worker_node_holder = createTaskWorkerNodeAndWaitIfNeed(zookeeper, current_task_piece_status_path, is_unprioritized_task); - - LOG_INFO(log, "Processing {}", current_task_piece_status_path); - - const String piece_status_path = partition_piece.getPartitionPieceShardsPath(); - - CleanStateClock clean_state_clock(zookeeper, piece_is_dirty_flag_path, piece_is_dirty_cleaned_path); - - const bool is_clean = checkPartitionPieceIsClean(zookeeper, clean_state_clock, piece_status_path); - - /// Do not start if partition piece is dirty, try to clean it - if (is_clean) - { - LOG_INFO(log, "Partition {} piece {} appears to be clean", task_partition.name, current_piece_number); - zookeeper->createAncestors(current_task_piece_status_path); - } - else - { - LOG_INFO(log, "Partition {} piece {} is dirty, try to drop it", task_partition.name, current_piece_number); - - try - { - tryDropPartitionPiece(task_partition, current_piece_number, zookeeper, clean_state_clock); - } - catch (...) 
- { - tryLogCurrentException(log, "An error occurred when clean partition"); - } - - return TaskStatus::Error; - } - - /// Create ephemeral node to mark that we are active and process the partition - zookeeper->createAncestors(current_task_piece_is_active_path); - zkutil::EphemeralNodeHolderPtr partition_task_node_holder; - try - { - partition_task_node_holder = zkutil::EphemeralNodeHolder::create(current_task_piece_is_active_path, *zookeeper, host_id); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::Error::ZNODEEXISTS) - { - LOG_INFO(log, "Someone is already processing {}", current_task_piece_is_active_path); - return TaskStatus::Active; - } - - throw; - } - - /// Exit if task has been already processed; - /// create blocking node to signal cleaning up if it is abandoned - { - String status_data; - if (zookeeper->tryGet(current_task_piece_status_path, status_data)) - { - TaskStateWithOwner status = TaskStateWithOwner::fromString(status_data); - if (status.state == TaskState::Finished) - { - LOG_INFO(log, "Task {} has been successfully executed by {}", current_task_piece_status_path, status.owner); - return TaskStatus::Finished; - } - - /// Task is abandoned, because previously we created ephemeral node, possibly in other copier's process. - /// Initialize DROP PARTITION - LOG_INFO(log, "Task {} has not been successfully finished by {}. Partition will be dropped and refilled.", current_task_piece_status_path, status.owner); - - create_is_dirty_node(clean_state_clock); - return TaskStatus::Error; - } - } - - - /// Try create table (if not exists) on each shard - /// We have to create this table even in case that partition piece is empty - /// This is significant, because we will have simpler code - { - /// 1) Get columns description from any replica of destination cluster - /// 2) Change ENGINE, database and table name - /// 3) Create helping table on the whole destination cluster - auto & settings_push = task_cluster->settings_push; - - auto connection = task_table.cluster_push->getAnyShardInfo().pool->get(timeouts, settings_push, true); - String create_query = getRemoteCreateTable(task_shard.task_table.table_push, *connection, settings_push); - - ParserCreateQuery parser_create_query; - auto create_query_ast = parseQuery(parser_create_query, create_query, settings_push.max_query_size, settings_push.max_parser_depth); - /// Define helping table database and name for current partition piece - DatabaseAndTableName database_and_table_for_current_piece - { - task_table.table_push.first, - task_table.table_push.second + "_piece_" + toString(current_piece_number) - }; - - - auto new_engine_push_ast = task_table.engine_push_ast; - if (task_table.isReplicatedTable()) - new_engine_push_ast = task_table.rewriteReplicatedCreateQueryToPlain(); - - /// Take columns definition from destination table, new database and table name, and new engine (non replicated variant of MergeTree) - auto create_query_push_ast = rewriteCreateQueryStorage(create_query_ast, database_and_table_for_current_piece, new_engine_push_ast); - String query = queryToString(create_query_push_ast); - - LOG_INFO(log, "Create destination tables. 
Query: {}", query); - UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, ClusterExecutionMode::ON_EACH_NODE); - LOG_INFO( - log, - "Destination tables {} have been created on {} shards of {}", - getQuotedTable(task_table.table_push), - shards, - task_table.cluster_push->getShardCount()); - } - - - /// Exit if current piece is absent on this shard. Also mark it as finished, because we will check - /// whether each shard have processed each partitition (and its pieces). - if (partition_piece.is_absent_piece) - { - String state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id); - auto res = zookeeper->tryCreate(current_task_piece_status_path, state_finished, zkutil::CreateMode::Persistent); - if (res == Coordination::Error::ZNODEEXISTS) - LOG_INFO(log, "Partition {} piece {} is absent on current replica of a shard. But other replicas have already marked it as done.", task_partition.name, current_piece_number); - if (res == Coordination::Error::ZOK) - LOG_INFO(log, "Partition {} piece {} is absent on current replica of a shard. Will mark it as done. Other replicas will do the same.", task_partition.name, current_piece_number); - return TaskStatus::Finished; - } - - /// Check that destination partition is empty if we are first worker - /// NOTE: this check is incorrect if pull and push tables have different partition key! - String clean_start_status; - if (!zookeeper->tryGet(partition_piece.getPartitionPieceCleanStartPath(), clean_start_status) || clean_start_status != "ok") - { - zookeeper->createIfNotExists(partition_piece.getPartitionPieceCleanStartPath(), ""); - auto checker = zkutil::EphemeralNodeHolder::create(partition_piece.getPartitionPieceCleanStartPath() + "/checker", - *zookeeper, host_id); - // Maybe we are the first worker - - ASTPtr query_select_ast = get_select_query(split_table_for_current_piece, "count()", /* enable_splitting= */ true); - UInt64 count; - { - auto local_context = Context::createCopy(context); - // Use pull (i.e. readonly) settings, but fetch data from destination servers - local_context->setSettings(task_cluster->settings_pull); - local_context->setSetting("skip_unavailable_shards", true); - - InterpreterSelectWithUnionQuery select(query_select_ast, local_context, SelectQueryOptions{}); - QueryPlan plan; - select.buildQueryPlan(plan); - auto builder = std::move(*plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(local_context), - BuildQueryPipelineSettings::fromContext(local_context))); - - Block block = getBlockWithAllStreamData(std::move(builder)); - count = (block) ? block.safeGetByPosition(0).column->getUInt(0) : 0; - } - - if (count != 0) - { - LOG_INFO(log, "Partition {} piece {} is not empty. In contains {} rows.", task_partition.name, current_piece_number, count); - Coordination::Stat stat_shards{}; - zookeeper->get(partition_piece.getPartitionPieceShardsPath(), &stat_shards); - - /// NOTE: partition is still fresh if dirt discovery happens before cleaning - if (stat_shards.numChildren == 0) - { - LOG_WARNING(log, "There are no workers for partition {} piece {}, but destination table contains {} rows. 
Partition will be dropped and refilled.", task_partition.name, toString(current_piece_number), count); - - create_is_dirty_node(clean_state_clock); - return TaskStatus::Error; - } - } - zookeeper->set(partition_piece.getPartitionPieceCleanStartPath(), "ok"); - } - /// At this point, we need to sync that the destination table is clean - /// before any actual work - - /// Try start processing, create node about it - { - String start_state = TaskStateWithOwner::getData(TaskState::Started, host_id); - CleanStateClock new_clean_state_clock(zookeeper, piece_is_dirty_flag_path, piece_is_dirty_cleaned_path); - if (clean_state_clock != new_clean_state_clock) - { - LOG_INFO(log, "Partition {} piece {} clean state changed, cowardly bailing", task_partition.name, toString(current_piece_number)); - return TaskStatus::Error; - } - else if (!new_clean_state_clock.is_clean()) - { - LOG_INFO(log, "Partition {} piece {} is dirty and will be dropped and refilled", task_partition.name, toString(current_piece_number)); - create_is_dirty_node(new_clean_state_clock); - return TaskStatus::Error; - } - zookeeper->create(current_task_piece_status_path, start_state, zkutil::CreateMode::Persistent); - } - - - /// Do the copying - { - bool inject_fault = false; - if (copy_fault_probability > 0) - { - double value = std::uniform_real_distribution<>(0, 1)(task_table.task_cluster.random_engine); - inject_fault = value < copy_fault_probability; - } - - // Select all fields - ASTPtr query_select_ast = get_select_query(task_shard.table_read_shard, "*", /* enable_splitting= */ true, inject_fault ? "1" : ""); - - LOG_INFO(log, "Executing SELECT query and pull from {}: {}", task_shard.getDescription(), queryToString(query_select_ast)); - - ASTPtr query_insert_ast; - { - String query; - query += "INSERT INTO " + getQuotedTable(split_table_for_current_piece) + " FORMAT Native "; - - ParserQuery p_query(query.data() + query.size()); - const auto & settings = getContext()->getSettingsRef(); - query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); - - LOG_INFO(log, "Executing INSERT query: {}", query); - } - - try - { - auto context_select = Context::createCopy(context); - context_select->setSettings(task_cluster->settings_pull); - - auto context_insert = Context::createCopy(context); - context_insert->setSettings(task_cluster->settings_push); - - /// Custom INSERT SELECT implementation - QueryPipeline input; - QueryPipeline output; - { - BlockIO io_insert = InterpreterFactory::instance().get(query_insert_ast, context_insert)->execute(); - - InterpreterSelectWithUnionQuery select(query_select_ast, context_select, SelectQueryOptions{}); - QueryPlan plan; - select.buildQueryPlan(plan); - auto builder = std::move(*plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(context_select), - BuildQueryPipelineSettings::fromContext(context_select))); - - output = std::move(io_insert.pipeline); - - /// Add converting actions to make it possible to copy blocks with slightly different schema - const auto & select_block = builder.getHeader(); - const auto & insert_block = output.getHeader(); - auto actions_dag = ActionsDAG::makeConvertingActions( - select_block.getColumnsWithTypeAndName(), - insert_block.getColumnsWithTypeAndName(), - ActionsDAG::MatchColumnsMode::Position); - - auto actions = std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext())); - - builder.addSimpleTransform([&](const Block & header) - { - return std::make_shared(header, actions); - }); 
- input = QueryPipelineBuilder::getPipeline(std::move(builder)); - } - - /// Fail-fast optimization to abort copying when the current clean state expires - std::future future_is_dirty_checker; - - Stopwatch watch(CLOCK_MONOTONIC_COARSE); - constexpr UInt64 check_period_milliseconds = 500; - - /// Will asynchronously check that ZooKeeper connection and is_dirty flag appearing while copying data - auto cancel_check = [&] () - { - if (zookeeper->expired()) - throw Exception(ErrorCodes::UNFINISHED, "ZooKeeper session is expired, cancel INSERT SELECT"); - - if (!future_is_dirty_checker.valid()) - future_is_dirty_checker = zookeeper->asyncExists(piece_is_dirty_flag_path); - - /// check_period_milliseconds should less than average insert time of single block - /// Otherwise, the insertion will slow a little bit - if (watch.elapsedMilliseconds() >= check_period_milliseconds) - { - Coordination::ExistsResponse status = future_is_dirty_checker.get(); - - if (status.error != Coordination::Error::ZNONODE) - { - LogicalClock dirt_discovery_epoch (status.stat.mzxid); - if (dirt_discovery_epoch == clean_state_clock.discovery_zxid) - return false; - throw Exception(ErrorCodes::UNFINISHED, "Partition is dirty, cancel INSERT SELECT"); - } - } - - return false; - }; - - /// Update statistics - /// It is quite rough: bytes_copied don't take into account DROP PARTITION. - auto update_stats = [&cluster_partition] (const Block & block) - { - cluster_partition.bytes_copied += block.bytes(); - cluster_partition.rows_copied += block.rows(); - cluster_partition.blocks_copied += 1; - }; - - /// Main work is here - PullingPipelineExecutor pulling_executor(input); - PushingPipelineExecutor pushing_executor(output); - - Block data; - bool is_cancelled = false; - while (pulling_executor.pull(data)) - { - if (cancel_check()) - { - is_cancelled = true; - pushing_executor.cancel(); - pushing_executor.cancel(); - break; - } - pushing_executor.push(data); - update_stats(data); - } - - if (!is_cancelled) - pushing_executor.finish(); - - // Just in case - if (future_is_dirty_checker.valid()) - future_is_dirty_checker.get(); - - if (inject_fault) - throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated"); - } - catch (...) - { - tryLogCurrentException(log, "An error occurred during copying, partition will be marked as dirty"); - create_is_dirty_node(clean_state_clock); - return TaskStatus::Error; - } - } - - LOG_INFO(log, "Partition {} piece {} copied. 
But not moved to original destination table.", task_partition.name, toString(current_piece_number)); - - /// Finalize the processing, change state of current partition task (and also check is_dirty flag) - { - String state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id); - CleanStateClock new_clean_state_clock (zookeeper, piece_is_dirty_flag_path, piece_is_dirty_cleaned_path); - if (clean_state_clock != new_clean_state_clock) - { - LOG_INFO(log, "Partition {} piece {} clean state changed, cowardly bailing", task_partition.name, toString(current_piece_number)); - return TaskStatus::Error; - } - else if (!new_clean_state_clock.is_clean()) - { - LOG_INFO(log, "Partition {} piece {} became dirty and will be dropped and refilled", task_partition.name, toString(current_piece_number)); - create_is_dirty_node(new_clean_state_clock); - return TaskStatus::Error; - } - zookeeper->set(current_task_piece_status_path, state_finished, 0); - } - - return TaskStatus::Finished; -} - -void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast) -{ - const auto & create = create_ast->as(); - dropLocalTableIfExists({create.getDatabase(), create.getTable()}); - - auto create_context = Context::createCopy(getContext()); - - InterpreterCreateQuery interpreter(create_ast, create_context); - interpreter.execute(); -} - -void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_name) const -{ - auto drop_ast = std::make_shared(); - drop_ast->if_exists = true; - drop_ast->setDatabase(table_name.first); - drop_ast->setTable(table_name.second); - - auto drop_context = Context::createCopy(getContext()); - - InterpreterDropQuery interpreter(drop_ast, drop_context); - interpreter.execute(); -} - -void ClusterCopier::dropHelpingTablesByPieceNumber(const TaskTable & task_table, size_t current_piece_number) -{ - LOG_INFO(log, "Removing helping tables piece {}", current_piece_number); - - DatabaseAndTableName original_table = task_table.table_push; - DatabaseAndTableName helping_table - = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number)); - - String query = "DROP TABLE IF EXISTS " + getQuotedTable(helping_table); - - const ClusterPtr & cluster_push = task_table.cluster_push; - Settings settings_push = task_cluster->settings_push; - - LOG_INFO(log, "Execute distributed DROP TABLE: {}", query); - - /// We have to drop partition_piece on each replica - UInt64 num_nodes = executeQueryOnCluster(cluster_push, query, settings_push, ClusterExecutionMode::ON_EACH_NODE); - - LOG_INFO(log, "DROP TABLE query was successfully executed on {} nodes.", toString(num_nodes)); -} - -void ClusterCopier::dropHelpingTables(const TaskTable & task_table) -{ - LOG_INFO(log, "Removing helping tables"); - for (size_t current_piece_number = 0; current_piece_number < task_table.number_of_splits; ++current_piece_number) - { - dropHelpingTablesByPieceNumber(task_table, current_piece_number); - } -} - -void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskTable & task_table, const String & partition_name) -{ - LOG_INFO(log, "Try drop partition partition from all helping tables."); - for (size_t current_piece_number = 0; current_piece_number < task_table.number_of_splits; ++current_piece_number) - { - DatabaseAndTableName original_table = task_table.table_push; - DatabaseAndTableName helping_table = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number)); - - String 
query = "ALTER TABLE " + getQuotedTable(helping_table) + ((partition_name == "'all'") ? " DROP PARTITION ID " : " DROP PARTITION ") + partition_name; - - const ClusterPtr & cluster_push = task_table.cluster_push; - Settings settings_push = task_cluster->settings_push; - - LOG_INFO(log, "Execute distributed DROP PARTITION: {}", query); - /// We have to drop partition_piece on each replica - UInt64 num_nodes = executeQueryOnCluster( - cluster_push, query, - settings_push, - ClusterExecutionMode::ON_EACH_NODE); - - LOG_INFO(log, "DROP PARTITION query was successfully executed on {} nodes.", toString(num_nodes)); - } - LOG_INFO(log, "All helping tables dropped partition {}", partition_name); -} - -String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings & settings) -{ - auto remote_context = Context::createCopy(context); - remote_context->setSettings(settings); - - String query = "SHOW CREATE TABLE " + getQuotedTable(table); - - QueryPipelineBuilder builder; - builder.init(Pipe(std::make_shared( - std::make_shared(connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context), false, false, /* async_query_sending= */ false))); - Block block = getBlockWithAllStreamData(std::move(builder)); - return typeid_cast(*block.safeGetByPosition(0).column).getDataAt(0).toString(); -} - - -ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & timeouts, TaskShard & task_shard) -{ - /// Fetch and parse (possibly) new definition - auto connection_entry = task_shard.info.pool->get(timeouts, task_cluster->settings_pull, true); - String create_query_pull_str = getRemoteCreateTable( - task_shard.task_table.table_pull, - *connection_entry, - task_cluster->settings_pull); - - ParserCreateQuery parser_create_query; - const auto & settings = getContext()->getSettingsRef(); - return parseQuery(parser_create_query, create_query_pull_str, settings.max_query_size, settings.max_parser_depth); -} - - -/// If it is implicitly asked to create split Distributed table for certain piece on current shard, we will do it. 
-void ClusterCopier::createShardInternalTables(const ConnectionTimeouts & timeouts, - TaskShard & task_shard, bool create_split) -{ - TaskTable & task_table = task_shard.task_table; - - /// We need to update table definitions for each part, it could be changed after ALTER - task_shard.current_pull_table_create_query = getCreateTableForPullShard(timeouts, task_shard); - - /// Create local Distributed tables: - /// a table fetching data from current shard and a table inserting data to the whole destination cluster - String read_shard_prefix = ".read_shard_" + toString(task_shard.indexInCluster()) + "."; - String split_shard_prefix = ".split."; - task_shard.table_read_shard = DatabaseAndTableName(working_database_name, read_shard_prefix + task_table.table_id); - task_shard.main_table_split_shard = DatabaseAndTableName(working_database_name, split_shard_prefix + task_table.table_id); - - for (const auto & piece_number : collections::range(0, task_table.number_of_splits)) - { - task_shard.list_of_split_tables_on_shard[piece_number] = - DatabaseAndTableName(working_database_name, split_shard_prefix + task_table.table_id + "_piece_" + toString(piece_number)); - } - - /// Create special cluster with single shard - String shard_read_cluster_name = read_shard_prefix + task_table.cluster_pull_name; - ClusterPtr cluster_pull_current_shard = task_table.cluster_pull->getClusterWithSingleShard(task_shard.indexInCluster()); - getContext()->setCluster(shard_read_cluster_name, cluster_pull_current_shard); - - auto storage_shard_ast = createASTStorageDistributed(shard_read_cluster_name, task_table.table_pull.first, task_table.table_pull.second); - - auto create_query_ast = removeAliasMaterializedAndTTLColumnsFromCreateQuery( - task_shard.current_pull_table_create_query, - task_table.allow_to_copy_alias_and_materialized_columns); - - auto create_table_pull_ast = rewriteCreateQueryStorage(create_query_ast, task_shard.table_read_shard, storage_shard_ast); - dropAndCreateLocalTable(create_table_pull_ast); - - if (create_split) - { - auto create_table_split_piece_ast = rewriteCreateQueryStorage( - create_query_ast, - task_shard.main_table_split_shard, - task_table.main_engine_split_ast); - - dropAndCreateLocalTable(create_table_split_piece_ast); - - /// Create auxiliary split tables for each piece - for (const auto & piece_number : collections::range(0, task_table.number_of_splits)) - { - const auto & storage_piece_split_ast = task_table.auxiliary_engine_split_asts[piece_number]; - - create_table_split_piece_ast = rewriteCreateQueryStorage( - create_query_ast, - task_shard.list_of_split_tables_on_shard[piece_number], - storage_piece_split_ast); - - dropAndCreateLocalTable(create_table_split_piece_ast); - } - } - -} - - -std::set ClusterCopier::getShardPartitions(const ConnectionTimeouts & timeouts, TaskShard & task_shard) -{ - std::set res; - - createShardInternalTables(timeouts, task_shard, false); - - TaskTable & task_table = task_shard.task_table; - - const String & partition_name = queryToString(task_table.engine_push_partition_key_ast); - - if (partition_name == "'all'") - { - res.emplace("'all'"); - return res; - } - - String query; - { - WriteBufferFromOwnString wb; - wb << "SELECT " << partition_name << " AS partition FROM " - << getQuotedTable(task_shard.table_read_shard) << " GROUP BY partition ORDER BY partition DESC"; - query = wb.str(); - } - - ParserQuery parser_query(query.data() + query.size()); - const auto & settings = getContext()->getSettingsRef(); - ASTPtr query_ast = parseQuery(parser_query, 
query, settings.max_query_size, settings.max_parser_depth); - - LOG_INFO(log, "Computing destination partition set, executing query: {}", query); - - auto local_context = Context::createCopy(context); - local_context->setSettings(task_cluster->settings_pull); - InterpreterSelectWithUnionQuery select(query_ast, local_context, SelectQueryOptions{}); - QueryPlan plan; - select.buildQueryPlan(plan); - auto builder = std::move(*plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(local_context), - BuildQueryPipelineSettings::fromContext(local_context))); - - Block block = getBlockWithAllStreamData(std::move(builder)); - - if (block) - { - ColumnWithTypeAndName & column = block.getByPosition(0); - task_shard.partition_key_column = column; - - for (size_t i = 0; i < column.column->size(); ++i) - { - WriteBufferFromOwnString wb; - column.type->getDefaultSerialization()->serializeTextQuoted(*column.column, i, wb, FormatSettings()); - res.emplace(wb.str()); - } - } - - LOG_INFO(log, "There are {} destination partitions in shard {}", res.size(), task_shard.getDescription()); - - return res; -} - -bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts, - TaskShard & task_shard, const String & partition_quoted_name) -{ - createShardInternalTables(timeouts, task_shard, false); - - TaskTable & task_table = task_shard.task_table; - - WriteBufferFromOwnString ss; - ss << "WITH " + partition_quoted_name + " AS partition_key "; - ss << "SELECT 1 FROM " << getQuotedTable(task_shard.table_read_shard); - ss << " WHERE (" << queryToString(task_table.engine_push_partition_key_ast) << " = partition_key)"; - if (!task_table.where_condition_str.empty()) - ss << " AND (" << task_table.where_condition_str << ")"; - ss << " LIMIT 1"; - auto query = ss.str(); - - ParserQuery parser_query(query.data() + query.size()); - const auto & settings = getContext()->getSettingsRef(); - ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); - - LOG_INFO(log, "Checking shard {} for partition {} existence, executing query: {}", - task_shard.getDescription(), partition_quoted_name, query_ast->formatForErrorMessage()); - - auto local_context = Context::createCopy(context); - local_context->setSettings(task_cluster->settings_pull); - auto pipeline = InterpreterFactory::instance().get(query_ast, local_context)->execute().pipeline; - PullingPipelineExecutor executor(pipeline); - Block block; - executor.pull(block); - return block.rows() != 0; -} - -bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTimeouts & timeouts, - TaskShard & task_shard, const String & partition_quoted_name, size_t current_piece_number) -{ - createShardInternalTables(timeouts, task_shard, false); - - TaskTable & task_table = task_shard.task_table; - const size_t number_of_splits = task_table.number_of_splits; - const String & primary_key_comma_separated = task_table.primary_key_comma_separated; - - UNUSED(primary_key_comma_separated); - - std::string query; - - query += "WITH " + partition_quoted_name + " AS partition_key "; - query += "SELECT 1 FROM " + getQuotedTable(task_shard.table_read_shard); - - if (experimental_use_sample_offset) - query += " SAMPLE 1/" + toString(number_of_splits) + " OFFSET " + toString(current_piece_number) + "/" + toString(number_of_splits); - - query += " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) + " = partition_key)"; - - if (!experimental_use_sample_offset) - query += " AND (cityHash64(" + 
primary_key_comma_separated + ") % " - + std::to_string(number_of_splits) + " = " + std::to_string(current_piece_number) + " )"; - - if (!task_table.where_condition_str.empty()) - query += " AND (" + task_table.where_condition_str + ")"; - - query += " LIMIT 1"; - - LOG_INFO(log, "Checking shard {} for partition {} piece {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, std::to_string(current_piece_number), query); - - ParserQuery parser_query(query.data() + query.size()); - const auto & settings = getContext()->getSettingsRef(); - ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); - - auto local_context = Context::createCopy(context); - local_context->setSettings(task_cluster->settings_pull); - auto pipeline = InterpreterFactory::instance().get(query_ast, local_context)->execute().pipeline; - PullingPipelineExecutor executor(pipeline); - Block result; - executor.pull(result); - if (result.rows() != 0) - LOG_INFO(log, "Partition {} piece number {} is PRESENT on shard {}", partition_quoted_name, std::to_string(current_piece_number), task_shard.getDescription()); - else - LOG_INFO(log, "Partition {} piece number {} is ABSENT on shard {}", partition_quoted_name, std::to_string(current_piece_number), task_shard.getDescription()); - return result.rows() != 0; -} - - -/** Executes simple query (without output streams, for example DDL queries) on each shard of the cluster - * Returns number of shards for which at least one replica executed query successfully - */ -UInt64 ClusterCopier::executeQueryOnCluster( - const ClusterPtr & cluster, - const String & query, - const Settings & current_settings, - ClusterExecutionMode execution_mode) const -{ - ClusterPtr cluster_for_query = cluster; - if (execution_mode == ClusterExecutionMode::ON_EACH_NODE) - cluster_for_query = cluster->getClusterWithReplicasAsShards(current_settings); - - std::vector> connections; - connections.reserve(cluster->getShardCount()); - - std::atomic successfully_executed = 0; - - for (const auto & replicas : cluster_for_query->getShardsAddresses()) - { - for (const auto & node : replicas) - { - try - { - connections.emplace_back(std::make_shared( - node.host_name, node.port, node.default_database, - node.user, node.password, ssh::SSHKey(), node.quota_key, node.cluster, node.cluster_secret, - "ClusterCopier", node.compression, node.secure - )); - - /// We execute only Alter, Create and Drop queries. - const auto header = Block{}; - - /// For unknown reason global context is passed to IStorage::read() method - /// So, task_identifier is passed as constructor argument. It is more obvious. - auto remote_query_executor = std::make_shared( - *connections.back(), query, header, getContext(), - /*throttler=*/nullptr, Scalars(), Tables(), QueryProcessingStage::Complete); - - try - { - remote_query_executor->sendQuery(); - } - catch (...) - { - LOG_WARNING(log, "Node with address {} seems to be unreachable.", node.host_name); - continue; - } - - while (true) - { - auto block = remote_query_executor->readBlock(); - if (!block) - break; - } - - remote_query_executor->finish(); - ++successfully_executed; - break; - } - catch (...) 
- { - LOG_WARNING(log, "An error occurred while processing query: {}", query); - tryLogCurrentException(log); - continue; - } - } - } - - return successfully_executed.load(); -} - -} diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h deleted file mode 100644 index 01f8b30f546..00000000000 --- a/programs/copier/ClusterCopier.h +++ /dev/null @@ -1,240 +0,0 @@ -#pragma once - -#include "Aliases.h" -#include "Internals.h" -#include "TaskCluster.h" -#include "TaskShard.h" -#include "TaskTable.h" -#include "ShardPartition.h" -#include "ShardPartitionPiece.h" -#include "ZooKeeperStaff.h" - - -namespace DB -{ - -class ClusterCopier : WithMutableContext -{ -public: - ClusterCopier(const String & task_path_, - const String & host_id_, - const String & proxy_database_name_, - ContextMutablePtr context_, - LoggerRawPtr log_) - : WithMutableContext(context_), - task_zookeeper_path(task_path_), - host_id(host_id_), - working_database_name(proxy_database_name_), - log(log_) {} - - void init(); - - template - decltype(auto) retry(T && func, UInt64 max_tries = 100); - - void discoverShardPartitions(const ConnectionTimeouts & timeouts, const TaskShardPtr & task_shard); - - /// Compute set of partitions, assume set of partitions aren't changed during the processing - void discoverTablePartitions(const ConnectionTimeouts & timeouts, TaskTable & task_table, UInt64 num_threads = 0); - - void uploadTaskDescription(const std::string & task_path, const std::string & task_file, bool force); - - void reloadTaskDescription(); - - void updateConfigIfNeeded(); - - void process(const ConnectionTimeouts & timeouts); - - /// Disables DROP PARTITION commands that used to clear data after errors - void setSafeMode(bool is_safe_mode_ = true) - { - is_safe_mode = is_safe_mode_; - } - - void setCopyFaultProbability(double copy_fault_probability_) - { - copy_fault_probability = copy_fault_probability_; - } - - void setMoveFaultProbability(double move_fault_probability_) - { - move_fault_probability = move_fault_probability_; - } - - void setExperimentalUseSampleOffset(bool value) - { - experimental_use_sample_offset = value; - } - - void setMaxTableTries(UInt64 tries) - { - max_table_tries = tries; - } - void setMaxShardPartitionTries(UInt64 tries) - { - max_shard_partition_tries = tries; - } - void setMaxShardPartitionPieceTriesForAlter(UInt64 tries) - { - max_shard_partition_piece_tries_for_alter = tries; - } - void setRetryDelayMs(std::chrono::milliseconds ms) - { - retry_delay_ms = ms; - } - -protected: - - String getWorkersPath() const - { - return task_cluster->task_zookeeper_path + "/task_active_workers"; - } - - String getWorkersPathVersion() const - { - return getWorkersPath() + "_version"; - } - - String getCurrentWorkerNodePath() const - { - return getWorkersPath() + "/" + host_id; - } - - zkutil::EphemeralNodeHolder::Ptr createTaskWorkerNodeAndWaitIfNeed( - const zkutil::ZooKeeperPtr & zookeeper, - const String & description, - bool unprioritized); - - /* - * Checks that partition piece or some other entity is clean. - * The only requirement is that you have to pass is_dirty_flag_path and is_dirty_cleaned_path to the function. - * And is_dirty_flag_path is a parent of is_dirty_cleaned_path. 
- * */ - static bool checkPartitionPieceIsClean( - const zkutil::ZooKeeperPtr & zookeeper, - const CleanStateClock & clean_state_clock, - const String & task_status_path); - - bool checkAllPiecesInPartitionAreDone(const TaskTable & task_table, const String & partition_name, const TasksShard & shards_with_partition); - - /** Checks that the whole partition of a table was copied. We should do it carefully due to dirty lock. - * State of some task could change during the processing. - * We have to ensure that all shards have the finished state and there is no dirty flag. - * Moreover, we have to check status twice and check zxid, because state can change during the checking. - */ - - /* The same as function above - * Assume that we don't know on which shards do we have partition certain piece. - * We'll check them all (I mean shards that contain the whole partition) - * And shards that don't have certain piece MUST mark that piece is_done true. - * */ - bool checkPartitionPieceIsDone(const TaskTable & task_table, const String & partition_name, - size_t piece_number, const TasksShard & shards_with_partition); - - - /*Alter successful insertion to helping tables it will move all pieces to destination table*/ - TaskStatus tryMoveAllPiecesToDestinationTable(const TaskTable & task_table, const String & partition_name); - - /// Removes MATERIALIZED and ALIAS columns from create table query - static ASTPtr removeAliasMaterializedAndTTLColumnsFromCreateQuery(const ASTPtr & query_ast, bool allow_to_copy_alias_and_materialized_columns); - - bool tryDropPartitionPiece(ShardPartition & task_partition, size_t current_piece_number, - const zkutil::ZooKeeperPtr & zookeeper, const CleanStateClock & clean_state_clock); - - bool tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table); - - TaskStatus tryCreateDestinationTable(const ConnectionTimeouts & timeouts, TaskTable & task_table); - /// Job for copying partition from particular shard. - TaskStatus tryProcessPartitionTask(const ConnectionTimeouts & timeouts, - ShardPartition & task_partition, - bool is_unprioritized_task); - - TaskStatus iterateThroughAllPiecesInPartition(const ConnectionTimeouts & timeouts, - ShardPartition & task_partition, - bool is_unprioritized_task); - - TaskStatus processPartitionPieceTaskImpl(const ConnectionTimeouts & timeouts, - ShardPartition & task_partition, - size_t current_piece_number, - bool is_unprioritized_task); - - void dropAndCreateLocalTable(const ASTPtr & create_ast); - - void dropLocalTableIfExists(const DatabaseAndTableName & table_name) const; - - void dropHelpingTables(const TaskTable & task_table); - - void dropHelpingTablesByPieceNumber(const TaskTable & task_table, size_t current_piece_number); - - /// Is used for usage less disk space. - /// After all pieces were successfully moved to original destination - /// table we can get rid of partition pieces (partitions in helping tables). - void dropParticularPartitionPieceFromAllHelpingTables(const TaskTable & task_table, const String & partition_name); - - String getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings & settings); - - ASTPtr getCreateTableForPullShard(const ConnectionTimeouts & timeouts, TaskShard & task_shard); - - /// If it is implicitly asked to create split Distributed table for certain piece on current shard, we will do it. 
- void createShardInternalTables(const ConnectionTimeouts & timeouts, TaskShard & task_shard, bool create_split = true); - - std::set getShardPartitions(const ConnectionTimeouts & timeouts, TaskShard & task_shard); - - bool checkShardHasPartition(const ConnectionTimeouts & timeouts, TaskShard & task_shard, const String & partition_quoted_name); - - bool checkPresentPartitionPiecesOnCurrentShard(const ConnectionTimeouts & timeouts, - TaskShard & task_shard, const String & partition_quoted_name, size_t current_piece_number); - - /* - * This class is used in executeQueryOnCluster function - * You can execute query on each shard (no sense it is executed on each replica of a shard or not) - * or you can execute query on each replica on each shard. - * First mode is useful for INSERTS queries. - * */ - enum ClusterExecutionMode - { - ON_EACH_SHARD, - ON_EACH_NODE - }; - - /** Executes simple query (without output streams, for example DDL queries) on each shard of the cluster - * Returns number of shards for which at least one replica executed query successfully - */ - UInt64 executeQueryOnCluster( - const ClusterPtr & cluster, - const String & query, - const Settings & current_settings, - ClusterExecutionMode execution_mode = ClusterExecutionMode::ON_EACH_SHARD) const; - -private: - String task_zookeeper_path; - String task_description_path; - String host_id; - String working_database_name; - - /// Auto update config stuff - UInt64 task_description_current_version = 1; - std::atomic task_description_version{1}; - Coordination::WatchCallback task_description_watch_callback; - /// ZooKeeper session used to set the callback - zkutil::ZooKeeperPtr task_description_watch_zookeeper; - - ConfigurationPtr task_cluster_initial_config; - ConfigurationPtr task_cluster_current_config; - - std::unique_ptr task_cluster; - - bool is_safe_mode = false; - double copy_fault_probability = 0.0; - double move_fault_probability = 0.0; - - bool experimental_use_sample_offset{false}; - - LoggerRawPtr log; - - UInt64 max_table_tries = 3; - UInt64 max_shard_partition_tries = 3; - UInt64 max_shard_partition_piece_tries_for_alter = 10; - std::chrono::milliseconds retry_delay_ms{1000}; -}; -} diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp deleted file mode 100644 index fdf07dec61a..00000000000 --- a/programs/copier/ClusterCopierApp.cpp +++ /dev/null @@ -1,252 +0,0 @@ -#include "ClusterCopierApp.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace fs = std::filesystem; - -namespace DB -{ - -/// ClusterCopierApp - -void ClusterCopierApp::initialize(Poco::Util::Application & self) -{ - is_help = config().has("help"); - if (is_help) - return; - - config_xml_path = config().getString("config-file"); - task_path = config().getString("task-path"); - log_level = config().getString("log-level", "info"); - is_safe_mode = config().has("safe-mode"); - is_status_mode = config().has("status"); - if (config().has("copy-fault-probability")) - copy_fault_probability = std::max(std::min(config().getDouble("copy-fault-probability"), 1.0), 0.0); - if (config().has("move-fault-probability")) - move_fault_probability = std::max(std::min(config().getDouble("move-fault-probability"), 1.0), 0.0); - base_dir = (config().has("base-dir")) ? 
config().getString("base-dir") : fs::current_path().string(); - - max_table_tries = std::max(config().getUInt("max-table-tries", 3), 1); - max_shard_partition_tries = std::max(config().getUInt("max-shard-partition-tries", 3), 1); - max_shard_partition_piece_tries_for_alter = std::max(config().getUInt("max-shard-partition-piece-tries-for-alter", 10), 1); - retry_delay_ms = std::chrono::milliseconds(std::max(config().getUInt("retry-delay-ms", 1000), 100)); - - if (config().has("experimental-use-sample-offset")) - experimental_use_sample_offset = config().getBool("experimental-use-sample-offset"); - - // process_id is '#_' - time_t timestamp = Poco::Timestamp().epochTime(); - auto curr_pid = Poco::Process::id(); - - process_id = std::to_string(DateLUT::serverTimezoneInstance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid); - host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id; - process_path = fs::weakly_canonical(fs::path(base_dir) / ("clickhouse-copier_" + process_id)); - fs::create_directories(process_path); - - /// Override variables for BaseDaemon - if (config().has("log-level")) - config().setString("logger.level", config().getString("log-level")); - - if (config().has("base-dir") || !config().has("logger.log")) - config().setString("logger.log", fs::path(process_path) / "log.log"); - - if (config().has("base-dir") || !config().has("logger.errorlog")) - config().setString("logger.errorlog", fs::path(process_path) / "log.err.log"); - - Base::initialize(self); -} - - -void ClusterCopierApp::handleHelp(const std::string &, const std::string &) -{ - uint16_t terminal_width = 0; - if (isatty(STDIN_FILENO)) - terminal_width = getTerminalWidth(); - - Poco::Util::HelpFormatter help_formatter(options()); - if (terminal_width) - help_formatter.setWidth(terminal_width); - help_formatter.setCommand(commandName()); - help_formatter.setHeader("Copies tables from one cluster to another"); - help_formatter.setUsage("--config-file --task-path "); - help_formatter.format(std::cerr); - help_formatter.setFooter("See also: https://clickhouse.com/docs/en/operations/utilities/clickhouse-copier/"); - - stopOptionsProcessing(); -} - - -void ClusterCopierApp::defineOptions(Poco::Util::OptionSet & options) -{ - Base::defineOptions(options); - - options.addOption(Poco::Util::Option("task-path", "", "path to task in ZooKeeper") - .argument("task-path").binding("task-path")); - options.addOption(Poco::Util::Option("task-file", "", "path to task file for uploading in ZooKeeper to task-path") - .argument("task-file").binding("task-file")); - options.addOption(Poco::Util::Option("task-upload-force", "", "Force upload task-file even node already exists. 
Default is false.") - .argument("task-upload-force").binding("task-upload-force")); - options.addOption(Poco::Util::Option("safe-mode", "", "disables ALTER DROP PARTITION in case of errors") - .binding("safe-mode")); - options.addOption(Poco::Util::Option("copy-fault-probability", "", "the copying fails with specified probability (used to test partition state recovering)") - .argument("copy-fault-probability").binding("copy-fault-probability")); - options.addOption(Poco::Util::Option("move-fault-probability", "", "the moving fails with specified probability (used to test partition state recovering)") - .argument("move-fault-probability").binding("move-fault-probability")); - options.addOption(Poco::Util::Option("log-level", "", "sets log level") - .argument("log-level").binding("log-level")); - options.addOption(Poco::Util::Option("base-dir", "", "base directory for copiers, consecutive copier launches will populate /base-dir/launch_id/* directories") - .argument("base-dir").binding("base-dir")); - options.addOption(Poco::Util::Option("experimental-use-sample-offset", "", "Use SAMPLE OFFSET query instead of cityHash64(PRIMARY KEY) % n == k") - .argument("experimental-use-sample-offset").binding("experimental-use-sample-offset")); - options.addOption(Poco::Util::Option("status", "", "Get for status for current execution").binding("status")); - - options.addOption(Poco::Util::Option("max-table-tries", "", "Number of tries for the copy table task") - .argument("max-table-tries").binding("max-table-tries")); - options.addOption(Poco::Util::Option("max-shard-partition-tries", "", "Number of tries for the copy one partition task") - .argument("max-shard-partition-tries").binding("max-shard-partition-tries")); - options.addOption(Poco::Util::Option("max-shard-partition-piece-tries-for-alter", "", "Number of tries for final ALTER ATTACH to destination table") - .argument("max-shard-partition-piece-tries-for-alter").binding("max-shard-partition-piece-tries-for-alter")); - options.addOption(Poco::Util::Option("retry-delay-ms", "", "Delay between task retries") - .argument("retry-delay-ms").binding("retry-delay-ms")); - - using Me = std::decay_t; - options.addOption(Poco::Util::Option("help", "", "produce this help message").binding("help") - .callback(Poco::Util::OptionCallback(this, &Me::handleHelp))); -} - - -void ClusterCopierApp::mainImpl() -{ - /// Status command - { - if (is_status_mode) - { - SharedContextHolder shared_context = Context::createShared(); - auto context = Context::createGlobal(shared_context.get()); - context->makeGlobalContext(); - SCOPE_EXIT_SAFE(context->shutdown()); - - auto zookeeper = context->getZooKeeper(); - auto status_json = zookeeper->get(task_path + "/status"); - - LOG_INFO(&logger(), "{}", status_json); - std::cout << status_json << std::endl; - - context->resetZooKeeper(); - return; - } - } - StatusFile status_file(process_path + "/status", StatusFile::write_full_info); - ThreadStatus thread_status; - - auto * log = &logger(); - LOG_INFO(log, "Starting clickhouse-copier (id {}, host_id {}, path {}, revision {})", process_id, host_id, process_path, ClickHouseRevision::getVersionRevision()); - - SharedContextHolder shared_context = Context::createShared(); - auto context = Context::createGlobal(shared_context.get()); - context->makeGlobalContext(); - SCOPE_EXIT_SAFE(context->shutdown()); - - context->setConfig(loaded_config.configuration); - context->setApplicationType(Context::ApplicationType::LOCAL); - context->setPath(process_path + "/"); - - 
registerInterpreters(); - registerFunctions(); - registerAggregateFunctions(); - registerTableFunctions(); - registerDatabases(); - registerStorages(); - registerDictionaries(); - registerDisks(/* global_skip_access_check= */ true); - registerFormats(); - - static const std::string default_database = "_local"; - DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, context)); - context->setCurrentDatabase(default_database); - - /// Disable queries logging, since: - /// - There are bits that is not allowed for global context, like adding factories info (for the query_log) - /// - And anyway it is useless for copier. - context->setSetting("log_queries", false); - - auto local_context = Context::createCopy(context); - - /// Initialize query scope just in case. - CurrentThread::QueryScope query_scope(local_context); - - auto copier = std::make_unique( - task_path, host_id, default_database, local_context, log); - copier->setSafeMode(is_safe_mode); - copier->setCopyFaultProbability(copy_fault_probability); - copier->setMoveFaultProbability(move_fault_probability); - copier->setMaxTableTries(max_table_tries); - copier->setMaxShardPartitionTries(max_shard_partition_tries); - copier->setMaxShardPartitionPieceTriesForAlter(max_shard_partition_piece_tries_for_alter); - copier->setRetryDelayMs(retry_delay_ms); - copier->setExperimentalUseSampleOffset(experimental_use_sample_offset); - - auto task_file = config().getString("task-file", ""); - if (!task_file.empty()) - copier->uploadTaskDescription(task_path, task_file, config().getBool("task-upload-force", false)); - - zkutil::validateZooKeeperConfig(config()); - - copier->init(); - copier->process(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(context->getSettingsRef())); - - /// Reset ZooKeeper before removing ClusterCopier. - /// Otherwise zookeeper watch can call callback which use already removed ClusterCopier object. - context->resetZooKeeper(); -} - - -int ClusterCopierApp::main(const std::vector &) -{ - if (is_help) - return 0; - - try - { - mainImpl(); - } - catch (...) - { - tryLogCurrentException(&Poco::Logger::root(), __PRETTY_FUNCTION__); - auto code = getCurrentExceptionCode(); - - return (code) ? code : -1; - } - - return 0; -} - - -} - -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" - -int mainEntryClickHouseClusterCopier(int argc, char ** argv) -{ - try - { - DB::ClusterCopierApp app; - return app.run(argc, argv); - } - catch (...) - { - std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; - auto code = DB::getCurrentExceptionCode(); - - return (code) ? code : -1; - } -} diff --git a/programs/copier/ClusterCopierApp.h b/programs/copier/ClusterCopierApp.h deleted file mode 100644 index 0ddc232381e..00000000000 --- a/programs/copier/ClusterCopierApp.h +++ /dev/null @@ -1,99 +0,0 @@ -#pragma once - -#include -#include - -#include "ClusterCopier.h" - -/* clickhouse cluster copier util - * Copies tables data from one cluster to new tables of other (possibly the same) cluster in distributed fault-tolerant manner. - * - * See overview in the docs: docs/en/utils/clickhouse-copier.md - * - * Implementation details: - * - * cluster-copier workers pull each partition of each shard of the source cluster and push it to the destination cluster through - * Distributed table (to perform data resharding). So, worker job is a partition of a source shard. - * A job has three states: Active, Finished and Abandoned. 
Abandoned means that worker died and did not finish the job. - * - * If an error occurred during the copying (a worker failed or a worker did not finish the INSERT), then the whole partition (on - * all destination servers) should be dropped and refilled. So, copying entity is a partition of all destination shards. - * If a failure is detected a special /is_dirty node is created in ZooKeeper signalling that other workers copying the same partition - * should stop, after a refilling procedure should start. - * - * ZooKeeper task node has the following structure: - * /task/path_root - path passed in --task-path parameter - * /description - contains user-defined XML config of the task - * /task_active_workers - contains ephemeral nodes of all currently active workers, used to implement max_workers limitation - * /server_fqdn#PID_timestamp - cluster-copier worker ID - * ... - * /tables - directory with table tasks - * /cluster.db.table1 - directory of table_hits task - * /partition1 - directory for partition1 - * /shards - directory for source cluster shards - * /1 - worker job for the first shard of partition1 of table test.hits - * Contains info about current status (Active or Finished) and worker ID. - * /2 - * ... - * /partition_active_workers - * /1 - for each job in /shards a corresponding ephemeral node created in /partition_active_workers - * It is used to detect Abandoned jobs (if there is Active node in /shards and there is no node in - * /partition_active_workers). - * Also, it is used to track active workers in the partition (when we need to refill the partition we do - * not DROP PARTITION while there are active workers) - * /2 - * ... - * /is_dirty - the node is set if some worker detected that an error occurred (the INSERT is failed or an Abandoned node is - * detected). If the node appeared workers in this partition should stop and start cleaning and refilling - * partition procedure. - * During this procedure a single 'cleaner' worker is selected. The worker waits for stopping all partition - * workers, removes /shards node, executes DROP PARTITION on each destination node and removes /is_dirty node. - * /cleaner- An ephemeral node used to select 'cleaner' worker. Contains ID of the worker. - * /cluster.db.table2 - * ... 
- */ - -namespace DB -{ - -class ClusterCopierApp : public BaseDaemon -{ -public: - - void initialize(Poco::Util::Application & self) override; - - void handleHelp(const std::string &, const std::string &); - - void defineOptions(Poco::Util::OptionSet & options) override; - - int main(const std::vector &) override; - -private: - - using Base = BaseDaemon; - - void mainImpl(); - - std::string config_xml_path; - std::string task_path; - std::string log_level = "info"; - bool is_safe_mode = false; - bool is_status_mode = false; - double copy_fault_probability = 0.0; - double move_fault_probability = 0.0; - bool is_help = false; - - UInt64 max_table_tries = 3; - UInt64 max_shard_partition_tries = 3; - UInt64 max_shard_partition_piece_tries_for_alter = 10; - std::chrono::milliseconds retry_delay_ms{1000}; - - bool experimental_use_sample_offset{false}; - - std::string base_dir; - std::string process_path; - std::string process_id; - std::string host_id; -}; - -} diff --git a/programs/copier/ClusterPartition.h b/programs/copier/ClusterPartition.h deleted file mode 100644 index 22063989e22..00000000000 --- a/programs/copier/ClusterPartition.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -/// Contains info about all shards that contain a partition -struct ClusterPartition -{ - double elapsed_time_seconds = 0; - UInt64 bytes_copied = 0; - UInt64 rows_copied = 0; - UInt64 blocks_copied = 0; - - UInt64 total_tries = 0; -}; - -using ClusterPartitions = std::map>; - -} diff --git a/programs/copier/Internals.cpp b/programs/copier/Internals.cpp deleted file mode 100644 index dcd199c6b38..00000000000 --- a/programs/copier/Internals.cpp +++ /dev/null @@ -1,280 +0,0 @@ -#include "Internals.h" -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -using ConfigurationPtr = Poco::AutoPtr; - -ConfigurationPtr getConfigurationFromXMLString(const std::string & xml_data) -{ - std::stringstream ss(xml_data); // STYLE_CHECK_ALLOW_STD_STRING_STREAM - Poco::XML::InputSource input_source{ss}; - return {new Poco::Util::XMLConfiguration{&input_source}}; -} - -String getQuotedTable(const String & database, const String & table) -{ - if (database.empty()) - return backQuoteIfNeed(table); - - return backQuoteIfNeed(database) + "." 
+ backQuoteIfNeed(table); -} - -String getQuotedTable(const DatabaseAndTableName & db_and_table) -{ - return getQuotedTable(db_and_table.first, db_and_table.second); -} - - -// Creates AST representing 'ENGINE = Distributed(cluster, db, table, [sharding_key]) -std::shared_ptr createASTStorageDistributed( - const String & cluster_name, const String & database, const String & table, - const ASTPtr & sharding_key_ast) -{ - auto args = std::make_shared(); - args->children.emplace_back(std::make_shared(cluster_name)); - args->children.emplace_back(std::make_shared(database)); - args->children.emplace_back(std::make_shared(table)); - if (sharding_key_ast) - args->children.emplace_back(sharding_key_ast); - - auto engine = std::make_shared(); - engine->name = "Distributed"; - engine->arguments = args; - - auto storage = std::make_shared(); - storage->set(storage->engine, engine); - - return storage; -} - - -Block getBlockWithAllStreamData(QueryPipelineBuilder builder) -{ - builder.addTransform(std::make_shared( - builder.getHeader(), - std::numeric_limits::max(), - std::numeric_limits::max())); - - auto cur_pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); - Block block; - PullingPipelineExecutor executor(cur_pipeline); - executor.pull(block); - - return block; -} - -bool isExtendedDefinitionStorage(const ASTPtr & storage_ast) -{ - const auto & storage = storage_ast->as(); - return storage.partition_by || storage.order_by || storage.sample_by; -} - -ASTPtr extractPartitionKey(const ASTPtr & storage_ast) -{ - String storage_str = queryToString(storage_ast); - - const auto & storage = storage_ast->as(); - const auto & engine = storage.engine->as(); - - if (!endsWith(engine.name, "MergeTree")) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str); - } - - if (isExtendedDefinitionStorage(storage_ast)) - { - if (storage.partition_by) - return storage.partition_by->clone(); - - static const char * all = "all"; - return std::make_shared(Field(all, strlen(all))); - } - else - { - bool is_replicated = startsWith(engine.name, "Replicated"); - size_t min_args = is_replicated ? 3 : 1; - - if (!engine.arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected arguments in {}", storage_str); - - ASTPtr arguments_ast = engine.arguments->clone(); - ASTs & arguments = arguments_ast->children; - - if (arguments.size() < min_args) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected at least {} arguments in {}", min_args, storage_str); - - ASTPtr & month_arg = is_replicated ? 
arguments[2] : arguments[1]; - return makeASTFunction("toYYYYMM", month_arg->clone()); - } -} - -ASTPtr extractPrimaryKey(const ASTPtr & storage_ast) -{ - String storage_str = queryToString(storage_ast); - - const auto & storage = storage_ast->as(); - const auto & engine = storage.engine->as(); - - if (!endsWith(engine.name, "MergeTree")) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str); - } - - if (!isExtendedDefinitionStorage(storage_ast)) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str); - } - - if (storage.primary_key) - return storage.primary_key->clone(); - - return nullptr; -} - - -ASTPtr extractOrderBy(const ASTPtr & storage_ast) -{ - String storage_str = queryToString(storage_ast); - - const auto & storage = storage_ast->as(); - const auto & engine = storage.engine->as(); - - if (!endsWith(engine.name, "MergeTree")) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str); - } - - if (!isExtendedDefinitionStorage(storage_ast)) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str); - } - - if (storage.order_by) - return storage.order_by->clone(); - - throw Exception(ErrorCodes::BAD_ARGUMENTS, "ORDER BY cannot be empty"); -} - -/// Wraps only identifiers with backticks. -std::string wrapIdentifiersWithBackticks(const ASTPtr & root) -{ - if (auto identifier = std::dynamic_pointer_cast(root)) - return backQuote(identifier->name()); - - if (auto function = std::dynamic_pointer_cast(root)) - return function->name + '(' + wrapIdentifiersWithBackticks(function->arguments) + ')'; - - if (auto expression_list = std::dynamic_pointer_cast(root)) - { - Names function_arguments(expression_list->children.size()); - for (size_t i = 0; i < expression_list->children.size(); ++i) - function_arguments[i] = wrapIdentifiersWithBackticks(expression_list->children[0]); - return boost::algorithm::join(function_arguments, ", "); - } - - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key could be represented only as columns or functions from columns."); -} - - -Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast) -{ - const auto sorting_key_ast = extractOrderBy(storage_ast); - const auto primary_key_ast = extractPrimaryKey(storage_ast); - - const auto sorting_key_expr_list = extractKeyExpressionList(sorting_key_ast); - const auto primary_key_expr_list = primary_key_ast - ? extractKeyExpressionList(primary_key_ast) : sorting_key_expr_list->clone(); - - /// Maybe we have to handle VersionedCollapsing engine separately. But in our case in looks pointless. - - size_t primary_key_size = primary_key_expr_list->children.size(); - size_t sorting_key_size = sorting_key_expr_list->children.size(); - - if (primary_key_size > sorting_key_size) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key must be a prefix of the sorting key, but its length: " - "{} is greater than the sorting key length: {}", - primary_key_size, sorting_key_size); - - Names primary_key_columns; - NameSet primary_key_columns_set; - - for (size_t i = 0; i < sorting_key_size; ++i) - { - /// Column name could be represented as a f_1(f_2(...f_n(column_name))). - /// Each f_i could take one or more parameters. - /// We will wrap identifiers with backticks to allow non-standard identifier names. 
- String sorting_key_column = sorting_key_expr_list->children[i]->getColumnName(); - - if (i < primary_key_size) - { - String pk_column = primary_key_expr_list->children[i]->getColumnName(); - if (pk_column != sorting_key_column) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Primary key must be a prefix of the sorting key, " - "but the column in the position {} is {}, not {}", i, sorting_key_column, pk_column); - - if (!primary_key_columns_set.emplace(pk_column).second) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key contains duplicate columns"); - - primary_key_columns.push_back(wrapIdentifiersWithBackticks(primary_key_expr_list->children[i])); - } - } - - return primary_key_columns; -} - -bool isReplicatedTableEngine(const ASTPtr & storage_ast) -{ - const auto & storage = storage_ast->as(); - const auto & engine = storage.engine->as(); - - if (!endsWith(engine.name, "MergeTree")) - { - String storage_str = queryToString(storage_ast); - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str); - } - - return startsWith(engine.name, "Replicated"); -} - -ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std::string & local_hostname, UInt8 random) -{ - ShardPriority res; - - if (replicas.empty()) - return res; - - res.is_remote = 1; - for (const auto & replica : replicas) - { - if (isLocalAddress(DNSResolver::instance().resolveHostAllInOriginOrder(replica.host_name).front())) - { - res.is_remote = 0; - break; - } - } - - res.hostname_difference = std::numeric_limits::max(); - for (const auto & replica : replicas) - { - size_t difference = getHostNamePrefixDistance(local_hostname, replica.host_name); - res.hostname_difference = std::min(difference, res.hostname_difference); - } - - res.random = random; - return res; -} - -} diff --git a/programs/copier/Internals.h b/programs/copier/Internals.h deleted file mode 100644 index 27fedd5d9e8..00000000000 --- a/programs/copier/Internals.h +++ /dev/null @@ -1,198 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "Aliases.h" - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - - -ConfigurationPtr getConfigurationFromXMLString(const std::string & xml_data); - -String getQuotedTable(const String & database, const String & table); - -String getQuotedTable(const DatabaseAndTableName & db_and_table); - - -enum class TaskState -{ - Started = 0, - Finished, - Unknown -}; - -/// Used to mark status of shard partition tasks -struct TaskStateWithOwner -{ - TaskStateWithOwner() = default; - - TaskStateWithOwner(TaskState state_, const String & owner_) : state(state_), owner(owner_) {} - - TaskState state{TaskState::Unknown}; - String owner; - - static String getData(TaskState state, const String &owner) - { - return TaskStateWithOwner(state, owner).toString(); - } - - String toString() const - { - WriteBufferFromOwnString wb; - 
wb << static_cast(state) << "\n" << escape << owner; - return wb.str(); - } - - static TaskStateWithOwner fromString(const String & data) - { - ReadBufferFromString rb(data); - TaskStateWithOwner res; - UInt32 state; - - rb >> state >> "\n" >> escape >> res.owner; - - if (state >= static_cast(TaskState::Unknown)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown state {}", data); - - res.state = static_cast(state); - return res; - } -}; - - -struct ShardPriority -{ - UInt8 is_remote = 1; - size_t hostname_difference = 0; - UInt8 random = 0; - - static bool greaterPriority(const ShardPriority & current, const ShardPriority & other) - { - return std::forward_as_tuple(current.is_remote, current.hostname_difference, current.random) - < std::forward_as_tuple(other.is_remote, other.hostname_difference, other.random); - } -}; - -/// Execution status of a task. -/// Is used for: partition copying task status, partition piece copying task status, partition moving task status. -enum class TaskStatus -{ - Active, - Finished, - Error, -}; - -struct MultiTransactionInfo -{ - int32_t code; - Coordination::Requests requests; - Coordination::Responses responses; -}; - -// Creates AST representing 'ENGINE = Distributed(cluster, db, table, [sharding_key]) -std::shared_ptr createASTStorageDistributed( - const String & cluster_name, const String & database, const String & table, - const ASTPtr & sharding_key_ast = nullptr); - -Block getBlockWithAllStreamData(QueryPipelineBuilder builder); - -bool isExtendedDefinitionStorage(const ASTPtr & storage_ast); - -ASTPtr extractPartitionKey(const ASTPtr & storage_ast); - -/* -* Choosing a Primary Key that Differs from the Sorting Key -* It is possible to specify a primary key (an expression with values that are written in the index file for each mark) -* that is different from the sorting key (an expression for sorting the rows in data parts). -* In this case the primary key expression tuple must be a prefix of the sorting key expression tuple. -* This feature is helpful when using the SummingMergeTree and AggregatingMergeTree table engines. -* In a common case when using these engines, the table has two types of columns: dimensions and measures. -* Typical queries aggregate values of measure columns with arbitrary GROUP BY and filtering by dimensions. -* Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, -* it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns -* and this list must be frequently updated with newly added dimensions. -* In this case it makes sense to leave only a few columns in the primary key that will provide efficient -* range scans and add the remaining dimension columns to the sorting key tuple. -* ALTER of the sorting key is a lightweight operation because when a new column is simultaneously added t -* o the table and to the sorting key, existing data parts don't need to be changed. -* Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, -* the data is sorted by both the old and new sorting keys at the moment of table modification. 
-* -* */ -ASTPtr extractPrimaryKey(const ASTPtr & storage_ast); - -ASTPtr extractOrderBy(const ASTPtr & storage_ast); - -Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast); - -bool isReplicatedTableEngine(const ASTPtr & storage_ast); - -ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std::string & local_hostname, UInt8 random); - -} diff --git a/programs/copier/ShardPartition.cpp b/programs/copier/ShardPartition.cpp deleted file mode 100644 index 4c962fc807d..00000000000 --- a/programs/copier/ShardPartition.cpp +++ /dev/null @@ -1,70 +0,0 @@ -#include "ShardPartition.h" - -#include "TaskShard.h" -#include "TaskTable.h" - -namespace DB -{ - -ShardPartition::ShardPartition(TaskShard & parent, String name_quoted_, size_t number_of_splits) - : task_shard(parent) - , name(std::move(name_quoted_)) -{ - pieces.reserve(number_of_splits); -} - -String ShardPartition::getPartitionCleanStartPath() const -{ - return getPartitionPath() + "/clean_start"; -} - -String ShardPartition::getPartitionPieceCleanStartPath(size_t current_piece_number) const -{ - assert(current_piece_number < task_shard.task_table.number_of_splits); - return getPartitionPiecePath(current_piece_number) + "/clean_start"; -} - -String ShardPartition::getPartitionPath() const -{ - return task_shard.task_table.getPartitionPath(name); -} - -String ShardPartition::getPartitionPiecePath(size_t current_piece_number) const -{ - assert(current_piece_number < task_shard.task_table.number_of_splits); - return task_shard.task_table.getPartitionPiecePath(name, current_piece_number); -} - -String ShardPartition::getShardStatusPath() const -{ - // schema: //tables///shards/ - // e.g. /root/table_test.hits/201701/shards/1 - return getPartitionShardsPath() + "/" + toString(task_shard.numberInCluster()); -} - -String ShardPartition::getPartitionShardsPath() const -{ - return getPartitionPath() + "/shards"; -} - -String ShardPartition::getPartitionActiveWorkersPath() const -{ - return getPartitionPath() + "/partition_active_workers"; -} - -String ShardPartition::getActiveWorkerPath() const -{ - return getPartitionActiveWorkersPath() + "/" + toString(task_shard.numberInCluster()); -} - -String ShardPartition::getCommonPartitionIsDirtyPath() const -{ - return getPartitionPath() + "/is_dirty"; -} - -String ShardPartition::getCommonPartitionIsCleanedPath() const -{ - return getCommonPartitionIsDirtyPath() + "/cleaned"; -} - -} diff --git a/programs/copier/ShardPartition.h b/programs/copier/ShardPartition.h deleted file mode 100644 index 2457213733c..00000000000 --- a/programs/copier/ShardPartition.h +++ /dev/null @@ -1,54 +0,0 @@ -#pragma once - -#include "ShardPartitionPiece.h" - -#include - -#include - -namespace DB -{ - -struct TaskShard; - -/// Just destination partition of a shard -/// I don't know what this comment means. -/// In short, when we discovered what shards contain currently processing partition, -/// This class describes a partition (name) that is stored on the shard (parent). 
-struct ShardPartition -{ - ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits = 10); - - String getPartitionPath() const; - - String getPartitionPiecePath(size_t current_piece_number) const; - - String getPartitionCleanStartPath() const; - - String getPartitionPieceCleanStartPath(size_t current_piece_number) const; - - String getCommonPartitionIsDirtyPath() const; - - String getCommonPartitionIsCleanedPath() const; - - String getPartitionActiveWorkersPath() const; - - String getActiveWorkerPath() const; - - String getPartitionShardsPath() const; - - String getShardStatusPath() const; - - /// What partition pieces are present in current shard. - /// FYI: Piece is a part of partition which has modulo equals to concrete constant (less than number_of_splits obliously) - /// For example SELECT ... from ... WHERE partition=current_partition AND cityHash64(*) == const; - /// Absent pieces have field is_absent_piece equals to true. - PartitionPieces pieces; - - TaskShard & task_shard; - String name; -}; - -using TasksPartition = std::map>; - -} diff --git a/programs/copier/ShardPartitionPiece.cpp b/programs/copier/ShardPartitionPiece.cpp deleted file mode 100644 index 36d1621e012..00000000000 --- a/programs/copier/ShardPartitionPiece.cpp +++ /dev/null @@ -1,64 +0,0 @@ -#include "ShardPartitionPiece.h" - -#include "ShardPartition.h" -#include "TaskShard.h" - -#include - -namespace DB -{ - -ShardPartitionPiece::ShardPartitionPiece(ShardPartition & parent, size_t current_piece_number_, bool is_present_piece_) - : is_absent_piece(!is_present_piece_) - , current_piece_number(current_piece_number_) - , shard_partition(parent) -{ -} - -String ShardPartitionPiece::getPartitionPiecePath() const -{ - return shard_partition.getPartitionPath() + "/piece_" + toString(current_piece_number); -} - -String ShardPartitionPiece::getPartitionPieceCleanStartPath() const -{ - return getPartitionPiecePath() + "/clean_start"; -} - -String ShardPartitionPiece::getPartitionPieceIsDirtyPath() const -{ - return getPartitionPiecePath() + "/is_dirty"; -} - -String ShardPartitionPiece::getPartitionPieceIsCleanedPath() const -{ - return getPartitionPieceIsDirtyPath() + "/cleaned"; -} - -String ShardPartitionPiece::getPartitionPieceActiveWorkersPath() const -{ - return getPartitionPiecePath() + "/partition_piece_active_workers"; -} - -String ShardPartitionPiece::getActiveWorkerPath() const -{ - return getPartitionPieceActiveWorkersPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); -} - -/// On what shards do we have current partition. 
-String ShardPartitionPiece::getPartitionPieceShardsPath() const -{ - return getPartitionPiecePath() + "/shards"; -} - -String ShardPartitionPiece::getShardStatusPath() const -{ - return getPartitionPieceShardsPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); -} - -String ShardPartitionPiece::getPartitionPieceCleanerPath() const -{ - return getPartitionPieceIsDirtyPath() + "/cleaner"; -} - -} diff --git a/programs/copier/ShardPartitionPiece.h b/programs/copier/ShardPartitionPiece.h deleted file mode 100644 index 453364c0fc8..00000000000 --- a/programs/copier/ShardPartitionPiece.h +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once - -#include - -#include - -namespace DB -{ - -struct ShardPartition; - -struct ShardPartitionPiece -{ - ShardPartitionPiece(ShardPartition & parent, size_t current_piece_number_, bool is_present_piece_); - - String getPartitionPiecePath() const; - - String getPartitionPieceCleanStartPath() const; - - String getPartitionPieceIsDirtyPath() const; - - String getPartitionPieceIsCleanedPath() const; - - String getPartitionPieceActiveWorkersPath() const; - - String getActiveWorkerPath() const ; - - /// On what shards do we have current partition. - String getPartitionPieceShardsPath() const; - - String getShardStatusPath() const; - - String getPartitionPieceCleanerPath() const; - - bool is_absent_piece; - const size_t current_piece_number; - - ShardPartition & shard_partition; -}; - -using PartitionPieces = std::vector; - -} diff --git a/programs/copier/StatusAccumulator.cpp b/programs/copier/StatusAccumulator.cpp deleted file mode 100644 index 77adeac708c..00000000000 --- a/programs/copier/StatusAccumulator.cpp +++ /dev/null @@ -1,48 +0,0 @@ -#include "StatusAccumulator.h" - -#include -#include -#include -#include - -#include - -namespace DB -{ - -StatusAccumulator::MapPtr StatusAccumulator::fromJSON(String state_json) -{ - Poco::JSON::Parser parser; - auto state = parser.parse(state_json).extract(); - MapPtr result_ptr = std::make_shared(); - for (const auto & table_name : state->getNames()) - { - auto table_status_json = state->getValue(table_name); - auto table_status = parser.parse(table_status_json).extract(); - /// Map entry will be created if it is absent - auto & map_table_status = (*result_ptr)[table_name]; - map_table_status.all_partitions_count += table_status->getValue("all_partitions_count"); - map_table_status.processed_partitions_count += table_status->getValue("processed_partitions_count"); - } - return result_ptr; -} - -String StatusAccumulator::serializeToJSON(MapPtr statuses) -{ - Poco::JSON::Object result_json; - for (const auto & [table_name, table_status] : *statuses) - { - Poco::JSON::Object status_json; - status_json.set("all_partitions_count", table_status.all_partitions_count); - status_json.set("processed_partitions_count", table_status.processed_partitions_count); - - result_json.set(table_name, status_json); - } - std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM - oss.exceptions(std::ios::failbit); - Poco::JSON::Stringifier::stringify(result_json, oss); - auto result = oss.str(); - return result; -} - -} diff --git a/programs/copier/StatusAccumulator.h b/programs/copier/StatusAccumulator.h deleted file mode 100644 index d420b611602..00000000000 --- a/programs/copier/StatusAccumulator.h +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include - -#include -#include - -namespace DB -{ - -class StatusAccumulator -{ -public: - struct TableStatus - { - size_t all_partitions_count; - size_t processed_partitions_count; 
- }; - - using Map = std::unordered_map; - using MapPtr = std::shared_ptr; - - static MapPtr fromJSON(String state_json); - static String serializeToJSON(MapPtr statuses); -}; - -} diff --git a/programs/copier/TaskCluster.cpp b/programs/copier/TaskCluster.cpp deleted file mode 100644 index 0fb06616e50..00000000000 --- a/programs/copier/TaskCluster.cpp +++ /dev/null @@ -1,74 +0,0 @@ -#include "TaskCluster.h" - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -TaskCluster::TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_) - : task_zookeeper_path(task_zookeeper_path_) - , default_local_database(default_local_database_) -{} - -void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key) -{ - String prefix = base_key.empty() ? "" : base_key + "."; - - clusters_prefix = prefix + "remote_servers"; - if (!config.has(clusters_prefix)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "You should specify list of clusters in {}", clusters_prefix); - - Poco::Util::AbstractConfiguration::Keys tables_keys; - config.keys(prefix + "tables", tables_keys); - - for (const auto & table_key : tables_keys) - { - table_tasks.emplace_back(*this, config, prefix + "tables", table_key); - } -} - -void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfiguration & config, const String & base_key) -{ - String prefix = base_key.empty() ? "" : base_key + "."; - - max_workers = config.getUInt64(prefix + "max_workers"); - - settings_common = Settings(); - if (config.has(prefix + "settings")) - settings_common.loadSettingsFromConfig(prefix + "settings", config); - - settings_common.prefer_localhost_replica = false; - - settings_pull = settings_common; - if (config.has(prefix + "settings_pull")) - settings_pull.loadSettingsFromConfig(prefix + "settings_pull", config); - - settings_push = settings_common; - if (config.has(prefix + "settings_push")) - settings_push.loadSettingsFromConfig(prefix + "settings_push", config); - - auto set_default_value = [] (auto && setting, auto && default_value) - { - setting = setting.changed ? setting.value : default_value; - }; - - /// Override important settings - settings_pull.readonly = 1; - settings_pull.prefer_localhost_replica = false; - settings_push.distributed_foreground_insert = true; - settings_push.prefer_localhost_replica = false; - - set_default_value(settings_pull.load_balancing, LoadBalancing::NEAREST_HOSTNAME); - set_default_value(settings_pull.max_threads, 1); - set_default_value(settings_pull.max_block_size, 8192UL); - set_default_value(settings_pull.preferred_block_size_bytes, 0); - - set_default_value(settings_push.distributed_background_insert_timeout, 0); - set_default_value(settings_push.alter_sync, 2); -} - -} - diff --git a/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h deleted file mode 100644 index a7f8bc3baca..00000000000 --- a/programs/copier/TaskCluster.h +++ /dev/null @@ -1,51 +0,0 @@ -#pragma once - -#include "TaskTable.h" - -#include -#include - -#include - -#include - -namespace DB -{ - -struct TaskCluster -{ - TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_); - - void loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key = ""); - - /// Set (or update) settings and max_workers param - void reloadSettings(const Poco::Util::AbstractConfiguration & config, const String & base_key = ""); - - /// Base node for all tasks. 
Its structure: - /// workers/ - directory with active workers (amount of them is less or equal max_workers) - /// description - node with task configuration - /// table_table1/ - directories with per-partition copying status - String task_zookeeper_path; - - /// Database used to create temporary Distributed tables - String default_local_database; - - /// Limits number of simultaneous workers - UInt64 max_workers = 0; - - /// Base settings for pull and push - Settings settings_common; - /// Settings used to fetch data - Settings settings_pull; - /// Settings used to insert data - Settings settings_push; - - String clusters_prefix; - - /// Subtasks - TasksTable table_tasks; - - pcg64 random_engine; -}; - -} diff --git a/programs/copier/TaskShard.cpp b/programs/copier/TaskShard.cpp deleted file mode 100644 index d156f451a84..00000000000 --- a/programs/copier/TaskShard.cpp +++ /dev/null @@ -1,37 +0,0 @@ -#include "TaskShard.h" - -#include "TaskTable.h" - -namespace DB -{ - -TaskShard::TaskShard(TaskTable & parent, const Cluster::ShardInfo & info_) - : task_table(parent) - , info(info_) -{ - list_of_split_tables_on_shard.assign(task_table.number_of_splits, DatabaseAndTableName()); -} - -UInt32 TaskShard::numberInCluster() const -{ - return info.shard_num; -} - -UInt32 TaskShard::indexInCluster() const -{ - return info.shard_num - 1; -} - -String DB::TaskShard::getDescription() const -{ - return fmt::format("N{} (having a replica {}, pull table {} of cluster {}", - numberInCluster(), getHostNameExample(), getQuotedTable(task_table.table_pull), task_table.cluster_pull_name); -} - -String DB::TaskShard::getHostNameExample() const -{ - const auto & replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); - return replicas.at(0).readableString(); -} - -} diff --git a/programs/copier/TaskShard.h b/programs/copier/TaskShard.h deleted file mode 100644 index 05d652077ea..00000000000 --- a/programs/copier/TaskShard.h +++ /dev/null @@ -1,56 +0,0 @@ -#pragma once - -#include "Aliases.h" -#include "Internals.h" -#include "ClusterPartition.h" -#include "ShardPartition.h" - - -namespace DB -{ - -struct TaskTable; - -struct TaskShard -{ - TaskShard(TaskTable & parent, const Cluster::ShardInfo & info_); - - TaskTable & task_table; - - Cluster::ShardInfo info; - - UInt32 numberInCluster() const; - - UInt32 indexInCluster() const; - - String getDescription() const; - - String getHostNameExample() const; - - /// Used to sort clusters by their proximity - ShardPriority priority; - - /// Column with unique destination partitions (computed from engine_push_partition_key expr.) 
in the shard - ColumnWithTypeAndName partition_key_column; - - /// There is a task for each destination partition - TasksPartition partition_tasks; - - /// Which partitions have been checked for existence - /// If some partition from this lists is exists, it is in partition_tasks - std::set checked_partitions; - - /// Last CREATE TABLE query of the table of the shard - ASTPtr current_pull_table_create_query; - ASTPtr current_push_table_create_query; - - /// Internal distributed tables - DatabaseAndTableName table_read_shard; - DatabaseAndTableName main_table_split_shard; - ListOfDatabasesAndTableNames list_of_split_tables_on_shard; -}; - -using TaskShardPtr = std::shared_ptr; -using TasksShard = std::vector; - -} diff --git a/programs/copier/TaskTable.cpp b/programs/copier/TaskTable.cpp deleted file mode 100644 index d055ceb4c7b..00000000000 --- a/programs/copier/TaskTable.cpp +++ /dev/null @@ -1,222 +0,0 @@ -#include "TaskTable.h" - -#include "ClusterPartition.h" -#include "TaskCluster.h" - -#include -#include - -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int UNKNOWN_ELEMENT_IN_CONFIG; - extern const int LOGICAL_ERROR; -} - -TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, - const String & prefix_, const String & table_key) - : task_cluster(parent) -{ - String table_prefix = prefix_ + "." + table_key + "."; - - name_in_config = table_key; - - number_of_splits = config.getUInt64(table_prefix + "number_of_splits", 3); - - allow_to_copy_alias_and_materialized_columns = config.getBool(table_prefix + "allow_to_copy_alias_and_materialized_columns", false); - allow_to_drop_target_partitions = config.getBool(table_prefix + "allow_to_drop_target_partitions", false); - - cluster_pull_name = config.getString(table_prefix + "cluster_pull"); - cluster_push_name = config.getString(table_prefix + "cluster_push"); - - table_pull.first = config.getString(table_prefix + "database_pull"); - table_pull.second = config.getString(table_prefix + "table_pull"); - - table_push.first = config.getString(table_prefix + "database_push"); - table_push.second = config.getString(table_prefix + "table_push"); - - /// Used as node name in ZooKeeper - table_id = escapeForFileName(cluster_push_name) - + "." + escapeForFileName(table_push.first) - + "." 
+ escapeForFileName(table_push.second); - - engine_push_str = config.getString(table_prefix + "engine", "rand()"); - - { - ParserStorage parser_storage{ParserStorage::TABLE_ENGINE}; - engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - engine_push_partition_key_ast = extractPartitionKey(engine_push_ast); - primary_key_comma_separated = boost::algorithm::join(extractPrimaryKeyColumnNames(engine_push_ast), ", "); - is_replicated_table = isReplicatedTableEngine(engine_push_ast); - } - - sharding_key_str = config.getString(table_prefix + "sharding_key"); - - auxiliary_engine_split_asts.reserve(number_of_splits); - { - ParserExpressionWithOptionalAlias parser_expression(false); - sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - main_engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second, - sharding_key_ast); - - for (const auto piece_number : collections::range(0, number_of_splits)) - { - auxiliary_engine_split_asts.emplace_back - ( - createASTStorageDistributed(cluster_push_name, table_push.first, - table_push.second + "_piece_" + toString(piece_number), sharding_key_ast) - ); - } - } - - where_condition_str = config.getString(table_prefix + "where_condition", ""); - if (!where_condition_str.empty()) - { - ParserExpressionWithOptionalAlias parser_expression(false); - where_condition_ast = parseQuery(parser_expression, where_condition_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - - // Will use canonical expression form - where_condition_str = queryToString(where_condition_ast); - } - - String enabled_partitions_prefix = table_prefix + "enabled_partitions"; - has_enabled_partitions = config.has(enabled_partitions_prefix); - - if (has_enabled_partitions) - { - Strings keys; - config.keys(enabled_partitions_prefix, keys); - - if (keys.empty()) - { - /// Parse list of partition from space-separated string - String partitions_str = config.getString(table_prefix + "enabled_partitions"); - boost::trim_if(partitions_str, isWhitespaceASCII); - boost::split(enabled_partitions, partitions_str, isWhitespaceASCII, boost::token_compress_on); - } - else - { - /// Parse sequence of ... - for (const String &key : keys) - { - if (!startsWith(key, "partition")) - throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown key {} in {}", key, enabled_partitions_prefix); - - enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." 
+ key)); - } - } - - std::copy(enabled_partitions.begin(), enabled_partitions.end(), std::inserter(enabled_partitions_set, enabled_partitions_set.begin())); - } -} - - -String TaskTable::getPartitionPath(const String & partition_name) const -{ - return task_cluster.task_zookeeper_path // root - + "/tables/" + table_id // tables/dst_cluster.merge.hits - + "/" + escapeForFileName(partition_name); // 201701 -} - -String TaskTable::getPartitionAttachIsActivePath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/attach_active"; -} - -String TaskTable::getPartitionAttachIsDonePath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/attach_is_done"; -} - -String TaskTable::getPartitionPiecePath(const String & partition_name, size_t piece_number) const -{ - assert(piece_number < number_of_splits); - return getPartitionPath(partition_name) + "/piece_" + toString(piece_number); // 1...number_of_splits -} - -String TaskTable::getCertainPartitionIsDirtyPath(const String &partition_name) const -{ - return getPartitionPath(partition_name) + "/is_dirty"; -} - -String TaskTable::getCertainPartitionPieceIsDirtyPath(const String & partition_name, const size_t piece_number) const -{ - return getPartitionPiecePath(partition_name, piece_number) + "/is_dirty"; -} - -String TaskTable::getCertainPartitionIsCleanedPath(const String & partition_name) const -{ - return getCertainPartitionIsDirtyPath(partition_name) + "/cleaned"; -} - -String TaskTable::getCertainPartitionPieceIsCleanedPath(const String & partition_name, const size_t piece_number) const -{ - return getCertainPartitionPieceIsDirtyPath(partition_name, piece_number) + "/cleaned"; -} - -String TaskTable::getCertainPartitionTaskStatusPath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/shards"; -} - -String TaskTable::getCertainPartitionPieceTaskStatusPath(const String & partition_name, const size_t piece_number) const -{ - return getPartitionPiecePath(partition_name, piece_number) + "/shards"; -} - -bool TaskTable::isReplicatedTable() const -{ - return is_replicated_table; -} - -String TaskTable::getStatusAllPartitionCount() const -{ - return task_cluster.task_zookeeper_path + "/status/all_partitions_count"; -} - -String TaskTable::getStatusProcessedPartitionsCount() const -{ - return task_cluster.task_zookeeper_path + "/status/processed_partitions_count"; -} - -ASTPtr TaskTable::rewriteReplicatedCreateQueryToPlain() const -{ - ASTPtr prev_engine_push_ast = engine_push_ast->clone(); - - auto & new_storage_ast = prev_engine_push_ast->as(); - auto & new_engine_ast = new_storage_ast.engine->as(); - - /// Remove "Replicated" from name - new_engine_ast.name = new_engine_ast.name.substr(10); - - if (new_engine_ast.arguments) - { - auto & replicated_table_arguments = new_engine_ast.arguments->children; - - - /// In some cases of Atomic database engine usage ReplicatedMergeTree tables - /// could be created without arguments. - if (!replicated_table_arguments.empty()) - { - /// Delete first two arguments of Replicated...MergeTree() table. 
- replicated_table_arguments.erase(replicated_table_arguments.begin()); - replicated_table_arguments.erase(replicated_table_arguments.begin()); - } - } - - return new_storage_ast.clone(); -} - -ClusterPartition & TaskTable::getClusterPartition(const String & partition_name) -{ - auto it = cluster_partitions.find(partition_name); - if (it == cluster_partitions.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no cluster partition {} in {}", partition_name, table_id); - return it->second; -} - -} diff --git a/programs/copier/TaskTable.h b/programs/copier/TaskTable.h deleted file mode 100644 index 2bb7f078bc6..00000000000 --- a/programs/copier/TaskTable.h +++ /dev/null @@ -1,173 +0,0 @@ -#pragma once - -#include "Aliases.h" -#include "TaskShard.h" - - -namespace DB -{ - -struct ClusterPartition; -struct TaskCluster; - -struct TaskTable -{ - TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, const String & table_key); - - TaskCluster & task_cluster; - - /// These functions used in checkPartitionIsDone() or checkPartitionPieceIsDone() - /// They are implemented here not to call task_table.tasks_shard[partition_name].second.pieces[current_piece_number] etc. - - String getPartitionPath(const String & partition_name) const; - - String getPartitionAttachIsActivePath(const String & partition_name) const; - - String getPartitionAttachIsDonePath(const String & partition_name) const; - - String getPartitionPiecePath(const String & partition_name, size_t piece_number) const; - - String getCertainPartitionIsDirtyPath(const String & partition_name) const; - - String getCertainPartitionPieceIsDirtyPath(const String & partition_name, size_t piece_number) const; - - String getCertainPartitionIsCleanedPath(const String & partition_name) const; - - String getCertainPartitionPieceIsCleanedPath(const String & partition_name, size_t piece_number) const; - - String getCertainPartitionTaskStatusPath(const String & partition_name) const; - - String getCertainPartitionPieceTaskStatusPath(const String & partition_name, size_t piece_number) const; - - bool isReplicatedTable() const; - - /// These nodes are used for check-status option - String getStatusAllPartitionCount() const; - String getStatusProcessedPartitionsCount() const; - - /// Partitions will be split into number-of-splits pieces. - /// Each piece will be copied independently. 
(10 by default) - size_t number_of_splits; - - bool allow_to_copy_alias_and_materialized_columns{false}; - bool allow_to_drop_target_partitions{false}; - - String name_in_config; - - /// Used as task ID - String table_id; - - /// Column names in primary key - String primary_key_comma_separated; - - /// Source cluster and table - String cluster_pull_name; - DatabaseAndTableName table_pull; - - /// Destination cluster and table - String cluster_push_name; - DatabaseAndTableName table_push; - - /// Storage of destination table - /// (tables that are stored on each shard of target cluster) - String engine_push_str; - ASTPtr engine_push_ast; - ASTPtr engine_push_partition_key_ast; - - /// First argument of Replicated...MergeTree() - String engine_push_zk_path; - bool is_replicated_table; - - ASTPtr rewriteReplicatedCreateQueryToPlain() const; - - /* - * A Distributed table definition used to split data - * Distributed table will be created on each shard of default - * cluster to perform data copying and resharding - * */ - String sharding_key_str; - ASTPtr sharding_key_ast; - ASTPtr main_engine_split_ast; - - /* - * To copy partition piece form one cluster to another we have to use Distributed table. - * In case of usage separate table (engine_push) for each partition piece, - * we have to use many Distributed tables. - * */ - ASTs auxiliary_engine_split_asts; - - /// Additional WHERE expression to filter input data - String where_condition_str; - ASTPtr where_condition_ast; - - /// Resolved clusters - ClusterPtr cluster_pull; - ClusterPtr cluster_push; - - /// Filter partitions that should be copied - bool has_enabled_partitions = false; - Strings enabled_partitions; - NameSet enabled_partitions_set; - - /** - * Prioritized list of shards - * all_shards contains information about all shards in the table. - * So we have to check whether particular shard have current partition or not while processing. - */ - TasksShard all_shards; - TasksShard local_shards; - - /// All partitions of the current table. 
- ClusterPartitions cluster_partitions; - NameSet finished_cluster_partitions; - - /// Partition names to process in user-specified order - Strings ordered_partition_names; - - ClusterPartition & getClusterPartition(const String & partition_name); - - Stopwatch watch; - UInt64 bytes_copied = 0; - UInt64 rows_copied = 0; - - template - void initShards(RandomEngine &&random_engine); -}; - -using TasksTable = std::list; - - -template -inline void TaskTable::initShards(RandomEngine && random_engine) -{ - const String & fqdn_name = getFQDNOrHostName(); - std::uniform_int_distribution get_urand(0, std::numeric_limits::max()); - - // Compute the priority - for (const auto & shard_info : cluster_pull->getShardsInfo()) - { - TaskShardPtr task_shard = std::make_shared(*this, shard_info); - const auto & replicas = cluster_pull->getShardsAddresses().at(task_shard->indexInCluster()); - task_shard->priority = getReplicasPriority(replicas, fqdn_name, get_urand(random_engine)); - - all_shards.emplace_back(task_shard); - } - - // Sort by priority - std::sort(all_shards.begin(), all_shards.end(), - [](const TaskShardPtr & lhs, const TaskShardPtr & rhs) - { - return ShardPriority::greaterPriority(lhs->priority, rhs->priority); - }); - - // Cut local shards - auto it_first_remote = std::lower_bound(all_shards.begin(), all_shards.end(), 1, - [](const TaskShardPtr & lhs, UInt8 is_remote) - { - return lhs->priority.is_remote < is_remote; - }); - - local_shards.assign(all_shards.begin(), it_first_remote); -} - -} diff --git a/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h deleted file mode 100644 index c15db73f060..00000000000 --- a/programs/copier/ZooKeeperStaff.h +++ /dev/null @@ -1,221 +0,0 @@ -#pragma once - -/** Allows to compare two incremental counters of type UInt32 in presence of possible overflow. - * We assume that we compare values that are not too far away. - * For example, when we increment 0xFFFFFFFF, we get 0. So, 0xFFFFFFFF is less than 0. - */ -class WrappingUInt32 -{ -public: - UInt32 value; - - explicit WrappingUInt32(UInt32 _value) - : value(_value) - {} - - bool operator<(const WrappingUInt32 & other) const - { - return value != other.value && *this <= other; - } - - bool operator<=(const WrappingUInt32 & other) const - { - const UInt32 HALF = static_cast(1) << 31; - return (value <= other.value && other.value - value < HALF) - || (value > other.value && value - other.value > HALF); - } - - bool operator==(const WrappingUInt32 & other) const - { - return value == other.value; - } -}; - -/** Conforming Zxid definition. - * cf. https://github.com/apache/zookeeper/blob/631d1b284f0edb1c4f6b0fb221bf2428aec71aaa/zookeeper-docs/src/main/resources/markdown/zookeeperInternals.md#guarantees-properties-and-definitions - * - * But it is better to read this: https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html - * - * Actually here is the definition of Zxid. - * Every change to the ZooKeeper state receives a stamp in the form of a zxid (ZooKeeper Transaction Id). - * This exposes the total ordering of all changes to ZooKeeper. Each change will have a unique zxid - * and if zxid1 is smaller than zxid2 then zxid1 happened before zxid2. 
- */ -class Zxid -{ -public: - WrappingUInt32 epoch; - WrappingUInt32 counter; - explicit Zxid(UInt64 _zxid) - : epoch(static_cast(_zxid >> 32)) - , counter(static_cast(_zxid)) - {} - - bool operator<=(const Zxid & other) const - { - return (epoch < other.epoch) - || (epoch == other.epoch && counter <= other.counter); - } - - bool operator==(const Zxid & other) const - { - return epoch == other.epoch && counter == other.counter; - } -}; - -/* When multiple ClusterCopiers discover that the target partition is not empty, - * they will attempt to clean up this partition before proceeding to copying. - * - * Instead of purging is_dirty, the history of cleaning work is preserved and partition hygiene is established - * based on a happens-before relation between the events. - * This relation is encoded by LogicalClock based on the mzxid of the is_dirty ZNode and is_dirty/cleaned. - * The fact of the partition hygiene is encoded by CleanStateClock. - * - * For you to know what mzxid means: - * - * ZooKeeper Stat Structure: - * The Stat structure for each znode in ZooKeeper is made up of the following fields: - * - * -- czxid - * The zxid of the change that caused this znode to be created. - * - * -- mzxid - * The zxid of the change that last modified this znode. - * - * -- ctime - * The time in milliseconds from epoch when this znode was created. - * - * -- mtime - * The time in milliseconds from epoch when this znode was last modified. - * - * -- version - * The number of changes to the data of this znode. - * - * -- cversion - * The number of changes to the children of this znode. - * - * -- aversion - * The number of changes to the ACL of this znode. - * - * -- ephemeralOwner - * The session id of the owner of this znode if the znode is an ephemeral node. - * If it is not an ephemeral node, it will be zero. - * - * -- dataLength - * The length of the data field of this znode. - * - * -- numChildren - * The number of children of this znode. 
- * */ - -class LogicalClock -{ -public: - std::optional zxid; - - LogicalClock() = default; - - explicit LogicalClock(UInt64 _zxid) - : zxid(_zxid) - {} - - bool hasHappened() const - { - return bool(zxid); - } - - /// happens-before relation with a reasonable time bound - bool happensBefore(const LogicalClock & other) const - { - return !zxid - || (other.zxid && *zxid <= *other.zxid); - } - - bool operator<=(const LogicalClock & other) const - { - return happensBefore(other); - } - - /// strict equality check - bool operator==(const LogicalClock & other) const - { - return zxid == other.zxid; - } -}; - - -class CleanStateClock -{ -public: - LogicalClock discovery_zxid; - std::optional discovery_version; - - LogicalClock clean_state_zxid; - std::optional clean_state_version; - - std::shared_ptr stale; - - bool is_clean() const - { - return !is_stale() - && (!discovery_zxid.hasHappened() || (clean_state_zxid.hasHappened() && discovery_zxid <= clean_state_zxid)); - } - - bool is_stale() const - { - return stale->load(); - } - - CleanStateClock( - const zkutil::ZooKeeperPtr & zookeeper, - const String & discovery_path, - const String & clean_state_path) - : stale(std::make_shared(false)) - { - Coordination::Stat stat{}; - String _some_data; - auto watch_callback = - [my_stale = stale] (const Coordination::WatchResponse & rsp) - { - auto logger = getLogger("ClusterCopier"); - if (rsp.error == Coordination::Error::ZOK) - { - switch (rsp.type) /// NOLINT(bugprone-switch-missing-default-case) - { - case Coordination::CREATED: - LOG_DEBUG(logger, "CleanStateClock change: CREATED, at {}", rsp.path); - my_stale->store(true); - break; - case Coordination::CHANGED: - LOG_DEBUG(logger, "CleanStateClock change: CHANGED, at {}", rsp.path); - my_stale->store(true); - } - } - }; - if (zookeeper->tryGetWatch(discovery_path, _some_data, &stat, watch_callback)) - { - discovery_zxid = LogicalClock(stat.mzxid); - discovery_version = stat.version; - } - if (zookeeper->tryGetWatch(clean_state_path, _some_data, &stat, watch_callback)) - { - clean_state_zxid = LogicalClock(stat.mzxid); - clean_state_version = stat.version; - } - } - - bool operator==(const CleanStateClock & other) const - { - return !is_stale() - && !other.is_stale() - && discovery_zxid == other.discovery_zxid - && discovery_version == other.discovery_version - && clean_state_zxid == other.clean_state_zxid - && clean_state_version == other.clean_state_version; - } - - bool operator!=(const CleanStateClock & other) const - { - return !(*this == other); - } -}; diff --git a/programs/copier/clickhouse-copier.cpp b/programs/copier/clickhouse-copier.cpp deleted file mode 100644 index 4dabb01775b..00000000000 --- a/programs/copier/clickhouse-copier.cpp +++ /dev/null @@ -1 +0,0 @@ -int mainEntryClickHouseClusterCopier(int argc, char ** argv); diff --git a/programs/diagnostics/testdata/configs/xml/config.xml b/programs/diagnostics/testdata/configs/xml/config.xml index ae09d207091..eb7c70cf498 100644 --- a/programs/diagnostics/testdata/configs/xml/config.xml +++ b/programs/diagnostics/testdata/configs/xml/config.xml @@ -94,7 +94,7 @@ 8123 - - - system -
query_log
- - - 1000 - - \ No newline at end of file diff --git a/tests/integration/test_cluster_copier/configs/config-copier.xml b/tests/integration/test_cluster_copier/configs/config-copier.xml deleted file mode 100644 index 590b1892f8d..00000000000 --- a/tests/integration/test_cluster_copier/configs/config-copier.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - information - /var/log/clickhouse-server/copier/log.log - /var/log/clickhouse-server/copier/log.err.log - 1000M - 10 - /var/log/clickhouse-server/copier/stderr.log - /var/log/clickhouse-server/copier/stdout.log - - diff --git a/tests/integration/test_cluster_copier/configs/users.xml b/tests/integration/test_cluster_copier/configs/users.xml deleted file mode 100644 index b463dfc81e7..00000000000 --- a/tests/integration/test_cluster_copier/configs/users.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - 1 - - 5 - 1 - - - - - - - - ::/0 - - default - default - - - 12345678 - - ::/0 - - default - default - - - - - - - - diff --git a/tests/integration/test_cluster_copier/configs_three_nodes/conf.d/clusters.xml b/tests/integration/test_cluster_copier/configs_three_nodes/conf.d/clusters.xml deleted file mode 100644 index 9de7b57de27..00000000000 --- a/tests/integration/test_cluster_copier/configs_three_nodes/conf.d/clusters.xml +++ /dev/null @@ -1,27 +0,0 @@ - - - - - false - - first - 9000 - - - - false - - second - 9000 - - - - false - - third - 9000 - - - - - diff --git a/tests/integration/test_cluster_copier/configs_three_nodes/conf.d/ddl.xml b/tests/integration/test_cluster_copier/configs_three_nodes/conf.d/ddl.xml deleted file mode 100644 index 64fa32335ab..00000000000 --- a/tests/integration/test_cluster_copier/configs_three_nodes/conf.d/ddl.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - /clickhouse/task_queue/ddl - - \ No newline at end of file diff --git a/tests/integration/test_cluster_copier/configs_three_nodes/config-copier.xml b/tests/integration/test_cluster_copier/configs_three_nodes/config-copier.xml deleted file mode 100644 index d0cab0fafb7..00000000000 --- a/tests/integration/test_cluster_copier/configs_three_nodes/config-copier.xml +++ /dev/null @@ -1,27 +0,0 @@ - - - information - /var/log/clickhouse-server/copier/log.log - /var/log/clickhouse-server/copier/log.err.log - 1000M - 10 - /var/log/clickhouse-server/copier/stderr.log - /var/log/clickhouse-server/copier/stdout.log - - - - - zoo1 - 2181 - - - zoo2 - 2181 - - - zoo3 - 2181 - - 2000 - - diff --git a/tests/integration/test_cluster_copier/configs_three_nodes/users.xml b/tests/integration/test_cluster_copier/configs_three_nodes/users.xml deleted file mode 100644 index badaf46a5ca..00000000000 --- a/tests/integration/test_cluster_copier/configs_three_nodes/users.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - 1 - 1 - - - - - - - - ::/0 - - default - default - - - 12345678 - - ::/0 - - default - default - - - - - - - - diff --git a/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/clusters.xml b/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/clusters.xml deleted file mode 100644 index 38d88308631..00000000000 --- a/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/clusters.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - - - false - - first_of_two - 9000 - - - - - - false - - second_of_two - 9000 - - - - - diff --git a/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/ddl.xml b/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/ddl.xml deleted file mode 100644 index 64fa32335ab..00000000000 --- 
a/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/ddl.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - /clickhouse/task_queue/ddl - - \ No newline at end of file diff --git a/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/storage_configuration.xml b/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/storage_configuration.xml deleted file mode 100644 index 8306f40ad6a..00000000000 --- a/tests/integration/test_cluster_copier/configs_two_nodes/conf.d/storage_configuration.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - /jbod1/ - - - /jbod2/ - - - /external/ - - - - - - - - external - -
-                            <disk>jbod1</disk>
-                            <disk>jbod2</disk>
-
-
-
-
- -
- -
diff --git a/tests/integration/test_cluster_copier/configs_two_nodes/config-copier.xml b/tests/integration/test_cluster_copier/configs_two_nodes/config-copier.xml deleted file mode 100644 index 55bd24816ae..00000000000 --- a/tests/integration/test_cluster_copier/configs_two_nodes/config-copier.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - information - /var/log/clickhouse-server/copier/log.log - /var/log/clickhouse-server/copier/log.err.log - 1000M - 10 - /var/log/clickhouse-server/copier/stderr.log - /var/log/clickhouse-server/copier/stdout.log - - - - - zoo1 - 2181 - - 2000 - - diff --git a/tests/integration/test_cluster_copier/configs_two_nodes/users.xml b/tests/integration/test_cluster_copier/configs_two_nodes/users.xml deleted file mode 100644 index badaf46a5ca..00000000000 --- a/tests/integration/test_cluster_copier/configs_two_nodes/users.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - 1 - 1 - - - - - - - - ::/0 - - default - default - - - 12345678 - - ::/0 - - default - default - - - - - - - - diff --git a/tests/integration/test_cluster_copier/task0_description.xml b/tests/integration/test_cluster_copier/task0_description.xml deleted file mode 100644 index 8d74d0bdde0..00000000000 --- a/tests/integration/test_cluster_copier/task0_description.xml +++ /dev/null @@ -1,95 +0,0 @@ - - - 3 - - - - 1 - - - - - 0 - - - - - - - - - - cluster0 - default - hits - - cluster1 - default - hits - - 2 - - 3 4 5 6 1 2 0 - - - ENGINE=ReplicatedMergeTree PARTITION BY d % 3 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16 - - - d + 1 - - - d - d = 0 - - - - - - - - true - - s0_0_0 - 9000 - - - s0_0_1 - 9000 - - - - true - - s0_1_0 - 9000 - - - - - - - true - - s1_0_0 - 9000 - - - s1_0_1 - 9000 - - - - true - - s1_1_0 - 9000 - - - - 255.255.255.255 - 9000 - - - - - - diff --git a/tests/integration/test_cluster_copier/task_drop_target_partition.xml b/tests/integration/test_cluster_copier/task_drop_target_partition.xml deleted file mode 100644 index dc8e6452243..00000000000 --- a/tests/integration/test_cluster_copier/task_drop_target_partition.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - false - - first_of_two - 9000 - - - - - - false - - second_of_two - 9000 - - - - - - 2 - - - - source - db_drop_target_partition - source - - destination - db_drop_target_partition - destination - - true - - ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) ORDER BY (Column3, Column2, Column1) - rand() - - - diff --git a/tests/integration/test_cluster_copier/task_month_to_week_description.xml b/tests/integration/test_cluster_copier/task_month_to_week_description.xml deleted file mode 100644 index bc290ca397f..00000000000 --- a/tests/integration/test_cluster_copier/task_month_to_week_description.xml +++ /dev/null @@ -1,99 +0,0 @@ - - - 4 - - - - 1 - 2 - - - - 0 - - - - - - cluster0 - default - a - - cluster1 - default - b - - - - 2 - - - ENGINE= - ReplicatedMergeTree - PARTITION BY toMonday(date) - ORDER BY d - - - - jumpConsistentHash(intHash64(d), 2) - - - - - - - - - - - true - - s0_0_0 - 9000 - - - s0_0_1 - 9000 - - - - true - - s0_1_0 - 9000 - - - - - - - true - - s1_0_0 - 9000 - - - s1_0_1 - 9000 - - - - true - - s1_1_0 - 9000 - - - - 255.255.255.255 - 9000 - - - - - - diff --git a/tests/integration/test_cluster_copier/task_no_arg.xml b/tests/integration/test_cluster_copier/task_no_arg.xml deleted file mode 100644 index 262ff073537..00000000000 --- a/tests/integration/test_cluster_copier/task_no_arg.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - 1 - - s0_0_0 - 9000 - - - - - - - 1 - - s1_1_0 
- 9000 - - - - - - 1 - - - - source_cluster - default - copier_test1 - - default_cluster - default - copier_test1_1 - ENGINE = MergeTree PARTITION BY date ORDER BY (date, sipHash64(date)) SAMPLE BY sipHash64(date) - rand() - - - diff --git a/tests/integration/test_cluster_copier/task_no_index.xml b/tests/integration/test_cluster_copier/task_no_index.xml deleted file mode 100644 index 265f99e21a6..00000000000 --- a/tests/integration/test_cluster_copier/task_no_index.xml +++ /dev/null @@ -1,109 +0,0 @@ - - - - - false - - s0_0_0 - 9000 - - - - - - - false - - s1_1_0 - 9000 - - - - - - - 2 - - - - 1 - - - - - 0 - - - - - 3 - - 1 - - - - - - - - source_cluster - default - ontime - - - - destination_cluster - default - ontime22 - - - - - - - ENGINE = MergeTree() PARTITION BY Year ORDER BY (Year, FlightDate) SETTINGS index_granularity=8192 - - - - - jumpConsistentHash(intHash64(Year), 2) - - - - - - - 2017 - - - - - - - diff --git a/tests/integration/test_cluster_copier/task_non_partitioned_table.xml b/tests/integration/test_cluster_copier/task_non_partitioned_table.xml deleted file mode 100644 index d5424b95f45..00000000000 --- a/tests/integration/test_cluster_copier/task_non_partitioned_table.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - 1 - - s0_0_0 - 9000 - - - - - - - 1 - - s1_1_0 - 9000 - - - - - - 1 - - - - source_cluster - default - copier_test1 - - default_cluster - default - copier_test1_1 - ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 - rand() - - - diff --git a/tests/integration/test_cluster_copier/task_self_copy.xml b/tests/integration/test_cluster_copier/task_self_copy.xml deleted file mode 100644 index 21d577bc397..00000000000 --- a/tests/integration/test_cluster_copier/task_self_copy.xml +++ /dev/null @@ -1,63 +0,0 @@ - - 9440 - - - - false - - s0_0_0 - 9000 - dbuser - 12345678 - 0 - - - - - - - false - - s0_0_0 - 9000 - dbuser - 12345678 - 0 - - - - - - 2 - - - 1 - - - - 0 - - - - 3 - 1 - - - - - source_cluster - db1 - source_table - - destination_cluster - db2 - destination_table - - - ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity = 8192 - - - rand() - - - \ No newline at end of file diff --git a/tests/integration/test_cluster_copier/task_skip_index.xml b/tests/integration/test_cluster_copier/task_skip_index.xml deleted file mode 100644 index b04cec963d4..00000000000 --- a/tests/integration/test_cluster_copier/task_skip_index.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - false - - first_of_two - 9000 - - - - - - false - - second_of_two - 9000 - - - - - - 2 - - - - source - db_skip_index - source - - destination - db_skip_index - destination - - ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) ORDER BY (Column3, Column2, Column1) - rand() - - - diff --git a/tests/integration/test_cluster_copier/task_taxi_data.xml b/tests/integration/test_cluster_copier/task_taxi_data.xml deleted file mode 100644 index 94fa5087338..00000000000 --- a/tests/integration/test_cluster_copier/task_taxi_data.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - false - - first - 9000 - - - - false - - second - 9000 - - - - false - - third - 9000 - - - - - - 2 - - - - events - dailyhistory - yellow_tripdata_staging - events - monthlyhistory - yellow_tripdata_staging - Engine=ReplacingMergeTree() PRIMARY KEY (tpep_pickup_datetime, id) ORDER BY (tpep_pickup_datetime, id) PARTITION BY (pickup_location_id, toYYYYMM(tpep_pickup_datetime)) - sipHash64(id) % 3 - - - \ No newline at end of file diff --git a/tests/integration/test_cluster_copier/task_test_block_size.xml 
b/tests/integration/test_cluster_copier/task_test_block_size.xml deleted file mode 100644 index bf29c7e1832..00000000000 --- a/tests/integration/test_cluster_copier/task_test_block_size.xml +++ /dev/null @@ -1,101 +0,0 @@ - - - 1 - - - - 1 - - - - - - - - - shard_0_0 - default - test_block_size - - cluster1 - default - test_block_size - - - '1970-01-01' - - - - ENGINE= - ReplicatedMergeTree - ORDER BY d PARTITION BY partition - - - - jumpConsistentHash(intHash64(d), 2) - - - - - - - - - - - true - - s0_0_0 - 9000 - - - s0_0_1 - 9000 - - - - true - - s0_1_0 - 9000 - - - - - - - true - - s1_0_0 - 9000 - - - s1_0_1 - 9000 - - - - true - - s1_1_0 - 9000 - - - - - - - true - - s0_0_0 - 9000 - - - s0_0_1 - 9000 - - - - - - diff --git a/tests/integration/test_cluster_copier/task_trivial.xml b/tests/integration/test_cluster_copier/task_trivial.xml deleted file mode 100644 index a3b8bc03888..00000000000 --- a/tests/integration/test_cluster_copier/task_trivial.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - 3 - - - - 1 - - - - - 0 - - - - - - - - - - source_trivial_cluster - default - trivial - - destination_trivial_cluster - default - trivial - - - ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster{cluster}/{shard}/hits', '{replica}') PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16 - - - d + 1 - - - d - d = 0 - - - - - - - - - first_trivial - 9000 - - - - - - - - - second_trivial - 9000 - - - - - - diff --git a/tests/integration/test_cluster_copier/task_trivial_without_arguments.xml b/tests/integration/test_cluster_copier/task_trivial_without_arguments.xml deleted file mode 100644 index 0197dee0181..00000000000 --- a/tests/integration/test_cluster_copier/task_trivial_without_arguments.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - 3 - - - - 1 - - - - - 0 - - - - - - - - - - source_trivial_cluster - default - trivial_without_arguments - - destination_trivial_cluster - default - trivial_without_arguments - - - ENGINE=ReplicatedMergeTree() PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16 - - - d + 1 - - - d - d = 0 - - - - - - - - - first_trivial - 9000 - - - - - - - - - second_trivial - 9000 - - - - - - diff --git a/tests/integration/test_cluster_copier/task_ttl_columns.xml b/tests/integration/test_cluster_copier/task_ttl_columns.xml deleted file mode 100644 index 2069c509c87..00000000000 --- a/tests/integration/test_cluster_copier/task_ttl_columns.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - false - - first_of_two - 9000 - - - - - - false - - second_of_two - 9000 - - - - - - 2 - - - - source - db_ttl_columns - source - - destination - db_ttl_columns - destination - - ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) ORDER BY (Column3, Column2, Column1) - rand() - - - diff --git a/tests/integration/test_cluster_copier/task_ttl_move_to_volume.xml b/tests/integration/test_cluster_copier/task_ttl_move_to_volume.xml deleted file mode 100644 index 2a51fa7a66d..00000000000 --- a/tests/integration/test_cluster_copier/task_ttl_move_to_volume.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - false - - first_of_two - 9000 - - - - - - false - - second_of_two - 9000 - - - - - - 2 - - - - source - db_move_to_volume - source - - destination - db_move_to_volume - destination - - ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) ORDER BY (Column3, Column2, Column1) TTL Column3 + INTERVAL 1 MONTH TO VOLUME 'external' SETTINGS storage_policy = 'external_with_jbods' - rand() - - - diff --git 
a/tests/integration/test_cluster_copier/task_with_different_schema.xml b/tests/integration/test_cluster_copier/task_with_different_schema.xml deleted file mode 100644 index e1e6ee4dc42..00000000000 --- a/tests/integration/test_cluster_copier/task_with_different_schema.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - false - - first_of_two - 9000 - - - - - - false - - second_of_two - 9000 - - - - - - 2 - - - - source - db_different_schema - source - - destination - db_different_schema - destination - - ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) ORDER BY (Column9, Column1, Column2, Column3, Column4) - rand() - - - diff --git a/tests/integration/test_cluster_copier/test.py b/tests/integration/test_cluster_copier/test.py deleted file mode 100644 index be71fc21e33..00000000000 --- a/tests/integration/test_cluster_copier/test.py +++ /dev/null @@ -1,653 +0,0 @@ -import os -import random -import sys -import time -import kazoo -import pytest -import string -import random -from contextlib import contextmanager -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import TSV - -import docker - -CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) - -COPYING_FAIL_PROBABILITY = 0.2 -MOVING_FAIL_PROBABILITY = 0.2 - -cluster = ClickHouseCluster(__file__) - - -def generateRandomString(count): - return "".join( - random.choice(string.ascii_uppercase + string.digits) for _ in range(count) - ) - - -def check_all_hosts_sucesfully_executed(tsv_content, num_hosts): - M = TSV.toMat(tsv_content) - hosts = [(l[0], l[1]) for l in M] # (host, port) - codes = [l[2] for l in M] - - assert len(hosts) == num_hosts and len(set(hosts)) == num_hosts, "\n" + tsv_content - assert len(set(codes)) == 1, "\n" + tsv_content - assert codes[0] == "0", "\n" + tsv_content - - -def ddl_check_query(instance, query, num_hosts=3): - contents = instance.query(query) - check_all_hosts_sucesfully_executed(contents, num_hosts) - return contents - - -@pytest.fixture(scope="module") -def started_cluster(): - global cluster - try: - clusters_schema = { - "0": {"0": ["0", "1"], "1": ["0"]}, - "1": {"0": ["0", "1"], "1": ["0"]}, - } - - for cluster_name, shards in clusters_schema.items(): - for shard_name, replicas in shards.items(): - for replica_name in replicas: - name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name) - cluster.add_instance( - name, - main_configs=[ - "configs/conf.d/query_log.xml", - "configs/conf.d/ddl.xml", - "configs/conf.d/clusters.xml", - ], - user_configs=["configs/users.xml"], - macros={ - "cluster": cluster_name, - "shard": shard_name, - "replica": replica_name, - }, - with_zookeeper=True, - ) - - cluster.start() - yield cluster - - finally: - cluster.shutdown() - - -class Task1: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_simple_" + generateRandomString(10) - self.container_task_file = "/task0_description.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task0_description.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - instance = cluster.instances["s0_0_0"] - - for cluster_num in ["0", "1"]: - ddl_check_query( - instance, - "DROP DATABASE IF EXISTS default ON CLUSTER cluster{} SYNC".format( - cluster_num - ), - ) - ddl_check_query( - instance, - "CREATE DATABASE default ON CLUSTER cluster{} ".format(cluster_num), - ) - - ddl_check_query( - instance, - "CREATE TABLE hits ON CLUSTER cluster0 (d UInt64, d1 UInt64 MATERIALIZED d+1) " - + "ENGINE=ReplicatedMergeTree " - + "PARTITION BY d % 3 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16", - ) - ddl_check_query( - instance, - "CREATE TABLE hits_all ON CLUSTER cluster0 (d UInt64) ENGINE=Distributed(cluster0, default, hits, d)", - ) - ddl_check_query( - instance, - "CREATE TABLE hits_all ON CLUSTER cluster1 (d UInt64) ENGINE=Distributed(cluster1, default, hits, d + 1)", - ) - instance.query( - "INSERT INTO hits_all SELECT * FROM system.numbers LIMIT 1002", - settings={"distributed_foreground_insert": 1}, - ) - - def check(self): - assert ( - self.cluster.instances["s0_0_0"] - .query("SELECT count() FROM hits_all") - .strip() - == "1002" - ) - assert ( - self.cluster.instances["s1_0_0"] - .query("SELECT count() FROM hits_all") - .strip() - == "1002" - ) - - assert ( - self.cluster.instances["s1_0_0"] - .query("SELECT DISTINCT d % 2 FROM hits") - .strip() - == "1" - ) - assert ( - self.cluster.instances["s1_1_0"] - .query("SELECT DISTINCT d % 2 FROM hits") - .strip() - == "0" - ) - - instance = self.cluster.instances["s0_0_0"] - ddl_check_query(instance, "DROP TABLE hits_all ON CLUSTER cluster0") - ddl_check_query(instance, "DROP TABLE hits_all ON CLUSTER cluster1") - ddl_check_query(instance, "DROP TABLE hits ON CLUSTER cluster0") - ddl_check_query(instance, "DROP TABLE hits ON CLUSTER cluster1") - - -class Task2: - def __init__(self, cluster, unique_zk_path): - self.cluster = cluster - self.zk_task_path = ( - "/clickhouse-copier/task_month_to_week_partition_" + generateRandomString(5) - ) - self.unique_zk_path = generateRandomString(10) - self.container_task_file = "/task_month_to_week_description.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_month_to_week_description.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - instance = cluster.instances["s0_0_0"] - - for cluster_num in ["0", "1"]: - ddl_check_query( - instance, - "DROP DATABASE IF EXISTS default ON CLUSTER cluster{}".format( - cluster_num - ), - ) - ddl_check_query( - instance, - "CREATE DATABASE IF NOT EXISTS default ON CLUSTER cluster{}".format( - cluster_num - ), - ) - - ddl_check_query( - instance, - "CREATE TABLE a ON CLUSTER cluster0 (date Date, d UInt64, d1 UInt64 ALIAS d+1) " - "ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster_{cluster}/{shard}/" - + self.unique_zk_path - + "', " - "'{replica}', date, intHash64(d), (date, intHash64(d)), 8192)", - ) - ddl_check_query( - instance, - "CREATE TABLE a_all ON CLUSTER cluster0 (date Date, d UInt64) ENGINE=Distributed(cluster0, default, a, d)", - ) - - instance.query( - "INSERT INTO a_all SELECT toDate(17581 + number) AS date, number AS d FROM system.numbers LIMIT 85", - settings={"distributed_foreground_insert": 1}, - ) - - def check(self): - assert TSV( - self.cluster.instances["s0_0_0"].query( - "SELECT count() FROM cluster(cluster0, default, a)" - ) - ) == TSV("85\n") - assert TSV( - self.cluster.instances["s1_0_0"].query( - "SELECT count(), uniqExact(date) FROM cluster(cluster1, default, b)" - ) - ) == TSV("85\t85\n") - - assert TSV( - self.cluster.instances["s1_0_0"].query( - "SELECT DISTINCT jumpConsistentHash(intHash64(d), 2) FROM b" - ) - ) == TSV("0\n") - assert TSV( - self.cluster.instances["s1_1_0"].query( - "SELECT DISTINCT jumpConsistentHash(intHash64(d), 2) FROM b" - ) - ) == TSV("1\n") - - assert TSV( - self.cluster.instances["s1_0_0"].query( - "SELECT uniqExact(partition) IN (12, 13) FROM system.parts WHERE active AND database='default' AND table='b'" - ) - ) == TSV("1\n") - assert TSV( - self.cluster.instances["s1_1_0"].query( - "SELECT uniqExact(partition) IN (12, 13) FROM system.parts WHERE active AND database='default' AND table='b'" - ) - ) == TSV("1\n") - - instance = cluster.instances["s0_0_0"] - ddl_check_query(instance, "DROP TABLE a ON CLUSTER cluster0") - ddl_check_query(instance, "DROP TABLE b ON CLUSTER cluster1") - - -class Task_test_block_size: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = ( - "/clickhouse-copier/task_test_block_size_" + generateRandomString(5) - ) - self.rows = 1000000 - self.container_task_file = "/task_test_block_size.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_test_block_size.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - instance = cluster.instances["s0_0_0"] - - ddl_check_query( - instance, - """ - CREATE TABLE test_block_size ON CLUSTER shard_0_0 (partition Date, d UInt64) - ENGINE=ReplicatedMergeTree - ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d)""", - 2, - ) - - instance.query( - "INSERT INTO test_block_size SELECT toDate(0) AS partition, number as d FROM system.numbers LIMIT {}".format( - self.rows - ) - ) - - def check(self): - assert TSV( - self.cluster.instances["s1_0_0"].query( - "SELECT count() FROM cluster(cluster1, default, test_block_size)" - ) - ) == TSV("{}\n".format(self.rows)) - - instance = cluster.instances["s0_0_0"] - ddl_check_query(instance, "DROP TABLE test_block_size ON CLUSTER shard_0_0", 2) - ddl_check_query(instance, "DROP TABLE test_block_size ON CLUSTER cluster1") - - -class Task_no_index: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_no_index_" + generateRandomString( - 5 - ) - self.rows = 1000000 - self.container_task_file = "/task_no_index.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_no_index.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - instance = cluster.instances["s0_0_0"] - instance.query("DROP TABLE IF EXISTS ontime SYNC") - instance.query( - "create table IF NOT EXISTS ontime (Year UInt16, FlightDate String) ENGINE = Memory" - ) - instance.query( - "insert into ontime values (2016, 'test6'), (2017, 'test7'), (2018, 'test8')" - ) - - def check(self): - assert TSV( - self.cluster.instances["s1_1_0"].query("SELECT Year FROM ontime22") - ) == TSV("2017\n") - instance = cluster.instances["s0_0_0"] - instance.query("DROP TABLE ontime") - instance = cluster.instances["s1_1_0"] - instance.query("DROP TABLE ontime22") - - -class Task_no_arg: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_no_arg" - self.rows = 1000000 - self.container_task_file = "/task_no_arg.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_no_arg.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - instance = cluster.instances["s0_0_0"] - instance.query("DROP TABLE IF EXISTS copier_test1 SYNC") - instance.query( - "create table if not exists copier_test1 (date Date, id UInt32) engine = MergeTree PARTITION BY date ORDER BY date SETTINGS index_granularity = 8192" - ) - instance.query("insert into copier_test1 values ('2016-01-01', 10);") - - def check(self): - assert TSV( - self.cluster.instances["s1_1_0"].query("SELECT date FROM copier_test1_1") - ) == TSV("2016-01-01\n") - instance = cluster.instances["s0_0_0"] - instance.query("DROP TABLE copier_test1 SYNC") - instance = cluster.instances["s1_1_0"] - instance.query("DROP TABLE copier_test1_1 SYNC") - - -class Task_non_partitioned_table: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_non_partitoned_table" - self.rows = 1000000 - self.container_task_file = "/task_non_partitioned_table.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_non_partitioned_table.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - instance = cluster.instances["s0_0_0"] - instance.query("DROP TABLE IF EXISTS copier_test1 SYNC") - instance.query( - "create table copier_test1 (date Date, id UInt32) engine = MergeTree ORDER BY date SETTINGS index_granularity = 8192" - ) - instance.query("insert into copier_test1 values ('2016-01-01', 10);") - - def check(self): - assert TSV( - self.cluster.instances["s1_1_0"].query("SELECT date FROM copier_test1_1") - ) == TSV("2016-01-01\n") - instance = cluster.instances["s0_0_0"] - instance.query("DROP TABLE copier_test1") - instance = cluster.instances["s1_1_0"] - instance.query("DROP TABLE copier_test1_1") - - -class Task_self_copy: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_self_copy" - self.container_task_file = "/task_self_copy.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_self_copy.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - instance = cluster.instances["s0_0_0"] - instance.query("DROP DATABASE IF EXISTS db1 SYNC") - instance.query("DROP DATABASE IF EXISTS db2 SYNC") - instance.query("CREATE DATABASE IF NOT EXISTS db1;") - instance.query( - "CREATE TABLE IF NOT EXISTS db1.source_table (`a` Int8, `b` String, `c` Int8) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity = 8192" - ) - instance.query("CREATE DATABASE IF NOT EXISTS db2;") - instance.query( - "CREATE TABLE IF NOT EXISTS db2.destination_table (`a` Int8, `b` String, `c` Int8) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity = 8192" - ) - instance.query("INSERT INTO db1.source_table VALUES (1, 'ClickHouse', 1);") - instance.query("INSERT INTO db1.source_table VALUES (2, 'Copier', 2);") - - def check(self): - instance = cluster.instances["s0_0_0"] - assert TSV( - instance.query("SELECT * FROM db2.destination_table ORDER BY a") - ) == TSV(instance.query("SELECT * FROM db1.source_table ORDER BY a")) - instance = cluster.instances["s0_0_0"] - instance.query("DROP DATABASE IF EXISTS db1 SYNC") - instance.query("DROP DATABASE IF EXISTS db2 SYNC") - - -def execute_task(started_cluster, task, cmd_options): - task.start() - - zk = started_cluster.get_kazoo_client("zoo1") - print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) - - try: - zk.delete("/clickhouse-copier", recursive=True) - except kazoo.exceptions.NoNodeError: - print("No node /clickhouse-copier. It is Ok in first test.") - - # Run cluster-copier processes on each node - docker_api = started_cluster.docker_client.api - copiers_exec_ids = [] - - cmd = [ - "/usr/bin/clickhouse", - "copier", - "--config", - "/etc/clickhouse-server/config-copier.xml", - "--task-path", - task.zk_task_path, - "--task-file", - task.container_task_file, - "--task-upload-force", - "true", - "--base-dir", - "/var/log/clickhouse-server/copier", - ] - cmd += cmd_options - - print(cmd) - - copiers = random.sample(list(started_cluster.instances.keys()), 3) - - for instance_name in copiers: - instance = started_cluster.instances[instance_name] - container = instance.get_docker_handle() - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"), - "/etc/clickhouse-server/config-copier.xml", - ) - print("Copied copier config to {}".format(instance.name)) - exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = docker_api.exec_start(exec_id).decode("utf8") - print(output) - copiers_exec_ids.append(exec_id) - print( - "Copier for {} ({}) has started".format(instance.name, instance.ip_address) - ) - - # Wait for copiers stopping and check their return codes - for exec_id, instance_name in zip(copiers_exec_ids, copiers): - instance = started_cluster.instances[instance_name] - while True: - res = docker_api.exec_inspect(exec_id) - if not res["Running"]: - break - time.sleep(0.5) - - assert res["ExitCode"] == 0, "Instance: {} ({}). 
Info: {}".format( - instance.name, instance.ip_address, repr(res) - ) - - try: - task.check() - finally: - zk.delete(task.zk_task_path, recursive=True) - - -# Tests - - -@pytest.mark.parametrize(("use_sample_offset"), [False, True]) -def test_copy_simple(started_cluster, use_sample_offset): - if use_sample_offset: - execute_task( - started_cluster, - Task1(started_cluster), - ["--experimental-use-sample-offset", "1"], - ) - else: - execute_task(started_cluster, Task1(started_cluster), []) - - -@pytest.mark.parametrize(("use_sample_offset"), [False, True]) -def test_copy_with_recovering(started_cluster, use_sample_offset): - if use_sample_offset: - execute_task( - started_cluster, - Task1(started_cluster), - [ - "--copy-fault-probability", - str(COPYING_FAIL_PROBABILITY), - "--experimental-use-sample-offset", - "1", - "--max-table-tries", - "10", - ], - ) - else: - execute_task( - started_cluster, - Task1(started_cluster), - [ - "--copy-fault-probability", - str(COPYING_FAIL_PROBABILITY), - "--max-table-tries", - "10", - ], - ) - - -@pytest.mark.parametrize(("use_sample_offset"), [False, True]) -def test_copy_with_recovering_after_move_faults(started_cluster, use_sample_offset): - if use_sample_offset: - execute_task( - started_cluster, - Task1(started_cluster), - [ - "--move-fault-probability", - str(MOVING_FAIL_PROBABILITY), - "--experimental-use-sample-offset", - "1", - ], - ) - else: - execute_task( - started_cluster, - Task1(started_cluster), - ["--move-fault-probability", str(MOVING_FAIL_PROBABILITY)], - ) - - -@pytest.mark.timeout(600) -def test_copy_month_to_week_partition(started_cluster): - execute_task(started_cluster, Task2(started_cluster, "test1"), []) - - -@pytest.mark.timeout(600) -def test_copy_month_to_week_partition_with_recovering(started_cluster): - execute_task( - started_cluster, - Task2(started_cluster, "test2"), - [ - "--copy-fault-probability", - str(COPYING_FAIL_PROBABILITY), - "--max-table-tries", - "10", - ], - ) - - -@pytest.mark.timeout(600) -def test_copy_month_to_week_partition_with_recovering_after_move_faults( - started_cluster, -): - execute_task( - started_cluster, - Task2(started_cluster, "test3"), - ["--move-fault-probability", str(MOVING_FAIL_PROBABILITY)], - ) - - -def test_block_size(started_cluster): - execute_task(started_cluster, Task_test_block_size(started_cluster), []) - - -def test_no_index(started_cluster): - execute_task(started_cluster, Task_no_index(started_cluster), []) - - -def test_no_arg(started_cluster): - execute_task(started_cluster, Task_no_arg(started_cluster), []) - - -def test_non_partitioned_table(started_cluster): - execute_task(started_cluster, Task_non_partitioned_table(started_cluster), []) - - -def test_self_copy(started_cluster): - execute_task(started_cluster, Task_self_copy(started_cluster), []) diff --git a/tests/integration/test_cluster_copier/test_three_nodes.py b/tests/integration/test_cluster_copier/test_three_nodes.py deleted file mode 100644 index e7d07757adb..00000000000 --- a/tests/integration/test_cluster_copier/test_three_nodes.py +++ /dev/null @@ -1,286 +0,0 @@ -import os -import sys -import time -import logging -import pytest - -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import TSV - -import docker - -CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) - -cluster = ClickHouseCluster(__file__) - - -@pytest.fixture(scope="module") -def started_cluster(): - global cluster - try: - for name in ["first", "second", 
"third"]: - cluster.add_instance( - name, - main_configs=[ - "configs_three_nodes/conf.d/clusters.xml", - "configs_three_nodes/conf.d/ddl.xml", - ], - user_configs=["configs_three_nodes/users.xml"], - with_zookeeper=True, - ) - - cluster.start() - yield cluster - - finally: - cluster.shutdown() - - -class Task: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task" - self.container_task_file = "/task_taxi_data.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_taxi_data.xml"), - self.container_task_file, - ) - logging.debug( - f"Copied task file to container of '{instance_name}' instance. Path {self.container_task_file}" - ) - - def start(self): - for name in ["first", "second", "third"]: - node = cluster.instances[name] - node.query("DROP DATABASE IF EXISTS dailyhistory SYNC;") - node.query("DROP DATABASE IF EXISTS monthlyhistory SYNC;") - - first = cluster.instances["first"] - - # daily partition database - first.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;") - first.query( - """CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events - ( - id UUID DEFAULT generateUUIDv4(), - vendor_id String, - tpep_pickup_datetime DateTime('UTC'), - tpep_dropoff_datetime DateTime('UTC'), - passenger_count Nullable(Float64), - trip_distance String, - pickup_longitude Float64, - pickup_latitude Float64, - rate_code_id String, - store_and_fwd_flag String, - dropoff_longitude Float64, - dropoff_latitude Float64, - payment_type String, - fare_amount String, - extra String, - mta_tax String, - tip_amount String, - tolls_amount String, - improvement_surcharge String, - total_amount String, - pickup_location_id String, - dropoff_location_id String, - congestion_surcharge String, - junk1 String, junk2 String - ) - Engine = ReplacingMergeTree() - PRIMARY KEY (tpep_pickup_datetime, id) - ORDER BY (tpep_pickup_datetime, id) - PARTITION BY (toYYYYMMDD(tpep_pickup_datetime))""" - ) - - first.query( - """CREATE TABLE dailyhistory.yellow_tripdata - ON CLUSTER events - AS dailyhistory.yellow_tripdata_staging - ENGINE = Distributed('events', 'dailyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""" - ) - - first.query( - """INSERT INTO dailyhistory.yellow_tripdata - SELECT * FROM generateRandom( - 'id UUID DEFAULT generateUUIDv4(), - vendor_id String, - tpep_pickup_datetime DateTime(\\'UTC\\'), - tpep_dropoff_datetime DateTime(\\'UTC\\'), - passenger_count Nullable(Float64), - trip_distance String, - pickup_longitude Float64, - pickup_latitude Float64, - rate_code_id String, - store_and_fwd_flag String, - dropoff_longitude Float64, - dropoff_latitude Float64, - payment_type String, - fare_amount String, - extra String, - mta_tax String, - tip_amount String, - tolls_amount String, - improvement_surcharge String, - total_amount String, - pickup_location_id String, - dropoff_location_id String, - congestion_surcharge String, - junk1 String, - junk2 String', - 1, 10, 2) LIMIT 50;""" - ) - - # monthly partition database - first.query("create database IF NOT EXISTS monthlyhistory on cluster events;") - first.query( - """CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events - ( - id UUID DEFAULT generateUUIDv4(), - vendor_id String, - tpep_pickup_datetime DateTime('UTC'), - tpep_dropoff_datetime DateTime('UTC'), - passenger_count Nullable(Float64), - trip_distance String, - pickup_longitude 
Float64, - pickup_latitude Float64, - rate_code_id String, - store_and_fwd_flag String, - dropoff_longitude Float64, - dropoff_latitude Float64, - payment_type String, - fare_amount String, - extra String, - mta_tax String, - tip_amount String, - tolls_amount String, - improvement_surcharge String, - total_amount String, - pickup_location_id String, - dropoff_location_id String, - congestion_surcharge String, - junk1 String, - junk2 String - ) - Engine = ReplacingMergeTree() - PRIMARY KEY (tpep_pickup_datetime, id) - ORDER BY (tpep_pickup_datetime, id) - PARTITION BY (pickup_location_id, toYYYYMM(tpep_pickup_datetime))""" - ) - - first.query( - """CREATE TABLE monthlyhistory.yellow_tripdata - ON CLUSTER events - AS monthlyhistory.yellow_tripdata_staging - ENGINE = Distributed('events', 'monthlyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""" - ) - - def check(self): - first = cluster.instances["first"] - a = TSV(first.query("SELECT count() from dailyhistory.yellow_tripdata")) - b = TSV(first.query("SELECT count() from monthlyhistory.yellow_tripdata")) - assert a == b, "Distributed tables" - - for instance_name, instance in cluster.instances.items(): - instance = cluster.instances[instance_name] - a = instance.query( - "SELECT count() from dailyhistory.yellow_tripdata_staging" - ) - b = instance.query( - "SELECT count() from monthlyhistory.yellow_tripdata_staging" - ) - assert a == b, "MergeTree tables on each shard" - - a = TSV( - instance.query( - "SELECT sipHash64(*) from dailyhistory.yellow_tripdata_staging ORDER BY id" - ) - ) - b = TSV( - instance.query( - "SELECT sipHash64(*) from monthlyhistory.yellow_tripdata_staging ORDER BY id" - ) - ) - - assert a == b, "Data on each shard" - - for name in ["first", "second", "third"]: - node = cluster.instances[name] - node.query("DROP DATABASE IF EXISTS dailyhistory SYNC;") - node.query("DROP DATABASE IF EXISTS monthlyhistory SYNC;") - - -def execute_task(started_cluster, task, cmd_options): - task.start() - - zk = started_cluster.get_kazoo_client("zoo1") - logging.debug("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) - - # Run cluster-copier processes on each node - docker_api = started_cluster.docker_client.api - copiers_exec_ids = [] - - cmd = [ - "/usr/bin/clickhouse", - "copier", - "--config", - "/etc/clickhouse-server/config-copier.xml", - "--task-path", - task.zk_task_path, - "--task-file", - task.container_task_file, - "--task-upload-force", - "true", - "--base-dir", - "/var/log/clickhouse-server/copier", - ] - cmd += cmd_options - - logging.debug(f"execute_task cmd: {cmd}") - - for instance_name in started_cluster.instances.keys(): - instance = started_cluster.instances[instance_name] - container = instance.get_docker_handle() - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "configs_three_nodes/config-copier.xml"), - "/etc/clickhouse-server/config-copier.xml", - ) - logging.info("Copied copier config to {}".format(instance.name)) - exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = docker_api.exec_start(exec_id).decode("utf8") - logging.info(output) - copiers_exec_ids.append(exec_id) - logging.info( - "Copier for {} ({}) has started".format(instance.name, instance.ip_address) - ) - - # time.sleep(1000) - - # Wait for copiers stopping and check their return codes - for exec_id, instance in zip( - copiers_exec_ids, iter(started_cluster.instances.values()) - ): - while True: - res = docker_api.exec_inspect(exec_id) - if not res["Running"]: - break - time.sleep(1) 
- - assert res["ExitCode"] == 0, "Instance: {} ({}). Info: {}".format( - instance.name, instance.ip_address, repr(res) - ) - - try: - task.check() - finally: - zk.delete(task.zk_task_path, recursive=True) - - -# Tests -@pytest.mark.timeout(600) -def test(started_cluster): - execute_task(started_cluster, Task(started_cluster), []) diff --git a/tests/integration/test_cluster_copier/test_trivial.py b/tests/integration/test_cluster_copier/test_trivial.py deleted file mode 100644 index b8060583ef8..00000000000 --- a/tests/integration/test_cluster_copier/test_trivial.py +++ /dev/null @@ -1,227 +0,0 @@ -import os -import sys -import time -import random -import string - -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import TSV - -import kazoo -import pytest -import docker - - -CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) - - -COPYING_FAIL_PROBABILITY = 0.1 -MOVING_FAIL_PROBABILITY = 0.1 - -cluster = ClickHouseCluster(__file__) - - -def generateRandomString(count): - return "".join( - random.choice(string.ascii_uppercase + string.digits) for _ in range(count) - ) - - -@pytest.fixture(scope="module") -def started_cluster(): - global cluster - try: - for name in ["first_trivial", "second_trivial"]: - instance = cluster.add_instance( - name, - main_configs=["configs/conf.d/clusters_trivial.xml"], - user_configs=["configs_two_nodes/users.xml"], - macros={ - "cluster": name, - "shard": "the_only_shard", - "replica": "the_only_replica", - }, - with_zookeeper=True, - ) - - cluster.start() - yield cluster - - finally: - cluster.shutdown() - - -class TaskTrivial: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_trivial" - self.copier_task_config = open( - os.path.join(CURRENT_TEST_DIR, "task_trivial.xml"), "r" - ).read() - - def start(self): - source = cluster.instances["first_trivial"] - destination = cluster.instances["second_trivial"] - - for node in [source, destination]: - node.query("DROP DATABASE IF EXISTS default") - node.query("CREATE DATABASE IF NOT EXISTS default") - - source.query( - "CREATE TABLE trivial (d UInt64, d1 UInt64 MATERIALIZED d+1)" - "ENGINE=ReplicatedMergeTree('/clickhouse/tables/source_trivial_cluster/1/trivial/{}', '1') " - "PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16".format( - generateRandomString(10) - ) - ) - - source.query( - "INSERT INTO trivial SELECT * FROM system.numbers LIMIT 1002", - settings={"distributed_foreground_insert": 1}, - ) - - def check(self): - zk = cluster.get_kazoo_client("zoo1") - status_data, _ = zk.get(self.zk_task_path + "/status") - assert ( - status_data - == b'{"hits":{"all_partitions_count":5,"processed_partitions_count":5}}' - ) - - source = cluster.instances["first_trivial"] - destination = cluster.instances["second_trivial"] - - assert TSV(source.query("SELECT count() FROM trivial")) == TSV("1002\n") - assert TSV(destination.query("SELECT count() FROM trivial")) == TSV("1002\n") - - for node in [source, destination]: - node.query("DROP TABLE trivial") - - -class TaskReplicatedWithoutArguments: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_trivial_without_arguments" - self.copier_task_config = open( - os.path.join(CURRENT_TEST_DIR, "task_trivial_without_arguments.xml"), "r" - ).read() - - def start(self): - source = cluster.instances["first_trivial"] - destination = 
cluster.instances["second_trivial"] - - for node in [source, destination]: - node.query("DROP DATABASE IF EXISTS default") - node.query("CREATE DATABASE IF NOT EXISTS default") - - source.query( - "CREATE TABLE trivial_without_arguments ON CLUSTER source_trivial_cluster (d UInt64, d1 UInt64 MATERIALIZED d+1) " - "ENGINE=ReplicatedMergeTree() " - "PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16" - ) - - source.query( - "INSERT INTO trivial_without_arguments SELECT * FROM system.numbers LIMIT 1002", - settings={"distributed_foreground_insert": 1}, - ) - - def check(self): - zk = cluster.get_kazoo_client("zoo1") - status_data, _ = zk.get(self.zk_task_path + "/status") - assert ( - status_data - == b'{"hits":{"all_partitions_count":5,"processed_partitions_count":5}}' - ) - - source = cluster.instances["first_trivial"] - destination = cluster.instances["second_trivial"] - - assert TSV( - source.query("SELECT count() FROM trivial_without_arguments") - ) == TSV("1002\n") - assert TSV( - destination.query("SELECT count() FROM trivial_without_arguments") - ) == TSV("1002\n") - - for node in [source, destination]: - node.query("DROP TABLE trivial_without_arguments") - - -def execute_task(started_cluster, task, cmd_options): - task.start() - - zk = started_cluster.get_kazoo_client("zoo1") - print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) - - try: - zk.delete("/clickhouse-copier", recursive=True) - except kazoo.exceptions.NoNodeError: - print("No node /clickhouse-copier. It is Ok in first test.") - - zk_task_path = task.zk_task_path - zk.ensure_path(zk_task_path) - zk.create(zk_task_path + "/description", task.copier_task_config.encode()) - - # Run cluster-copier processes on each node - docker_api = started_cluster.docker_client.api - copiers_exec_ids = [] - - cmd = [ - "/usr/bin/clickhouse", - "copier", - "--config", - "/etc/clickhouse-server/config-copier.xml", - "--task-path", - zk_task_path, - "--base-dir", - "/var/log/clickhouse-server/copier", - ] - cmd += cmd_options - - copiers = list(started_cluster.instances.keys()) - - for instance_name in copiers: - instance = started_cluster.instances[instance_name] - container = instance.get_docker_handle() - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"), - "/etc/clickhouse-server/config-copier.xml", - ) - print("Copied copier config to {}".format(instance.name)) - exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = docker_api.exec_start(exec_id).decode("utf8") - print(output) - copiers_exec_ids.append(exec_id) - print( - "Copier for {} ({}) has started".format(instance.name, instance.ip_address) - ) - - # Wait for copiers stopping and check their return codes - for exec_id, instance_name in zip(copiers_exec_ids, copiers): - instance = started_cluster.instances[instance_name] - while True: - res = docker_api.exec_inspect(exec_id) - if not res["Running"]: - break - time.sleep(0.5) - - assert res["ExitCode"] == 0, "Instance: {} ({}). 
Info: {}".format( - instance.name, instance.ip_address, repr(res) - ) - - try: - task.check() - finally: - zk.delete(zk_task_path, recursive=True) - - -# Tests - - -def test_trivial_copy(started_cluster): - execute_task(started_cluster, TaskTrivial(started_cluster), []) - - -def test_trivial_without_arguments(started_cluster): - execute_task(started_cluster, TaskReplicatedWithoutArguments(started_cluster), []) diff --git a/tests/integration/test_cluster_copier/test_two_nodes.py b/tests/integration/test_cluster_copier/test_two_nodes.py deleted file mode 100644 index 1bd3561f24f..00000000000 --- a/tests/integration/test_cluster_copier/test_two_nodes.py +++ /dev/null @@ -1,597 +0,0 @@ -import os -import sys -import time -import logging -import pytest - -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import TSV - -import docker - -CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) - -cluster = ClickHouseCluster(__file__) - - -@pytest.fixture(scope="module") -def started_cluster(): - global cluster - try: - for name in ["first_of_two", "second_of_two"]: - instance = cluster.add_instance( - name, - main_configs=[ - "configs_two_nodes/conf.d/clusters.xml", - "configs_two_nodes/conf.d/ddl.xml", - "configs_two_nodes/conf.d/storage_configuration.xml", - ], - user_configs=["configs_two_nodes/users.xml"], - with_zookeeper=True, - ) - - cluster.start() - - for name in ["first_of_two", "second_of_two"]: - instance = cluster.instances[name] - instance.exec_in_container(["bash", "-c", "mkdir /jbod1"]) - instance.exec_in_container(["bash", "-c", "mkdir /jbod2"]) - instance.exec_in_container(["bash", "-c", "mkdir /external"]) - - yield cluster - - finally: - cluster.shutdown() - - -# Will copy table from `first` node to `second` -class TaskWithDifferentSchema: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_with_different_schema" - self.container_task_file = "/task_with_different_schema.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_with_different_schema.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - first.query("DROP DATABASE IF EXISTS db_different_schema SYNC") - second.query("DROP DATABASE IF EXISTS db_different_schema SYNC") - - first.query("CREATE DATABASE IF NOT EXISTS db_different_schema;") - first.query( - """CREATE TABLE db_different_schema.source - ( - Column1 String, - Column2 UInt32, - Column3 Date, - Column4 DateTime, - Column5 UInt16, - Column6 String, - Column7 String, - Column8 String, - Column9 String, - Column10 String, - Column11 String, - Column12 Decimal(3, 1), - Column13 DateTime, - Column14 UInt16 - ) - ENGINE = MergeTree() - PARTITION BY (toYYYYMMDD(Column3), Column3) - PRIMARY KEY (Column1, Column2, Column3, Column4, Column6, Column7, Column8, Column9) - ORDER BY (Column1, Column2, Column3, Column4, Column6, Column7, Column8, Column9) - SETTINGS index_granularity = 8192""" - ) - - first.query( - """INSERT INTO db_different_schema.source SELECT * FROM generateRandom( - 'Column1 String, Column2 UInt32, Column3 Date, Column4 DateTime, Column5 UInt16, - Column6 String, Column7 String, Column8 String, Column9 String, Column10 String, - Column11 String, Column12 Decimal(3, 1), Column13 DateTime, Column14 UInt16', 1, 10, 2) LIMIT 50;""" - ) - - second.query("CREATE DATABASE IF NOT EXISTS db_different_schema;") - second.query( - """CREATE TABLE db_different_schema.destination - ( - Column1 LowCardinality(String) CODEC(LZ4), - Column2 UInt32 CODEC(LZ4), - Column3 Date CODEC(DoubleDelta, LZ4), - Column4 DateTime CODEC(DoubleDelta, LZ4), - Column5 UInt16 CODEC(LZ4), - Column6 LowCardinality(String) CODEC(ZSTD), - Column7 LowCardinality(String) CODEC(ZSTD), - Column8 LowCardinality(String) CODEC(ZSTD), - Column9 LowCardinality(String) CODEC(ZSTD), - Column10 String CODEC(ZSTD(6)), - Column11 LowCardinality(String) CODEC(LZ4), - Column12 Decimal(3,1) CODEC(LZ4), - Column13 DateTime CODEC(DoubleDelta, LZ4), - Column14 UInt16 CODEC(LZ4) - ) ENGINE = MergeTree() - PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column9, Column1, Column2, Column3, Column4);""" - ) - - print("Preparation completed") - - def check(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - a = first.query("SELECT count() from db_different_schema.source") - b = second.query("SELECT count() from db_different_schema.destination") - assert a == b, "Count" - - a = TSV( - first.query( - """SELECT sipHash64(*) from db_different_schema.source - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8, Column9, Column10, Column11, Column12, Column13, Column14)""" - ) - ) - b = TSV( - second.query( - """SELECT sipHash64(*) from db_different_schema.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8, Column9, Column10, Column11, Column12, Column13, Column14)""" - ) - ) - assert a == b, "Data" - - first.query("DROP DATABASE IF EXISTS db_different_schema SYNC") - second.query("DROP DATABASE IF EXISTS db_different_schema SYNC") - - -# Just simple copying, but table schema has TTL on columns -# Also table will have slightly different schema -class TaskTTL: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_ttl_columns" - self.container_task_file = "/task_ttl_columns.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - 
instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_ttl_columns.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - first.query("DROP DATABASE IF EXISTS db_ttl_columns SYNC") - second.query("DROP DATABASE IF EXISTS db_ttl_columns SYNC") - - first.query("CREATE DATABASE IF NOT EXISTS db_ttl_columns;") - first.query( - """CREATE TABLE db_ttl_columns.source - ( - Column1 String, - Column2 UInt32, - Column3 Date, - Column4 DateTime, - Column5 UInt16, - Column6 String TTL now() + INTERVAL 1 MONTH, - Column7 Decimal(3, 1) TTL now() + INTERVAL 1 MONTH, - Column8 Tuple(Float64, Float64) TTL now() + INTERVAL 1 MONTH - ) - ENGINE = MergeTree() - PARTITION BY (toYYYYMMDD(Column3), Column3) - PRIMARY KEY (Column1, Column2, Column3) - ORDER BY (Column1, Column2, Column3) - SETTINGS index_granularity = 8192""" - ) - - first.query( - """INSERT INTO db_ttl_columns.source SELECT * FROM generateRandom( - 'Column1 String, Column2 UInt32, Column3 Date, Column4 DateTime, Column5 UInt16, - Column6 String, Column7 Decimal(3, 1), Column8 Tuple(Float64, Float64)', 1, 10, 2) LIMIT 50;""" - ) - - second.query("CREATE DATABASE IF NOT EXISTS db_ttl_columns;") - second.query( - """CREATE TABLE db_ttl_columns.destination - ( - Column1 String, - Column2 UInt32, - Column3 Date, - Column4 DateTime TTL now() + INTERVAL 1 MONTH, - Column5 UInt16 TTL now() + INTERVAL 1 MONTH, - Column6 String TTL now() + INTERVAL 1 MONTH, - Column7 Decimal(3, 1) TTL now() + INTERVAL 1 MONTH, - Column8 Tuple(Float64, Float64) - ) ENGINE = MergeTree() - PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column3, Column2, Column1);""" - ) - - print("Preparation completed") - - def check(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - a = first.query("SELECT count() from db_ttl_columns.source") - b = second.query("SELECT count() from db_ttl_columns.destination") - assert a == b, "Count" - - a = TSV( - first.query( - """SELECT sipHash64(*) from db_ttl_columns.source - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8)""" - ) - ) - b = TSV( - second.query( - """SELECT sipHash64(*) from db_ttl_columns.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8)""" - ) - ) - assert a == b, "Data" - - first.query("DROP DATABASE IF EXISTS db_ttl_columns SYNC") - second.query("DROP DATABASE IF EXISTS db_ttl_columns SYNC") - - -class TaskSkipIndex: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_skip_index" - self.container_task_file = "/task_skip_index.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_skip_index.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - first.query("DROP DATABASE IF EXISTS db_skip_index SYNC") - second.query("DROP DATABASE IF EXISTS db_skip_index SYNC") - - first.query("CREATE DATABASE IF NOT EXISTS db_skip_index;") - first.query( - """CREATE TABLE db_skip_index.source - ( - Column1 UInt64, - Column2 Int32, - Column3 Date, - Column4 DateTime, - Column5 String, - INDEX a (Column1 * Column2, Column5) TYPE minmax GRANULARITY 3, - INDEX b (Column1 * length(Column5)) TYPE set(1000) GRANULARITY 4 - ) - ENGINE = MergeTree() - PARTITION BY (toYYYYMMDD(Column3), Column3) - PRIMARY KEY (Column1, Column2, Column3) - ORDER BY (Column1, Column2, Column3) - SETTINGS index_granularity = 8192""" - ) - - first.query( - """INSERT INTO db_skip_index.source SELECT * FROM generateRandom( - 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""" - ) - - second.query("CREATE DATABASE IF NOT EXISTS db_skip_index;") - second.query( - """CREATE TABLE db_skip_index.destination - ( - Column1 UInt64, - Column2 Int32, - Column3 Date, - Column4 DateTime, - Column5 String, - INDEX a (Column1 * Column2, Column5) TYPE minmax GRANULARITY 3, - INDEX b (Column1 * length(Column5)) TYPE set(1000) GRANULARITY 4 - ) ENGINE = MergeTree() - PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column3, Column2, Column1);""" - ) - - print("Preparation completed") - - def check(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - a = first.query("SELECT count() from db_skip_index.source") - b = second.query("SELECT count() from db_skip_index.destination") - assert a == b, "Count" - - a = TSV( - first.query( - """SELECT sipHash64(*) from db_skip_index.source - ORDER BY (Column1, Column2, Column3, Column4, Column5)""" - ) - ) - b = TSV( - second.query( - """SELECT sipHash64(*) from db_skip_index.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5)""" - ) - ) - assert a == b, "Data" - - first.query("DROP DATABASE IF EXISTS db_skip_index SYNC") - second.query("DROP DATABASE IF EXISTS db_skip_index SYNC") - - -class TaskTTLMoveToVolume: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_ttl_move_to_volume" - self.container_task_file = "/task_ttl_move_to_volume.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_ttl_move_to_volume.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["first_of_two"] - - first.query("DROP DATABASE IF EXISTS db_move_to_volume SYNC") - second.query("DROP DATABASE IF EXISTS db_move_to_volume SYNC") - - first.query("CREATE DATABASE IF NOT EXISTS db_move_to_volume;") - first.query( - """CREATE TABLE db_move_to_volume.source - ( - Column1 UInt64, - Column2 Int32, - Column3 Date, - Column4 DateTime, - Column5 String - ) - ENGINE = MergeTree() - PARTITION BY (toYYYYMMDD(Column3), Column3) - PRIMARY KEY (Column1, Column2, Column3) - ORDER BY (Column1, Column2, Column3) - TTL Column3 + INTERVAL 1 MONTH TO VOLUME 'external' - SETTINGS storage_policy = 'external_with_jbods';""" - ) - - first.query( - """INSERT INTO db_move_to_volume.source SELECT * FROM generateRandom( - 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""" - ) - - second.query("CREATE DATABASE IF NOT EXISTS db_move_to_volume;") - second.query( - """CREATE TABLE db_move_to_volume.destination - ( - Column1 UInt64, - Column2 Int32, - Column3 Date, - Column4 DateTime, - Column5 String - ) ENGINE = MergeTree() - PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column3, Column2, Column1) - TTL Column3 + INTERVAL 1 MONTH TO VOLUME 'external' - SETTINGS storage_policy = 'external_with_jbods';""" - ) - - print("Preparation completed") - - def check(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - a = first.query("SELECT count() from db_move_to_volume.source") - b = second.query("SELECT count() from db_move_to_volume.destination") - assert a == b, "Count" - - a = TSV( - first.query( - """SELECT sipHash64(*) from db_move_to_volume.source - ORDER BY (Column1, Column2, Column3, Column4, Column5)""" - ) - ) - b = TSV( - second.query( - """SELECT sipHash64(*) from db_move_to_volume.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5)""" - ) - ) - assert a == b, "Data" - - first.query("DROP DATABASE IF EXISTS db_move_to_volume SYNC") - second.query("DROP DATABASE IF EXISTS db_move_to_volume SYNC") - - -class TaskDropTargetPartition: - def __init__(self, cluster): - self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_drop_target_partition" - self.container_task_file = "/task_drop_target_partition.xml" - - for instance_name, _ in cluster.instances.items(): - instance = cluster.instances[instance_name] - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "./task_drop_target_partition.xml"), - self.container_task_file, - ) - print( - "Copied task file to container of '{}' instance. 
Path {}".format( - instance_name, self.container_task_file - ) - ) - - def start(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - first.query("DROP DATABASE IF EXISTS db_drop_target_partition SYNC") - second.query("DROP DATABASE IF EXISTS db_drop_target_partition SYNC") - - first.query("CREATE DATABASE IF NOT EXISTS db_drop_target_partition;") - first.query( - """CREATE TABLE db_drop_target_partition.source - ( - Column1 UInt64, - Column2 Int32, - Column3 Date, - Column4 DateTime, - Column5 String - ) - ENGINE = MergeTree() - PARTITION BY (toYYYYMMDD(Column3), Column3) - PRIMARY KEY (Column1, Column2, Column3) - ORDER BY (Column1, Column2, Column3);""" - ) - - first.query( - """INSERT INTO db_drop_target_partition.source SELECT * FROM generateRandom( - 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""" - ) - - second.query("CREATE DATABASE IF NOT EXISTS db_drop_target_partition;") - second.query( - """CREATE TABLE db_drop_target_partition.destination - ( - Column1 UInt64, - Column2 Int32, - Column3 Date, - Column4 DateTime, - Column5 String - ) ENGINE = MergeTree() - PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column3, Column2, Column1);""" - ) - - # Insert data in target too. It has to be dropped. - first.query( - """INSERT INTO db_drop_target_partition.destination SELECT * FROM db_drop_target_partition.source;""" - ) - - print("Preparation completed") - - def check(self): - first = cluster.instances["first_of_two"] - second = cluster.instances["second_of_two"] - - a = first.query("SELECT count() from db_drop_target_partition.source") - b = second.query("SELECT count() from db_drop_target_partition.destination") - assert a == b, "Count" - - a = TSV( - first.query( - """SELECT sipHash64(*) from db_drop_target_partition.source - ORDER BY (Column1, Column2, Column3, Column4, Column5)""" - ) - ) - b = TSV( - second.query( - """SELECT sipHash64(*) from db_drop_target_partition.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5)""" - ) - ) - assert a == b, "Data" - - first.query("DROP DATABASE IF EXISTS db_drop_target_partition SYNC") - second.query("DROP DATABASE IF EXISTS db_drop_target_partition SYNC") - - -def execute_task(started_cluster, task, cmd_options): - task.start() - - zk = started_cluster.get_kazoo_client("zoo1") - print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) - - # Run cluster-copier processes on each node - docker_api = started_cluster.docker_client.api - copiers_exec_ids = [] - - cmd = [ - "/usr/bin/clickhouse", - "copier", - "--config", - "/etc/clickhouse-server/config-copier.xml", - "--task-path", - task.zk_task_path, - "--task-file", - task.container_task_file, - "--task-upload-force", - "true", - "--base-dir", - "/var/log/clickhouse-server/copier", - ] - cmd += cmd_options - - print(cmd) - - for instance_name in started_cluster.instances.keys(): - instance = started_cluster.instances[instance_name] - container = instance.get_docker_handle() - instance.copy_file_to_container( - os.path.join(CURRENT_TEST_DIR, "configs_two_nodes/config-copier.xml"), - "/etc/clickhouse-server/config-copier.xml", - ) - logging.info("Copied copier config to {}".format(instance.name)) - exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = docker_api.exec_start(exec_id).decode("utf8") - logging.info(output) - copiers_exec_ids.append(exec_id) - logging.info( - "Copier for {} ({}) has started".format(instance.name, 
instance.ip_address) - ) - - # time.sleep(1000) - - # Wait for copiers stopping and check their return codes - for exec_id, instance in zip( - copiers_exec_ids, iter(started_cluster.instances.values()) - ): - while True: - res = docker_api.exec_inspect(exec_id) - if not res["Running"]: - break - time.sleep(1) - - assert res["ExitCode"] == 0, "Instance: {} ({}). Info: {}".format( - instance.name, instance.ip_address, repr(res) - ) - - try: - task.check() - finally: - zk.delete(task.zk_task_path, recursive=True) diff --git a/tests/integration/test_config_xml_full/configs/config.xml b/tests/integration/test_config_xml_full/configs/config.xml index ac59b3428e8..628e1432350 100644 --- a/tests/integration/test_config_xml_full/configs/config.xml +++ b/tests/integration/test_config_xml_full/configs/config.xml @@ -72,7 +72,7 @@ 8123 HeaderFilterRegex: '^.*/(src|base|programs|utils)/.*(h|hpp)$' HeaderFilterRegex: '^.*/(base|programs|utils)/.*(h|hpp)$' -Checks: '*, - -abseil-*, +Checks: [ + '*', - -altera-*, + '-abseil-*', - -android-*, + '-altera-*', - -bugprone-assignment-in-if-condition, - -bugprone-branch-clone, - -bugprone-easily-swappable-parameters, - -bugprone-exception-escape, - -bugprone-implicit-widening-of-multiplication-result, - -bugprone-narrowing-conversions, - -bugprone-not-null-terminated-result, - -bugprone-reserved-identifier, # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged - -bugprone-unchecked-optional-access, + '-android-*', - -cert-dcl16-c, - -cert-dcl37-c, - -cert-dcl51-cpp, - -cert-err58-cpp, - -cert-msc32-c, - -cert-msc51-cpp, - -cert-oop54-cpp, - -cert-oop57-cpp, + '-bugprone-assignment-in-if-condition', + '-bugprone-branch-clone', + '-bugprone-easily-swappable-parameters', + '-bugprone-exception-escape', + '-bugprone-implicit-widening-of-multiplication-result', + '-bugprone-narrowing-conversions', + '-bugprone-not-null-terminated-result', + '-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged + '-bugprone-unchecked-optional-access', - -clang-analyzer-unix.Malloc, + '-cert-dcl16-c', + '-cert-dcl37-c', + '-cert-dcl51-cpp', + '-cert-err58-cpp', + '-cert-msc32-c', + '-cert-msc51-cpp', + '-cert-oop54-cpp', + '-cert-oop57-cpp', - -cppcoreguidelines-*, # impractical in a codebase as large as ClickHouse, also slow + '-clang-analyzer-unix.Malloc', - -darwin-*, + '-cppcoreguidelines-*', # impractical in a codebase as large as ClickHouse, also slow - -fuchsia-*, + '-darwin-*', - -google-build-using-namespace, - -google-readability-braces-around-statements, - -google-readability-casting, - -google-readability-function-size, - -google-readability-namespace-comments, - -google-readability-todo, + '-fuchsia-*', - -hicpp-avoid-c-arrays, - -hicpp-avoid-goto, - -hicpp-braces-around-statements, - -hicpp-explicit-conversions, - -hicpp-function-size, - -hicpp-member-init, - -hicpp-move-const-arg, - -hicpp-multiway-paths-covered, - -hicpp-named-parameter, - -hicpp-no-array-decay, - -hicpp-no-assembler, - -hicpp-no-malloc, - -hicpp-signed-bitwise, - -hicpp-special-member-functions, - -hicpp-uppercase-literal-suffix, - -hicpp-use-auto, - -hicpp-use-emplace, - -hicpp-vararg, + '-google-build-using-namespace', + '-google-readability-braces-around-statements', + '-google-readability-casting', + '-google-readability-function-size', + '-google-readability-namespace-comments', + '-google-readability-todo', - -linuxkernel-*, + 
'-hicpp-avoid-c-arrays', + '-hicpp-avoid-goto', + '-hicpp-braces-around-statements', + '-hicpp-explicit-conversions', + '-hicpp-function-size', + '-hicpp-member-init', + '-hicpp-move-const-arg', + '-hicpp-multiway-paths-covered', + '-hicpp-named-parameter', + '-hicpp-no-array-decay', + '-hicpp-no-assembler', + '-hicpp-no-malloc', + '-hicpp-signed-bitwise', + '-hicpp-special-member-functions', + '-hicpp-uppercase-literal-suffix', + '-hicpp-use-auto', + '-hicpp-use-emplace', + '-hicpp-vararg', - -llvm-*, + '-linuxkernel-*', - -llvmlibc-*, + '-llvm-*', - -openmp-*, + '-llvmlibc-*', - -misc-const-correctness, - -misc-include-cleaner, # useful but far too many occurrences - -misc-no-recursion, - -misc-non-private-member-variables-in-classes, - -misc-confusable-identifiers, # useful but slooow - -misc-use-anonymous-namespace, + '-openmp-*', - -modernize-avoid-c-arrays, - -modernize-concat-nested-namespaces, - -modernize-macro-to-enum, - -modernize-pass-by-value, - -modernize-return-braced-init-list, - -modernize-use-auto, - -modernize-use-default-member-init, - -modernize-use-emplace, - -modernize-use-nodiscard, - -modernize-use-override, - -modernize-use-trailing-return-type, + '-misc-const-correctness', + '-misc-include-cleaner', # useful but far too many occurrences + '-misc-no-recursion', + '-misc-non-private-member-variables-in-classes', + '-misc-confusable-identifiers', # useful but slooo + '-misc-use-anonymous-namespace', - -performance-inefficient-string-concatenation, - -performance-no-int-to-ptr, - -performance-avoid-endl, - -performance-unnecessary-value-param, + '-modernize-avoid-c-arrays', + '-modernize-concat-nested-namespaces', + '-modernize-macro-to-enum', + '-modernize-pass-by-value', + '-modernize-return-braced-init-list', + '-modernize-use-auto', + '-modernize-use-default-member-init', + '-modernize-use-emplace', + '-modernize-use-nodiscard', + '-modernize-use-override', + '-modernize-use-trailing-return-type', - -portability-simd-intrinsics, + '-performance-inefficient-string-concatenation', + '-performance-no-int-to-ptr', + '-performance-avoid-endl', + '-performance-unnecessary-value-param', - -readability-avoid-unconditional-preprocessor-if, - -readability-braces-around-statements, - -readability-convert-member-functions-to-static, - -readability-else-after-return, - -readability-function-cognitive-complexity, - -readability-function-size, - -readability-identifier-length, - -readability-identifier-naming, # useful but too slow - -readability-implicit-bool-conversion, - -readability-isolate-declaration, - -readability-magic-numbers, - -readability-named-parameter, - -readability-redundant-declaration, - -readability-simplify-boolean-expr, - -readability-static-accessed-through-instance, - -readability-suspicious-call-argument, - -readability-uppercase-literal-suffix, - -readability-use-anyofallof, + '-portability-simd-intrinsics', - -zircon-*, -' + '-readability-avoid-unconditional-preprocessor-if', + '-readability-braces-around-statements', + '-readability-convert-member-functions-to-static', + '-readability-else-after-return', + '-readability-function-cognitive-complexity', + '-readability-function-size', + '-readability-identifier-length', + '-readability-identifier-naming', # useful but too slow + '-readability-implicit-bool-conversion', + '-readability-isolate-declaration', + '-readability-magic-numbers', + '-readability-named-parameter', + '-readability-redundant-declaration', + '-readability-simplify-boolean-expr', + '-readability-static-accessed-through-instance', + 
'-readability-suspicious-call-argument', + '-readability-uppercase-literal-suffix', + '-readability-use-anyofallof', + + '-zircon-*' +] WarningsAsErrors: '*' From e5e84419aff0f559bc545737bfdc0518a732f7ff Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 10 Mar 2024 14:29:18 +0000 Subject: [PATCH 145/374] Fix clang-tidy-s --- contrib/libmetrohash/src/metrohash128.h | 3 +++ src/Access/AccessControl.h | 10 +++++----- src/Access/IAccessStorage.cpp | 2 +- src/Access/IAccessStorage.h | 2 +- src/Common/Arena.h | 4 +--- src/Common/DNSResolver.cpp | 2 +- src/Common/DNSResolver.h | 2 +- src/Common/DateLUTImpl.h | 2 +- src/Common/MultiVersion.h | 4 ++-- src/Common/PODArray.h | 6 +++--- src/Common/SipHash.h | 2 +- src/Common/TransactionID.h | 2 +- src/Common/ZooKeeper/IKeeper.cpp | 8 ++++---- src/Common/ZooKeeper/IKeeper.h | 16 ++++++++-------- src/Common/logger_useful.h | 16 ++++++++-------- src/Core/PostgreSQL/insertPostgreSQLValue.cpp | 4 ++-- src/Core/PostgreSQL/insertPostgreSQLValue.h | 4 ++-- src/Core/Settings.h | 2 ++ src/Dictionaries/CacheDictionary.cpp | 4 ++-- src/Dictionaries/CacheDictionary.h | 2 +- .../GeodataProviders/IHierarchiesProvider.h | 2 +- src/Dictionaries/RegExpTreeDictionary.cpp | 2 +- src/Dictionaries/RegExpTreeDictionary.h | 2 +- src/Functions/IFunction.h | 4 ---- src/IO/ReadSettings.h | 1 + src/Interpreters/AsynchronousInsertQueue.cpp | 2 +- src/Interpreters/AsynchronousInsertQueue.h | 2 +- src/Interpreters/Context.h | 4 ++-- src/Interpreters/IExternalLoadable.h | 2 +- src/Interpreters/ProcessList.h | 2 +- src/Processors/Chunk.h | 2 +- .../Algorithms/AggregatingSortedAlgorithm.cpp | 2 +- src/Processors/Port.h | 2 +- src/Processors/TTL/TTLUpdateInfoAlgorithm.cpp | 4 ++-- src/Processors/TTL/TTLUpdateInfoAlgorithm.h | 4 ++-- src/Storages/StorageInMemoryMetadata.h | 4 ++-- 36 files changed, 69 insertions(+), 69 deletions(-) diff --git a/contrib/libmetrohash/src/metrohash128.h b/contrib/libmetrohash/src/metrohash128.h index 2dbb6ca5a8a..f507c917caf 100644 --- a/contrib/libmetrohash/src/metrohash128.h +++ b/contrib/libmetrohash/src/metrohash128.h @@ -17,6 +17,8 @@ #ifndef METROHASH_METROHASH_128_H #define METROHASH_METROHASH_128_H +// NOLINTBEGIN(readability-avoid-const-params-in-decls) + #include class MetroHash128 @@ -68,5 +70,6 @@ private: void metrohash128_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out); void metrohash128_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out); +// NOLINTEND(readability-avoid-const-params-in-decls) #endif // #ifndef METROHASH_METROHASH_128_H diff --git a/src/Access/AccessControl.h b/src/Access/AccessControl.h index 55ea4e4f717..1af74e02fb7 100644 --- a/src/Access/AccessControl.h +++ b/src/Access/AccessControl.h @@ -133,20 +133,20 @@ public: /// This function also enables custom prefixes to be used. void setCustomSettingsPrefixes(const Strings & prefixes); void setCustomSettingsPrefixes(const String & comma_separated_prefixes); - bool isSettingNameAllowed(const std::string_view name) const; - void checkSettingNameIsAllowed(const std::string_view name) const; + bool isSettingNameAllowed(std::string_view name) const; + void checkSettingNameIsAllowed(std::string_view name) const; /// Allows implicit user creation without password (by default it's allowed). /// In other words, allow 'CREATE USER' queries without 'IDENTIFIED WITH' clause. 
- void setImplicitNoPasswordAllowed(const bool allow_implicit_no_password_); + void setImplicitNoPasswordAllowed(bool allow_implicit_no_password_); bool isImplicitNoPasswordAllowed() const; /// Allows users without password (by default it's allowed). - void setNoPasswordAllowed(const bool allow_no_password_); + void setNoPasswordAllowed(bool allow_no_password_); bool isNoPasswordAllowed() const; /// Allows users with plaintext password (by default it's allowed). - void setPlaintextPasswordAllowed(const bool allow_plaintext_password_); + void setPlaintextPasswordAllowed(bool allow_plaintext_password_); bool isPlaintextPasswordAllowed() const; /// Default password type when the user does not specify it. diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index fbe9e231002..1d6b8d99cd5 100644 --- a/src/Access/IAccessStorage.cpp +++ b/src/Access/IAccessStorage.cpp @@ -616,7 +616,7 @@ UUID IAccessStorage::generateRandomID() } -void IAccessStorage::clearConflictsInEntitiesList(std::vector> & entities, const LoggerPtr log_) +void IAccessStorage::clearConflictsInEntitiesList(std::vector> & entities, LoggerPtr log_) { std::unordered_map positions_by_id; std::unordered_map positions_by_type_and_name[static_cast(AccessEntityType::MAX)]; diff --git a/src/Access/IAccessStorage.h b/src/Access/IAccessStorage.h index ebb5a39cdf0..ad78bf92e02 100644 --- a/src/Access/IAccessStorage.h +++ b/src/Access/IAccessStorage.h @@ -228,7 +228,7 @@ protected: static UUID generateRandomID(); LoggerPtr getLogger() const; static String formatEntityTypeWithName(AccessEntityType type, const String & name) { return AccessEntityTypeInfo::get(type).formatEntityNameWithType(name); } - static void clearConflictsInEntitiesList(std::vector> & entities, const LoggerPtr log_); + static void clearConflictsInEntitiesList(std::vector> & entities, LoggerPtr log_); [[noreturn]] void throwNotFound(const UUID & id) const; [[noreturn]] void throwNotFound(AccessEntityType type, const String & name) const; [[noreturn]] static void throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type); diff --git a/src/Common/Arena.h b/src/Common/Arena.h index cb26397844b..ba5b9ea9205 100644 --- a/src/Common/Arena.h +++ b/src/Common/Arena.h @@ -47,9 +47,7 @@ private: std::unique_ptr prev; - MemoryChunk() - { - } + MemoryChunk() = default; void swap(MemoryChunk & other) { diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index e36e1483da8..4b577a251af 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -297,7 +297,7 @@ void DNSResolver::setDisableCacheFlag(bool is_disabled) impl->disable_cache = is_disabled; } -void DNSResolver::setCacheMaxEntries(const UInt64 cache_max_entries) +void DNSResolver::setCacheMaxEntries(UInt64 cache_max_entries) { impl->cache_address.setMaxSizeInBytes(cache_max_entries); impl->cache_host.setMaxSizeInBytes(cache_max_entries); diff --git a/src/Common/DNSResolver.h b/src/Common/DNSResolver.h index e3030e51a96..1ddd9d3b991 100644 --- a/src/Common/DNSResolver.h +++ b/src/Common/DNSResolver.h @@ -56,7 +56,7 @@ public: void setDisableCacheFlag(bool is_disabled = true); /// Set a limit of entries in cache - void setCacheMaxEntries(const UInt64 cache_max_entries); + void setCacheMaxEntries(UInt64 cache_max_entries); /// Drops all caches void dropCache(); diff --git a/src/Common/DateLUTImpl.h b/src/Common/DateLUTImpl.h index 7bf66c0504a..4087e77d588 100644 --- a/src/Common/DateLUTImpl.h +++ b/src/Common/DateLUTImpl.h @@ 
-255,7 +255,7 @@ private: static LUTIndex toLUTIndex(ExtendedDayNum d) { - return normalizeLUTIndex(static_cast(d + daynum_offset_epoch)); + return normalizeLUTIndex(static_cast(d) + daynum_offset_epoch); } LUTIndex toLUTIndex(Time t) const diff --git a/src/Common/MultiVersion.h b/src/Common/MultiVersion.h index 8f488f9fcbc..680e224f869 100644 --- a/src/Common/MultiVersion.h +++ b/src/Common/MultiVersion.h @@ -41,9 +41,9 @@ public: } /// There is no copy constructor because only one MultiVersion should own the same object. - MultiVersion(MultiVersion && src) { *this = std::move(src); } + MultiVersion(MultiVersion && src) { *this = std::move(src); } /// NOLINT - MultiVersion & operator=(MultiVersion && src) + MultiVersion & operator=(MultiVersion && src) /// NOLINT { if (this != &src) { diff --git a/src/Common/PODArray.h b/src/Common/PODArray.h index 1a4047a2588..af863e01fb2 100644 --- a/src/Common/PODArray.h +++ b/src/Common/PODArray.h @@ -25,7 +25,7 @@ */ template constexpr bool memcpy_can_be_used_for_assignment = std::is_same_v - || (std::is_integral_v && std::is_integral_v && sizeof(T) == sizeof(U)); + || (std::is_integral_v && std::is_integral_v && sizeof(T) == sizeof(U)); /// NOLINT(misc-redundant-expression) namespace DB { @@ -558,7 +558,7 @@ public: } template - void swap(PODArray & rhs, TAllocatorParams &&... allocator_params) + void swap(PODArray & rhs, TAllocatorParams &&... allocator_params) /// NOLINT(performance-noexcept-swap) { #ifndef NDEBUG this->unprotect(); @@ -756,7 +756,7 @@ public: }; template -void swap(PODArray & lhs, PODArray & rhs) +void swap(PODArray & lhs, PODArray & rhs) /// NOLINT { lhs.swap(rhs); } diff --git a/src/Common/SipHash.h b/src/Common/SipHash.h index 729fb76a573..c89ee2c9d90 100644 --- a/src/Common/SipHash.h +++ b/src/Common/SipHash.h @@ -149,7 +149,7 @@ public: /// Pad the remainder, which is missing up to an 8-byte word. current_word = 0; - switch (end - data) + switch (end - data) /// NOLINT(bugprone-switch-missing-default-case) { case 7: current_bytes[CURRENT_BYTES_IDX(6)] = data[6]; [[fallthrough]]; case 6: current_bytes[CURRENT_BYTES_IDX(5)] = data[5]; [[fallthrough]]; diff --git a/src/Common/TransactionID.h b/src/Common/TransactionID.h index 3ab86f7589c..97d0072bc14 100644 --- a/src/Common/TransactionID.h +++ b/src/Common/TransactionID.h @@ -16,7 +16,7 @@ class MergeTreeTransaction; /// or transaction object is not needed and not passed intentionally. 
#ifndef NO_TRANSACTION_PTR #define NO_TRANSACTION_PTR std::shared_ptr(nullptr) -#define NO_TRANSACTION_RAW static_cast(nullptr) +#define NO_TRANSACTION_RAW static_cast(nullptr) /// NOLINT(bugprone-macro-parentheses) #endif /// Commit Sequence Number diff --git a/src/Common/ZooKeeper/IKeeper.cpp b/src/Common/ZooKeeper/IKeeper.cpp index 6c47ea68b84..7d2602bde1e 100644 --- a/src/Common/ZooKeeper/IKeeper.cpp +++ b/src/Common/ZooKeeper/IKeeper.cpp @@ -23,7 +23,7 @@ namespace ProfileEvents namespace Coordination { -void Exception::incrementErrorMetrics(const Error code_) +void Exception::incrementErrorMetrics(Error code_) { if (Coordination::isUserError(code_)) ProfileEvents::increment(ProfileEvents::ZooKeeperUserExceptions); @@ -33,14 +33,14 @@ void Exception::incrementErrorMetrics(const Error code_) ProfileEvents::increment(ProfileEvents::ZooKeeperOtherExceptions); } -Exception::Exception(const std::string & msg, const Error code_, int) +Exception::Exception(const std::string & msg, Error code_, int) : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION) , code(code_) { incrementErrorMetrics(code); } -Exception::Exception(PreformattedMessage && msg, const Error code_) +Exception::Exception(PreformattedMessage && msg, Error code_) : DB::Exception(std::move(msg), DB::ErrorCodes::KEEPER_EXCEPTION) , code(code_) { @@ -48,7 +48,7 @@ Exception::Exception(PreformattedMessage && msg, const Error code_) incrementErrorMetrics(code); } -Exception::Exception(const Error code_) +Exception::Exception(Error code_) : Exception(code_, "Coordination error: {}", errorMessage(code_)) { } diff --git a/src/Common/ZooKeeper/IKeeper.h b/src/Common/ZooKeeper/IKeeper.h index c7b902ea03a..ec49c94808e 100644 --- a/src/Common/ZooKeeper/IKeeper.h +++ b/src/Common/ZooKeeper/IKeeper.h @@ -466,13 +466,13 @@ class Exception : public DB::Exception { private: /// Delegate constructor, used to minimize repetition; last parameter used for overload resolution. - Exception(const std::string & msg, const Error code_, int); /// NOLINT - Exception(PreformattedMessage && msg, const Error code_); + Exception(const std::string & msg, Error code_, int); /// NOLINT + Exception(PreformattedMessage && msg, Error code_); /// Message must be a compile-time constant template requires std::is_convertible_v - Exception(T && message, const Error code_) : DB::Exception(std::forward(message), DB::ErrorCodes::KEEPER_EXCEPTION, /* remote_= */ false), code(code_) + Exception(T && message, Error code_) : DB::Exception(std::forward(message), DB::ErrorCodes::KEEPER_EXCEPTION, /* remote_= */ false), code(code_) { incrementErrorMetrics(code); } @@ -480,23 +480,23 @@ private: static void incrementErrorMetrics(Error code_); public: - explicit Exception(const Error code_); /// NOLINT + explicit Exception(Error code_); /// NOLINT Exception(const Exception & exc); template - Exception(const Error code_, FormatStringHelper fmt, Args &&... args) + Exception(Error code_, FormatStringHelper fmt, Args &&... args) : DB::Exception(DB::ErrorCodes::KEEPER_EXCEPTION, std::move(fmt), std::forward(args)...) 
, code(code_) { incrementErrorMetrics(code); } - inline static Exception createDeprecated(const std::string & msg, const Error code_) + inline static Exception createDeprecated(const std::string & msg, Error code_) { return Exception(msg, code_, 0); } - inline static Exception fromPath(const Error code_, const std::string & path) + inline static Exception fromPath(Error code_, const std::string & path) { return Exception(code_, "Coordination error: {}, path {}", errorMessage(code_), path); } @@ -504,7 +504,7 @@ public: /// Message must be a compile-time constant template requires std::is_convertible_v - inline static Exception fromMessage(const Error code_, T && message) + inline static Exception fromMessage(Error code_, T && message) { return Exception(std::forward(message), code_); } diff --git a/src/Common/logger_useful.h b/src/Common/logger_useful.h index 8e78e93e198..013b35e695e 100644 --- a/src/Common/logger_useful.h +++ b/src/Common/logger_useful.h @@ -19,14 +19,14 @@ namespace Poco { class Logger; } using LogSeriesLimiterPtr = std::shared_ptr; -namespace +namespace impl { - [[maybe_unused]] LoggerPtr getLoggerHelper(const LoggerPtr & logger) { return logger; } - [[maybe_unused]] LoggerPtr getLoggerHelper(const AtomicLogger & logger) { return logger.load(); } - [[maybe_unused]] const ::Poco::Logger * getLoggerHelper(const ::Poco::Logger * logger) { return logger; } - [[maybe_unused]] std::unique_ptr getLoggerHelper(std::unique_ptr && logger) { return logger; } - [[maybe_unused]] std::unique_ptr getLoggerHelper(std::unique_ptr && logger) { return logger; } - [[maybe_unused]] LogSeriesLimiterPtr getLoggerHelper(LogSeriesLimiterPtr & logger) { return logger; } + [[maybe_unused]] inline LoggerPtr getLoggerHelper(const LoggerPtr & logger) { return logger; } + [[maybe_unused]] inline LoggerPtr getLoggerHelper(const AtomicLogger & logger) { return logger.load(); } + [[maybe_unused]] inline const ::Poco::Logger * getLoggerHelper(const ::Poco::Logger * logger) { return logger; } + [[maybe_unused]] inline std::unique_ptr getLoggerHelper(std::unique_ptr && logger) { return logger; } + [[maybe_unused]] inline std::unique_ptr getLoggerHelper(std::unique_ptr && logger) { return logger; } + [[maybe_unused]] inline LogSeriesLimiterPtr getLoggerHelper(LogSeriesLimiterPtr & logger) { return logger; } } #define LOG_IMPL_FIRST_ARG(X, ...) X @@ -65,7 +65,7 @@ namespace #define LOG_IMPL(logger, priority, PRIORITY, ...) 
do \ { \ - auto _logger = ::getLoggerHelper(logger); \ + auto _logger = ::impl::getLoggerHelper(logger); \ const bool _is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \ (DB::CurrentThread::get().getClientLogsLevel() >= (priority)); \ if (!_is_clients_log && !_logger->is((PRIORITY))) \ diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp index aa60bdee28a..b507b300769 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp @@ -36,7 +36,7 @@ void insertDefaultPostgreSQLValue(IColumn & column, const IColumn & sample_colum void insertPostgreSQLValue( IColumn & column, std::string_view value, - const ExternalResultDescription::ValueType type, const DataTypePtr data_type, + ExternalResultDescription::ValueType type, DataTypePtr data_type, const std::unordered_map & array_info, size_t idx) { switch (type) @@ -170,7 +170,7 @@ void insertPostgreSQLValue( void preparePostgreSQLArrayInfo( - std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type) + std::unordered_map & array_info, size_t column_idx, DataTypePtr data_type) { const auto * array_type = typeid_cast(data_type.get()); auto nested = array_type->getNestedType(); diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.h b/src/Core/PostgreSQL/insertPostgreSQLValue.h index 3bc83292b96..bfb85422aa1 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.h +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.h @@ -22,11 +22,11 @@ struct PostgreSQLArrayInfo void insertPostgreSQLValue( IColumn & column, std::string_view value, - const ExternalResultDescription::ValueType type, const DataTypePtr data_type, + ExternalResultDescription::ValueType type, DataTypePtr data_type, const std::unordered_map & array_info, size_t idx); void preparePostgreSQLArrayInfo( - std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type); + std::unordered_map & array_info, size_t column_idx, DataTypePtr data_type); void insertDefaultPostgreSQLValue(IColumn & column, const IColumn & sample_column); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index d70a6cf51c5..c41db9d2141 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1192,6 +1192,7 @@ class IColumn; FORMAT_FACTORY_SETTINGS(M, ALIAS) \ OBSOLETE_FORMAT_SETTINGS(M, ALIAS) \ +/// NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) DECLARE_SETTINGS_TRAITS_ALLOW_CUSTOM_SETTINGS(SettingsTraits, LIST_OF_SETTINGS) @@ -1236,6 +1237,7 @@ private: /* * User-specified file format settings for File and URL engines. 
*/ +/// NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) DECLARE_SETTINGS_TRAITS(FormatFactorySettingsTraits, LIST_OF_ALL_FORMAT_SETTINGS) struct FormatFactorySettings : public BaseSettings diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index 8444042db9e..6e9b09f8919 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -423,7 +423,7 @@ MutableColumns CacheDictionary::aggregateColumnsInOrderOfKe const DictionaryStorageFetchRequest & request, const MutableColumns & fetched_columns, const PaddedPODArray & key_index_to_state, - IColumn::Filter * const default_mask) const + IColumn::Filter * default_mask) const { MutableColumns aggregated_columns = request.makeAttributesResultColumns(); @@ -473,7 +473,7 @@ MutableColumns CacheDictionary::aggregateColumns( const PaddedPODArray & key_index_to_fetched_columns_from_storage_result, const MutableColumns & fetched_columns_during_update, const HashMap & found_keys_to_fetched_columns_during_update_index, - IColumn::Filter * const default_mask) const + IColumn::Filter * default_mask) const { /** * Aggregation of columns fetched from storage and from source during update. diff --git a/src/Dictionaries/CacheDictionary.h b/src/Dictionaries/CacheDictionary.h index 8897fb40fa9..c02fb91c60e 100644 --- a/src/Dictionaries/CacheDictionary.h +++ b/src/Dictionaries/CacheDictionary.h @@ -162,7 +162,7 @@ private: const DictionaryStorageFetchRequest & request, const MutableColumns & fetched_columns, const PaddedPODArray & key_index_to_state, - IColumn::Filter * const default_mask = nullptr) const; + IColumn::Filter * default_mask = nullptr) const; MutableColumns aggregateColumns( const PaddedPODArray & keys, diff --git a/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h index 68ab0fdca2d..a4b88127786 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h +++ b/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h @@ -14,7 +14,7 @@ class IRegionsHierarchyReader public: virtual bool readNext(RegionEntry & entry) = 0; - virtual ~IRegionsHierarchyReader() {} + virtual ~IRegionsHierarchyReader() = default; }; using IRegionsHierarchyReaderPtr = std::unique_ptr; diff --git a/src/Dictionaries/RegExpTreeDictionary.cpp b/src/Dictionaries/RegExpTreeDictionary.cpp index 4d82aa9ca0e..1f5c2d6d2c7 100644 --- a/src/Dictionaries/RegExpTreeDictionary.cpp +++ b/src/Dictionaries/RegExpTreeDictionary.cpp @@ -568,7 +568,7 @@ bool RegExpTreeDictionary::setAttributesShortCircuit( const String & data, std::unordered_set & visited_nodes, const std::unordered_map & attributes, - std::unordered_set * const defaults) const + std::unordered_set * defaults) const { if (visited_nodes.contains(id)) return attributes_to_set.attributesFull() == attributes.size(); diff --git a/src/Dictionaries/RegExpTreeDictionary.h b/src/Dictionaries/RegExpTreeDictionary.h index 9e14abb49d0..d6bc90ef651 100644 --- a/src/Dictionaries/RegExpTreeDictionary.h +++ b/src/Dictionaries/RegExpTreeDictionary.h @@ -210,7 +210,7 @@ private: const String & data, std::unordered_set & visited_nodes, const std::unordered_map & attributes, - std::unordered_set * const defaults) const; + std::unordered_set * defaults) const; struct RegexTreeNode; using RegexTreeNodePtr = std::shared_ptr; diff --git a/src/Functions/IFunction.h b/src/Functions/IFunction.h index 05aa08e2ad7..9b7cdf12d57 100644 --- a/src/Functions/IFunction.h +++ 
b/src/Functions/IFunction.h @@ -13,10 +13,6 @@ #include -#if USE_EMBEDDED_COMPILER -# include -#endif - /// This file contains user interface for functions. namespace llvm diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 38904df4403..5c401c0c8d9 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -63,6 +63,7 @@ enum class RemoteFSReadMethod class MMappedFileCache; class PageCache; +/// NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) struct ReadSettings { /// Method to use reading from local filesystem. diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index 7d56dbabe3c..9327f31b6ff 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -905,7 +905,7 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing( const InsertDataPtr & data, const Block & header, const ContextPtr & insert_context, - const LoggerPtr logger, + LoggerPtr logger, LogFunc && add_to_async_insert_log) { size_t total_rows = 0; diff --git a/src/Interpreters/AsynchronousInsertQueue.h b/src/Interpreters/AsynchronousInsertQueue.h index f60b3d343fb..5076701d0b0 100644 --- a/src/Interpreters/AsynchronousInsertQueue.h +++ b/src/Interpreters/AsynchronousInsertQueue.h @@ -265,7 +265,7 @@ private: const InsertDataPtr & data, const Block & header, const ContextPtr & insert_context, - const LoggerPtr logger, + LoggerPtr logger, LogFunc && add_to_async_insert_log); template diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index c8aa3604a6f..43df8d6adf2 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -330,7 +330,7 @@ protected: return *this; } - void swap(QueryAccessInfo & rhs) + void swap(QueryAccessInfo & rhs) noexcept { std::swap(databases, rhs.databases); std::swap(tables, rhs.tables); @@ -680,7 +680,7 @@ public: void addSpecialScalar(const String & name, const Block & block); const QueryAccessInfo & getQueryAccessInfo() const { return *getQueryAccessInfoPtr(); } - const QueryAccessInfoPtr getQueryAccessInfoPtr() const { return query_access_info; } + QueryAccessInfoPtr getQueryAccessInfoPtr() const { return query_access_info; } void setQueryAccessInfo(QueryAccessInfoPtr other) { query_access_info = other; } void addQueryAccessInfo( diff --git a/src/Interpreters/IExternalLoadable.h b/src/Interpreters/IExternalLoadable.h index 3c004508b0a..47031778876 100644 --- a/src/Interpreters/IExternalLoadable.h +++ b/src/Interpreters/IExternalLoadable.h @@ -23,7 +23,7 @@ struct ExternalLoadableLifetime UInt64 max_sec = 0; ExternalLoadableLifetime(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix); - ExternalLoadableLifetime() {} + ExternalLoadableLifetime() = default; }; /// Get delay before trying to load again after error. 
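The hunks above and below are mechanical clang-tidy cleanups: top-level `const` is dropped from parameters and return values that are passed by value, `swap()` members are marked `noexcept`, and empty special member functions become `= default`. A minimal standalone sketch of the reasoning behind the first two kinds of change — every name here is invented for illustration and is not taken from the patch:

```cpp
#include <memory>
#include <utility>

/// Toy types standing in for the smart-pointer-like values touched by the patch.
struct Widget { int value = 0; };
using WidgetPtr = std::shared_ptr<Widget>;

/// Top-level const on a by-value parameter is not part of the function type:
/// these two declarations name the same function, so the `const` gives callers
/// nothing and only stops the definition from reusing its local copy.
void consume(WidgetPtr w);
void consume(const WidgetPtr w);   /// redeclaration of the function above

void consume(WidgetPtr w)          /// the single definition
{
    w = nullptr;                   /// fine here; would not compile with `const WidgetPtr w`
}

/// Likewise, returning `const WidgetPtr` instead of `WidgetPtr` would stop the
/// caller from moving out of the returned temporary.

/// A noexcept swap documents and enforces that member-wise swapping cannot
/// throw, which containers and std::swap-based algorithms rely on.
struct Holder
{
    WidgetPtr ptr;
    void swap(Holder & rhs) noexcept { std::swap(ptr, rhs.ptr); }
};

int main()
{
    Holder a, b;
    a.swap(b);
    consume(std::make_shared<Widget>());
    return 0;
}
```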
diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index 1c253f562e8..ad47041c762 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -318,7 +318,7 @@ public: ~ProcessListEntry(); QueryStatusPtr getQueryStatus() { return *it; } - const QueryStatusPtr getQueryStatus() const { return *it; } + QueryStatusPtr getQueryStatus() const { return *it; } }; diff --git a/src/Processors/Chunk.h b/src/Processors/Chunk.h index 9a7d6bc294d..4f753798eaa 100644 --- a/src/Processors/Chunk.h +++ b/src/Processors/Chunk.h @@ -59,7 +59,7 @@ public: Chunk clone() const; - void swap(Chunk & other) + void swap(Chunk & other) noexcept { columns.swap(other.columns); chunk_info.swap(other.chunk_info); diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp index d2d2434c477..3bd0b532d90 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp @@ -126,7 +126,7 @@ static void postprocessChunk(Chunk & chunk, const AggregatingSortedAlgorithm::Co AggregatingSortedAlgorithm::SimpleAggregateDescription::SimpleAggregateDescription( - AggregateFunctionPtr function_, const size_t column_number_, + AggregateFunctionPtr function_, size_t column_number_, DataTypePtr nested_type_, DataTypePtr real_type_) : function(std::move(function_)), column_number(column_number_) , nested_type(std::move(nested_type_)), real_type(std::move(real_type_)) diff --git a/src/Processors/Port.h b/src/Processors/Port.h index 67af2f041aa..f3c7bbb5fee 100644 --- a/src/Processors/Port.h +++ b/src/Processors/Port.h @@ -110,7 +110,7 @@ protected: return result; } - uintptr_t ALWAYS_INLINE swap(std::atomic & value, std::uintptr_t flags, std::uintptr_t mask) + uintptr_t ALWAYS_INLINE swap(std::atomic & value, std::uintptr_t flags, std::uintptr_t mask) /// NOLINT { Data * expected = nullptr; Data * desired = getPtr(flags | getUInt(data)); diff --git a/src/Processors/TTL/TTLUpdateInfoAlgorithm.cpp b/src/Processors/TTL/TTLUpdateInfoAlgorithm.cpp index b7cddf3c165..13d3030bbb8 100644 --- a/src/Processors/TTL/TTLUpdateInfoAlgorithm.cpp +++ b/src/Processors/TTL/TTLUpdateInfoAlgorithm.cpp @@ -6,8 +6,8 @@ namespace DB TTLUpdateInfoAlgorithm::TTLUpdateInfoAlgorithm( const TTLExpressions & ttl_expressions_, const TTLDescription & description_, - const TTLUpdateField ttl_update_field_, - const String ttl_update_key_, + TTLUpdateField ttl_update_field_, + String ttl_update_key_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_) diff --git a/src/Processors/TTL/TTLUpdateInfoAlgorithm.h b/src/Processors/TTL/TTLUpdateInfoAlgorithm.h index 0cf31765aef..b6aee6f7cb0 100644 --- a/src/Processors/TTL/TTLUpdateInfoAlgorithm.h +++ b/src/Processors/TTL/TTLUpdateInfoAlgorithm.h @@ -22,8 +22,8 @@ public: TTLUpdateInfoAlgorithm( const TTLExpressions & ttl_expressions_, const TTLDescription & description_, - const TTLUpdateField ttl_update_field_, - const String ttl_update_key_, + TTLUpdateField ttl_update_field_, + String ttl_update_key_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_ ); diff --git a/src/Storages/StorageInMemoryMetadata.h b/src/Storages/StorageInMemoryMetadata.h index 2823aba1224..69cd3422a7d 100644 --- a/src/Storages/StorageInMemoryMetadata.h +++ b/src/Storages/StorageInMemoryMetadata.h @@ -72,8 +72,8 @@ struct StorageInMemoryMetadata StorageInMemoryMetadata(const StorageInMemoryMetadata & other); 
StorageInMemoryMetadata & operator=(const StorageInMemoryMetadata & other); - StorageInMemoryMetadata(StorageInMemoryMetadata && other) = default; - StorageInMemoryMetadata & operator=(StorageInMemoryMetadata && other) = default; + StorageInMemoryMetadata(StorageInMemoryMetadata && other) = default; /// NOLINT + StorageInMemoryMetadata & operator=(StorageInMemoryMetadata && other) = default; /// NOLINT /// NOTE: Thread unsafe part. You should not modify same StorageInMemoryMetadata /// structure from different threads. It should be used as MultiVersion From 90b27432a26c0a5204e09ff5ff5f2ae8df3055af Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 11 Mar 2024 12:18:58 +0100 Subject: [PATCH 146/374] Update test.py --- tests/integration/test_backup_restore_s3/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index 452a9143067..f3f4837c317 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -130,7 +130,7 @@ def check_system_tables(backup_query_id=None): if disk ] expected_disks = ( - ("default", "local", "", ""), + ("default", "local", "None", "None"), ("disk_s3", "object_storage", "s3", "local"), ("disk_s3_cache", "object_storage", "s3", "local"), ("disk_s3_other_bucket", "object_storage", "s3", "local"), From 57f6263f67dd91e624003199295c840a228947a0 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 11 Mar 2024 12:31:40 +0100 Subject: [PATCH 147/374] Lock contention fix --- src/Common/ProfileEvents.cpp | 1 + src/Interpreters/Cache/FileCache.cpp | 12 +++++++++--- src/Interpreters/Cache/FileCache.h | 2 +- src/Interpreters/Cache/FileCache_fwd.h | 1 + src/Interpreters/Cache/Guards.h | 15 ++++++++++++--- 5 files changed, 24 insertions(+), 7 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index c1ac3d08245..ab1a16a3edf 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -476,6 +476,7 @@ The server successfully detected this situation and will download merged part fr M(FileSegmentRemoveMicroseconds, "File segment remove() time") \ M(FileSegmentHolderCompleteMicroseconds, "File segments holder complete() time") \ M(FileSegmentFailToIncreasePriority, "Number of times the priority was not increased due to a high contention on the cache lock") \ + M(FilesystemCacheFailToReserveSpaceBecauseOfLockContention, "Number of times space reservation was skipped due to a high contention on the cache lock") \ M(FilesystemCacheHoldFileSegments, "Filesystem cache file segments count, which were hold") \ M(FilesystemCacheUnusedHoldFileSegments, "Filesystem cache file segments count, which were hold, but not used (because of seek or LIMIT n, etc)") \ \ diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 9c705ddc27c..5650b9ce44e 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -27,6 +27,7 @@ namespace ProfileEvents extern const Event FilesystemCacheReserveMicroseconds; extern const Event FilesystemCacheGetOrSetMicroseconds; extern const Event FilesystemCacheGetMicroseconds; + extern const Event FilesystemCacheFailToReserveSpaceBecauseOfLockContention; } namespace DB @@ -188,9 +189,9 @@ CacheGuard::Lock FileCache::lockCache() const return cache_guard.lock(); } -CacheGuard::Lock FileCache::tryLockCache() const 
+CacheGuard::Lock FileCache::tryLockCache(std::optional acquire_timeout) const { - return cache_guard.tryLock(); + return acquire_timeout.has_value() ? cache_guard.tryLockFor(acquire_timeout.value()) : cache_guard.tryLock(); } FileSegments FileCache::getImpl(const LockedKey & locked_key, const FileSegment::Range & range, size_t file_segments_limit) const @@ -781,7 +782,12 @@ bool FileCache::tryReserve( ProfileEventTimeIncrement watch(ProfileEvents::FilesystemCacheReserveMicroseconds); assertInitialized(); - auto cache_lock = lockCache(); + auto cache_lock = tryLockCache(std::chrono::milliseconds(FILECACHE_TRY_RESERVE_LOCK_TIMEOUT_MILLISECONDS)); + if (!cache_lock) + { + ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention); + return false; + } LOG_TEST( log, "Trying to reserve space ({} bytes) for {}:{}, current usage {}/{}", diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 5b665ad0271..7434b2ac78a 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -173,7 +173,7 @@ public: void deactivateBackgroundOperations(); CacheGuard::Lock lockCache() const; - CacheGuard::Lock tryLockCache() const; + CacheGuard::Lock tryLockCache(std::optional acquire_timeout = std::nullopt) const; std::vector sync(); diff --git a/src/Interpreters/Cache/FileCache_fwd.h b/src/Interpreters/Cache/FileCache_fwd.h index 06261b19db7..eaed279e7fd 100644 --- a/src/Interpreters/Cache/FileCache_fwd.h +++ b/src/Interpreters/Cache/FileCache_fwd.h @@ -12,6 +12,7 @@ static constexpr int FILECACHE_DEFAULT_LOAD_METADATA_THREADS = 16; static constexpr int FILECACHE_DEFAULT_MAX_ELEMENTS = 10000000; static constexpr int FILECACHE_DEFAULT_HITS_THRESHOLD = 0; static constexpr size_t FILECACHE_BYPASS_THRESHOLD = 256 * 1024 * 1024; +static constexpr size_t FILECACHE_TRY_RESERVE_LOCK_TIMEOUT_MILLISECONDS = 1000; /// 1 sec. 
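The idea behind this commit, sketched independently of the patch: cache space reservation is an optimization rather than a correctness requirement, so instead of blocking unconditionally on the shared cache mutex, `tryReserve` now takes the lock with a bounded wait (the one-second constant above) and gives up — incrementing a profile event — when the lock stays contended. A minimal illustration of that pattern with a plain `std::timed_mutex`; the names below are invented for the example and are not the real cache API:

```cpp
#include <atomic>
#include <chrono>
#include <cstddef>
#include <mutex>

std::timed_mutex cache_mutex;             /// stands in for the shared cache lock
std::atomic<size_t> reserve_skipped{0};   /// the patch bumps a ProfileEvents counter instead

bool tryReserveSpace(size_t /*bytes*/)
{
    /// Bounded wait instead of an unconditional lock(): under heavy contention it is
    /// cheaper to skip the reservation than to stall the calling query thread.
    std::unique_lock<std::timed_mutex> lock(cache_mutex, std::chrono::milliseconds(1000));
    if (!lock.owns_lock())
    {
        ++reserve_skipped;
        return false;                     /// the caller falls back to bypassing the cache
    }

    /// ... perform the actual reservation while holding the lock ...
    return true;
}

int main()
{
    return tryReserveSpace(4096) ? 0 : 1;
}
```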
class FileCache; using FileCachePtr = std::shared_ptr; diff --git a/src/Interpreters/Cache/Guards.h b/src/Interpreters/Cache/Guards.h index 5729620d82f..0ac7cb80483 100644 --- a/src/Interpreters/Cache/Guards.h +++ b/src/Interpreters/Cache/Guards.h @@ -61,17 +61,26 @@ namespace DB */ struct CacheGuard : private boost::noncopyable { + using Mutex = std::timed_mutex; /// struct is used (not keyword `using`) to make CacheGuard::Lock non-interchangable with other guards locks /// so, we wouldn't be able to pass CacheGuard::Lock to a function which accepts KeyGuard::Lock, for example - struct Lock : public std::unique_lock + struct Lock : public std::unique_lock { - using Base = std::unique_lock; + using Base = std::unique_lock; using Base::Base; }; Lock lock() { return Lock(mutex); } + Lock tryLock() { return Lock(mutex, std::try_to_lock); } - std::mutex mutex; + + Lock tryLockFor(const std::chrono::milliseconds & acquire_timeout) + { + return Lock(mutex, std::chrono::duration(acquire_timeout)); + } + +private: + Mutex mutex; }; /** From c0689f3760c738dc1f73cf58c1c9de12b4c096a7 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 7 Mar 2024 11:24:34 +0000 Subject: [PATCH 148/374] Fix ASTRenameQuery::clone --- src/Parsers/ASTRenameQuery.h | 1 + src/Parsers/tests/gtest_Parser.cpp | 38 ++++++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/src/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h index 73d12be094a..582060ab34a 100644 --- a/src/Parsers/ASTRenameQuery.h +++ b/src/Parsers/ASTRenameQuery.h @@ -60,6 +60,7 @@ public: ASTPtr clone() const override { auto res = std::make_shared(*this); + res->cloneChildren(); cloneOutputOptions(*res); return res; } diff --git a/src/Parsers/tests/gtest_Parser.cpp b/src/Parsers/tests/gtest_Parser.cpp index 36d2deae8d7..19947cd38cc 100644 --- a/src/Parsers/tests/gtest_Parser.cpp +++ b/src/Parsers/tests/gtest_Parser.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -62,10 +63,29 @@ TEST_P(ParserTest, parseQuery) if (std::string("CREATE USER or ALTER USER query") != parser->getName() && std::string("ATTACH access entity query") != parser->getName()) { - WriteBufferFromOwnString buf; - formatAST(*ast->clone(), buf, false, false); - String formatted_ast = buf.str(); - EXPECT_EQ(expected_ast, formatted_ast); + ASTPtr ast_clone = ast->clone(); + { + WriteBufferFromOwnString buf; + formatAST(*ast_clone, buf, false, false); + String formatted_ast = buf.str(); + EXPECT_EQ(expected_ast, formatted_ast); + } + + + ASTPtr ast_clone2 = ast_clone->clone(); + /// Break `ast_clone2`, it should not affect `ast_clone` if `clone()` implemented properly + for (auto & child : ast_clone2->children) + { + if (auto * identifier = dynamic_cast(child.get())) + identifier->setShortName("new_name"); + } + + { + WriteBufferFromOwnString buf; + formatAST(*ast_clone, buf, false, false); + String formatted_ast = buf.str(); + EXPECT_EQ(expected_ast, formatted_ast); + } } else { @@ -299,6 +319,16 @@ INSTANTIATE_TEST_SUITE_P(ParserAttachUserQuery, ParserTest, } }))); +INSTANTIATE_TEST_SUITE_P(ParserRenameQuery, ParserTest, + ::testing::Combine( + ::testing::Values(std::make_shared()), + ::testing::ValuesIn(std::initializer_list{ + { + "RENAME TABLE eligible_test TO eligible_test2", + "RENAME TABLE eligible_test TO eligible_test2" + } +}))); + INSTANTIATE_TEST_SUITE_P(ParserKQLQuery, ParserKQLTest, ::testing::Combine( ::testing::Values(std::make_shared()), From 38b8e7ac7e87b0efcea7560735697de0583e0f17 Mon Sep 17 00:00:00 
2001 From: vdimir Date: Fri, 8 Mar 2024 11:54:20 +0000 Subject: [PATCH 149/374] Maintain list of ASTRenameQuery children --- src/Interpreters/AddDefaultDatabaseVisitor.h | 8 +--- src/Interpreters/InterpreterCreateQuery.cpp | 3 +- src/Interpreters/InterpreterRenameQuery.cpp | 8 ++-- .../MySQL/InterpretersMySQLDDLQuery.cpp | 25 ++++++------ src/Interpreters/SystemLog.cpp | 3 +- src/Parsers/ASTRenameQuery.h | 38 ++++++++++++++++++- src/Parsers/ParserRenameQuery.cpp | 26 ++++--------- src/Storages/StorageMaterializedView.cpp | 5 ++- 8 files changed, 69 insertions(+), 47 deletions(-) diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index b977a73d461..e36f22e8ba1 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -275,13 +275,7 @@ private: if (only_replace_current_database_function) return; - for (ASTRenameQuery::Element & elem : node.elements) - { - if (!elem.from.database) - elem.from.database = std::make_shared(database_name); - if (!elem.to.database) - elem.to.database = std::make_shared(database_name); - } + node.setDatabaseIfNotExists(database_name); } void visitDDL(ASTAlterQuery & node, ASTPtr &) const diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index d05d8b8deb1..edd7452c130 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1612,7 +1612,6 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, executeTrivialBlockIO(fill_io, getContext()); /// Replace target table with created one - auto ast_rename = std::make_shared(); ASTRenameQuery::Element elem { ASTRenameQuery::Table @@ -1627,7 +1626,7 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, } }; - ast_rename->elements.push_back(std::move(elem)); + auto ast_rename = std::make_shared(ASTRenameQuery::Elements{std::move(elem)}); ast_rename->dictionary = create.is_dictionary; if (create.create_or_replace) { diff --git a/src/Interpreters/InterpreterRenameQuery.cpp b/src/Interpreters/InterpreterRenameQuery.cpp index 52001fdcaf4..06b6ebc9cbb 100644 --- a/src/Interpreters/InterpreterRenameQuery.cpp +++ b/src/Interpreters/InterpreterRenameQuery.cpp @@ -47,12 +47,12 @@ BlockIO InterpreterRenameQuery::execute() */ RenameDescriptions descriptions; - descriptions.reserve(rename.elements.size()); + descriptions.reserve(rename.getElements().size()); /// Don't allow to drop tables (that we are renaming); don't allow to create tables in places where tables will be renamed. 
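For context on why this commit moves element construction into the `ASTRenameQuery` constructor: generic AST operations such as `clone()` and the database-qualifying visitor walk only a node's `children` list, so the typed `ASTPtr` members inside each rename element have to be the same objects that are registered in `children`, and a clone has to deep-copy them rather than share them (the unit test added above breaks a clone's identifiers and checks that the original is unaffected). A toy sketch of that invariant — deliberately not the real `IAST` interface:

```cpp
#include <memory>
#include <string>
#include <vector>

/// Toy tree node: a generic child list (what clone(), formatting and visitors see)
/// plus a typed shortcut that must alias one of the entries in that list.
struct Node
{
    std::string name;
    std::vector<std::shared_ptr<Node>> children;
    std::shared_ptr<Node> database;

    explicit Node(std::string name_ = {}) : name(std::move(name_)) {}

    /// Register the typed member in `children` at the moment it is set,
    /// mirroring what the new ASTRenameQuery constructor does for its elements.
    void setDatabase(std::shared_ptr<Node> db)
    {
        database = std::move(db);
        children.push_back(database);
    }

    std::shared_ptr<Node> clone() const
    {
        auto res = std::make_shared<Node>(name);
        if (database)
            res->setDatabase(database->clone());   /// deep-copy, don't share subtrees
        return res;
    }
};

int main()
{
    Node original("rename");
    original.setDatabase(std::make_shared<Node>("db1"));

    auto copy = original.clone();
    copy->database->name = "db2";          /// must not leak back into `original`
    return original.database->name == "db1" ? 0 : 1;
}
```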
TableGuards table_guards; - for (const auto & elem : rename.elements) + for (const auto & elem : rename.getElements()) { descriptions.emplace_back(elem, current_database); const auto & description = descriptions.back(); @@ -186,7 +186,7 @@ AccessRightsElements InterpreterRenameQuery::getRequiredAccess(InterpreterRename { AccessRightsElements required_access; const auto & rename = query_ptr->as(); - for (const auto & elem : rename.elements) + for (const auto & elem : rename.getElements()) { if (type == RenameType::RenameTable) { @@ -214,7 +214,7 @@ AccessRightsElements InterpreterRenameQuery::getRequiredAccess(InterpreterRename void InterpreterRenameQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr) const { const auto & rename = ast->as(); - for (const auto & element : rename.elements) + for (const auto & element : rename.getElements()) { { String database = backQuoteIfNeed(!element.from.database ? getContext()->getCurrentDatabase() : element.from.getDatabase()); diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index 107b435ded4..fd7ffca2872 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -579,7 +579,7 @@ ASTs InterpreterRenameImpl::getRewrittenQueries( const InterpreterRenameImpl::TQuery & rename_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database) { ASTRenameQuery::Elements elements; - for (const auto & rename_element : rename_query.elements) + for (const auto & rename_element : rename_query.getElements()) { const auto & to_database = resolveDatabase(rename_element.to.getDatabase(), mysql_database, mapped_to_database, context); const auto & from_database = resolveDatabase(rename_element.from.getDatabase(), mysql_database, mapped_to_database, context); @@ -600,8 +600,7 @@ ASTs InterpreterRenameImpl::getRewrittenQueries( if (elements.empty()) return ASTs{}; - auto rewritten_query = std::make_shared(); - rewritten_query->elements = elements; + auto rewritten_query = std::make_shared(std::move(elements)); return ASTs{rewritten_query}; } @@ -616,7 +615,8 @@ ASTs InterpreterAlterImpl::getRewrittenQueries( return {}; auto rewritten_alter_query = std::make_shared(); - auto rewritten_rename_query = std::make_shared(); + ASTRenameQuery::Elements rename_elements; + rewritten_alter_query->setDatabase(mapped_to_database); rewritten_alter_query->setTable(alter_query.table); rewritten_alter_query->alter_object = ASTAlterQuery::AlterObjectType::TABLE; @@ -749,13 +749,13 @@ ASTs InterpreterAlterImpl::getRewrittenQueries( /// For ALTER TABLE table_name RENAME TO new_table_name_1, RENAME TO new_table_name_2; /// We just need to generate RENAME TABLE table_name TO new_table_name_2; - if (rewritten_rename_query->elements.empty()) - rewritten_rename_query->elements.push_back(ASTRenameQuery::Element()); + if (rename_elements.empty()) + rename_elements.push_back(ASTRenameQuery::Element()); - rewritten_rename_query->elements.back().from.database = std::make_shared(mapped_to_database); - rewritten_rename_query->elements.back().from.table = std::make_shared(alter_query.table); - rewritten_rename_query->elements.back().to.database = std::make_shared(mapped_to_database); - rewritten_rename_query->elements.back().to.table = std::make_shared(alter_command->new_table_name); + rename_elements.back().from.database = std::make_shared(mapped_to_database); + rename_elements.back().from.table = 
std::make_shared(alter_query.table); + rename_elements.back().to.database = std::make_shared(mapped_to_database); + rename_elements.back().to.table = std::make_shared(alter_command->new_table_name); } } @@ -765,8 +765,11 @@ ASTs InterpreterAlterImpl::getRewrittenQueries( if (!rewritten_alter_query->command_list->children.empty()) rewritten_queries.push_back(rewritten_alter_query); - if (!rewritten_rename_query->elements.empty()) + if (!rename_elements.empty()) + { + auto rewritten_rename_query = std::make_shared(std::move(rename_elements)); rewritten_queries.push_back(rewritten_rename_query); + } return rewritten_queries; } diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 6580dc3e9b7..a74b5c67726 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -563,7 +563,6 @@ void SystemLog::prepareTable() {table_id.database_name, table_id.table_name + "_" + toString(suffix)}, getContext())) ++suffix; - auto rename = std::make_shared(); ASTRenameQuery::Element elem { ASTRenameQuery::Table @@ -586,7 +585,7 @@ void SystemLog::prepareTable() old_create_query, create_query); - rename->elements.emplace_back(std::move(elem)); + auto rename = std::make_shared(ASTRenameQuery::Elements{std::move(elem)}); ActionLock merges_lock; if (DatabaseCatalog::instance().getDatabase(table_id.database_name)->getUUID() == UUIDHelpers::Nil) diff --git a/src/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h index 582060ab34a..beaf93c4761 100644 --- a/src/Parsers/ASTRenameQuery.h +++ b/src/Parsers/ASTRenameQuery.h @@ -45,7 +45,6 @@ public: }; using Elements = std::vector; - Elements elements; bool exchange{false}; /// For EXCHANGE TABLES bool database{false}; /// For RENAME DATABASE @@ -54,6 +53,41 @@ public: /// Special flag for CREATE OR REPLACE. Do not throw if the second table does not exist. bool rename_if_cannot_exchange{false}; + explicit ASTRenameQuery(Elements elements_ = {}) + : elements(std::move(elements_)) + { + for (const auto & elem : elements) + { + if (elem.from.database) + children.push_back(elem.from.database); + if (elem.from.table) + children.push_back(elem.from.table); + if (elem.to.database) + children.push_back(elem.to.database); + if (elem.to.table) + children.push_back(elem.to.table); + } + } + + void setDatabaseIfNotExists(const String & database_name) + { + for (auto & elem : elements) + { + if (!elem.from.database) + { + elem.from.database = std::make_shared(database_name); + children.push_back(elem.from.database); + } + if (!elem.to.database) + { + elem.to.database = std::make_shared(database_name); + children.push_back(elem.to.database); + } + } + } + + const Elements & getElements() const { return elements; } + /** Get the text that identifies this element. 
*/ String getID(char) const override { return "Rename"; } @@ -146,6 +180,8 @@ protected: formatOnCluster(settings); } + + Elements elements; }; } diff --git a/src/Parsers/ParserRenameQuery.cpp b/src/Parsers/ParserRenameQuery.cpp index 27f2ed1cd22..f9d29108ed6 100644 --- a/src/Parsers/ParserRenameQuery.cpp +++ b/src/Parsers/ParserRenameQuery.cpp @@ -44,15 +44,14 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!ASTQueryWithOnCluster::parse(pos, cluster_str, expected)) return false; } + ASTRenameQuery::Elements rename_elements; + rename_elements.emplace_back(); + rename_elements.back().if_exists = if_exists; + rename_elements.back().from.database = from_db; + rename_elements.back().to.database = to_db; - auto query = std::make_shared(); + auto query = std::make_shared(std::move(rename_elements)); query->database = true; - query->elements.emplace({}); - query->elements.front().if_exists = if_exists; - query->elements.front().from.database = from_db; - query->elements.front().to.database = to_db; - query->children.push_back(std::move(from_db)); - query->children.push_back(std::move(to_db)); query->cluster = cluster_str; node = query; return true; @@ -75,9 +74,8 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) const auto ignore_delim = [&] { return exchange ? s_and.ignore(pos) : s_to.ignore(pos); }; - auto query = std::make_shared(); - ASTRenameQuery::Elements & elements = query->elements; + ASTRenameQuery::Elements elements; while (true) { @@ -93,15 +91,6 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) || !ignore_delim() || !parseDatabaseAndTableAsAST(pos, expected, ref.to.database, ref.to.table)) return false; - - if (ref.from.database) - query->children.push_back(ref.from.database); - if (ref.from.table) - query->children.push_back(ref.from.table); - if (ref.to.database) - query->children.push_back(ref.to.database); - if (ref.to.table) - query->children.push_back(ref.to.table); } String cluster_str; @@ -111,6 +100,7 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; } + auto query = std::make_shared(std::move(elements)); query->cluster = cluster_str; query->exchange = exchange; query->dictionary = dictionary; diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 02cba1cf753..9958d65819b 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -465,8 +465,8 @@ void StorageMaterializedView::renameInMemory(const StorageID & new_table_id) if (!from_atomic_to_atomic_database && has_inner_table && tryGetTargetTable()) { auto new_target_table_name = generateInnerTableName(new_table_id); - auto rename = std::make_shared(); + ASTRenameQuery::Elements rename_elements; assert(inner_table_id.database_name == old_table_id.database_name); ASTRenameQuery::Element elem @@ -482,8 +482,9 @@ void StorageMaterializedView::renameInMemory(const StorageID & new_table_id) std::make_shared(new_target_table_name) } }; - rename->elements.emplace_back(std::move(elem)); + rename_elements.emplace_back(std::move(elem)); + auto rename = std::make_shared(std::move(rename_elements)); InterpreterRenameQuery(rename, getContext()).execute(); updateTargetTableId(new_table_id.database_name, new_target_table_name); } From 59c6311ead26e48f861e27d19d58deffe4c6d622 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 11 Mar 2024 09:55:13 +0000 Subject: [PATCH 150/374] improve report 
#do_not_test --- utils/check-style/process_style_check_result.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/utils/check-style/process_style_check_result.py b/utils/check-style/process_style_check_result.py index e620d85b9d0..b043aa548d7 100755 --- a/utils/check-style/process_style_check_result.py +++ b/utils/check-style/process_style_check_result.py @@ -32,8 +32,13 @@ def process_result(result_folder): if not os.path.exists(full_path): test_results.append((f"Check {name}", "SKIPPED")) elif os.stat(full_path).st_size != 0: + with open(full_path, 'r') as file: + lines = file.readlines() + if len(lines) > 100: + lines = lines[:100] + ['====TRIMMED===='] + content = "\n".join(lines) description += f"Check {name} failed. " - test_results.append((f"Check {name}", "FAIL")) + test_results.append((f"Check {name}", "FAIL", None, content)) status = "failure" else: test_results.append((f"Check {name}", "OK")) From 8f2ff2ccd833dd1cc839922c59a90360f2b882c7 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Mon, 11 Mar 2024 13:07:39 +0100 Subject: [PATCH 151/374] Enable black back --- tests/ci/style_check.py | 18 +++++++++--------- utils/check-style/check_py.sh | 4 ++-- .../check-style/process_style_check_result.py | 10 +++++----- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/ci/style_check.py b/tests/ci/style_check.py index 9f113b6e6f9..7fb5d22a3ce 100644 --- a/tests/ci/style_check.py +++ b/tests/ci/style_check.py @@ -1,12 +1,12 @@ #!/usr/bin/env python3 import argparse -from concurrent.futures import ProcessPoolExecutor import csv import logging import os import shutil import subprocess import sys +from concurrent.futures import ProcessPoolExecutor from pathlib import Path from typing import List, Tuple @@ -121,7 +121,7 @@ def checkout_last_ref(pr_info: PRInfo) -> None: def main(): logging.basicConfig(level=logging.INFO) logging.getLogger("git_helper").setLevel(logging.DEBUG) - # args = parse_args() + args = parse_args() stopwatch = Stopwatch() @@ -141,12 +141,14 @@ def main(): f"--entrypoint= -w/ClickHouse/utils/check-style " f"{image} ./check_cpp.sh" ) + cmd_py = ( f"docker run -u $(id -u ${{USER}}):$(id -g ${{USER}}) --cap-add=SYS_PTRACE " f"--volume={repo_path}:/ClickHouse --volume={temp_path}:/test_output " f"--entrypoint= -w/ClickHouse/utils/check-style " f"{image} ./check_py.sh" ) + cmd_docs = ( f"docker run -u $(id -u ${{USER}}):$(id -g ${{USER}}) --cap-add=SYS_PTRACE " f"--volume={repo_path}:/ClickHouse --volume={temp_path}:/test_output " @@ -175,12 +177,14 @@ def main(): _ = future1.result() if run_pycheck: + if args.push: + checkout_head(pr_info) logging.info("Run py files check: %s", cmd_py) future2 = executor.submit(subprocess.run, cmd_py, shell=True) _ = future2.result() - - # if args.push: - # checkout_head(pr_info) + if args.push: + commit_push_staged(pr_info) + checkout_last_ref(pr_info) subprocess.check_call( f"python3 ../../utils/check-style/process_style_check_result.py --in-results-dir {temp_path} " @@ -189,10 +193,6 @@ def main(): shell=True, ) - # if args.push: - # commit_push_staged(pr_info) - # checkout_last_ref(pr_info) - state, description, test_results, additional_files = process_result(temp_path) JobReport( diff --git a/utils/check-style/check_py.sh b/utils/check-style/check_py.sh index b729cd78124..13f4e754ed3 100755 --- a/utils/check-style/check_py.sh +++ b/utils/check-style/check_py.sh @@ -5,8 +5,8 @@ cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_ou start_total=`date +%s` # 
FIXME: 1 min to wait + head checkout -# echo "Check python formatting with black" | ts -# ./check-black -n |& tee /test_output/black_output.txt +echo "Check python formatting with black" | ts +./check-black -n |& tee /test_output/black_output.txt start=`date +%s` ./check-pylint -n |& tee /test_output/pylint_output.txt diff --git a/utils/check-style/process_style_check_result.py b/utils/check-style/process_style_check_result.py index b043aa548d7..8c9837b4fc6 100755 --- a/utils/check-style/process_style_check_result.py +++ b/utils/check-style/process_style_check_result.py @@ -1,9 +1,9 @@ #!/usr/bin/env python3 -import os -import logging import argparse import csv +import logging +import os # TODO: add typing and log files to the fourth column, think about launching @@ -17,7 +17,7 @@ def process_result(result_folder): # "shellcheck", "style", "pylint", - # "black", + "black", "mypy", "typos", "whitespaces", @@ -32,10 +32,10 @@ def process_result(result_folder): if not os.path.exists(full_path): test_results.append((f"Check {name}", "SKIPPED")) elif os.stat(full_path).st_size != 0: - with open(full_path, 'r') as file: + with open(full_path, "r") as file: lines = file.readlines() if len(lines) > 100: - lines = lines[:100] + ['====TRIMMED===='] + lines = lines[:100] + ["====TRIMMED===="] content = "\n".join(lines) description += f"Check {name} failed. " test_results.append((f"Check {name}", "FAIL", None, content)) From 1ea68265b50a8c8ded253e392181b3b81df72815 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Mar 2024 12:46:30 +0000 Subject: [PATCH 152/374] Revert "Adds makeDateTime64 function." This reverts commit bd194aab41401492c5d628269df53e68243a1211. --- .../functions/date-time-functions.md | 137 ++++-------------- .../functions/other-functions.md | 65 +-------- 2 files changed, 32 insertions(+), 170 deletions(-) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 12f0c996ce7..41503abfa2f 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -26,115 +26,66 @@ SELECT ## makeDate -Creates a [Date](../../sql-reference/data-types/date.md) from either one of the following sets of arguments: +Creates a [Date](../../sql-reference/data-types/date.md) +- from a year, month and day argument, or +- from a year and day of year argument. -- a year, month, and day. -- a year and day of year. +**Syntax** -### Syntax - -Using a year, month, and day: - -```sql -makeDate(year, month, day) +``` sql +makeDate(year, month, day); +makeDate(year, day_of_year); ``` -Using a year and day of year: +Alias: +- `MAKEDATE(year, month, day);` +- `MAKEDATE(year, day_of_year);` -```sql -makeDate(year, day_of_year) -``` - -### Arguments +**Arguments** - `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). - `month` — Month. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). - `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). - `day_of_year` — Day of the year. 
[Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). -### Returned values +**Returned value** -A date created from the arguments. +- A date created from the arguments. Type: [Date](../../sql-reference/data-types/date.md). -### Examples +**Example** Create a Date from a year, month and day: -```sql +``` sql SELECT makeDate(2023, 2, 28) AS Date; ``` -```response -2023-02-28 +Result: + +``` text +┌───────date─┐ +│ 2023-02-28 │ +└────────────┘ ``` -Create a Date from a year and day of year: +Create a Date from a year and day of year argument: ``` sql SELECT makeDate(2023, 42) AS Date; ``` -```response -2023-02-11 -``` +Result: +``` text +┌───────date─┐ +│ 2023-02-11 │ +└────────────┘ +``` ## makeDate32 -Creates a date of type [Date32](../../sql-reference/data-types/date32.md) from either one of the following sets of arguments: - -- a year, month, and day. -- a year and day of year. - -### Syntax - -Using a year, month, and day: - -```sql -makeDate32(year, month, day) -``` - -Using a year and day of year: - -```sql -makeDate32(year, day_of_year) -``` - -### Arguments - -- `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). -- `month` — Month. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). -- `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). -- `day_of_year` — Day of the year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). - -### Returned values - -A date created from the arguments. - -Type: [Date32](../../sql-reference/data-types/date32.md). - -### Examples - -Create a date from a year, month, and day: - -```sql -SELECT makeDate32(2024, 1, 1); -``` - -```response -2024-01-01 -``` - -Create a Date from a year and day of year: - -``` sql -SELECT makeDate32(2024, 100); -``` - -```response -2024-04-09 -``` +Like [makeDate](#makeDate) but produces a [Date32](../../sql-reference/data-types/date32.md). ## makeDateTime @@ -178,38 +129,12 @@ Result: ## makeDateTime64 -Create a [DateTime64](../../sql-reference/data-types/datetime64.md) data type value from its components (year, month, day, hour, minute, second, and optionally, subsecond precision). - -The DateTime64 data type stores both the date and time components in a single 64-bit integer value. The precision of the time component is configurable, allowing you to store time values with subsecond precision up to nanoseconds. +Like [makeDateTime](#makedatetime) but produces a [DateTime64](../../sql-reference/data-types/datetime64.md). **Syntax** -```sql -makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision]]) -``` - -**Arguments** - -- `year` — [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The year component (0-9999). -- `month` — Month. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The month component (1-12). -- `day` — Day. 
[Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The day component (1-31). -- `hour` — Hour. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The hour component (0-23). -- `minute` — Minute. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The minute component (0-59). -- `second` — Second. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md). The second component (0-59). -- `subsecond_precision` (optional) [Integer](../../sql-reference/data-types/int-uint.md): The precision of the subsecond component (0-9, where 0 means no subsecond precision, and 9 means nanosecond precision). - -**Returned value** - -A date and time element of type [DateTime64](../../sql-reference/data-types/datetime64.md) with created from the supplied arguments. - -**Example** - ``` sql -SELECT makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5); -``` - -```response -2023-05-15 10:30:45.00779 +makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision[, timezone]]]) ``` ## timestamp diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 10ceedad9aa..739b688a0d2 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -1866,7 +1866,7 @@ As you can see, `runningAccumulate` merges states for each group of rows separat ## joinGet -Allows you to extract data from a specific column in a Join table, similar to how you would access a value from a dictionary. +The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md). Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key. @@ -1927,69 +1927,6 @@ Result: └──────────────────────────────────────────────────┘ ``` -## joinGetOrNull - -Allows you to extract data from a specific column in a Join table, similar to how you would access a value from a dictionary. - -Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key. - -Only supports tables created with the `ENGINE = Join(ANY, LEFT, )` statement. - -### Syntax - -```sql -joinGet(join_storage_table_name, `value_column`, join_keys) -``` - -### Parameters - -- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed. The identifier is searched in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example. -- `value_column` — name of the column of the table that contains required data. -- `join_keys` — list of keys. - -### Returned value - -Returns a list of values corresponded to list of keys. - -If certain does not exist in source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting. 
- -More info about `join_use_nulls` in [Join operation](../../engines/table-engines/special/join.md). - -**Example** - -Input table: - -``` sql -CREATE DATABASE db_test -CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 -INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) -``` - -``` text -┌─id─┬─val─┐ -│ 4 │ 13 │ -│ 2 │ 12 │ -│ 1 │ 11 │ -└────┴─────┘ -``` - -Query: - -``` sql -SELECT joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 -``` - -Result: - -``` text -┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐ -│ 0 │ -│ 11 │ -│ 12 │ -│ 0 │ -└──────────────────────────────────────────────────┘ -``` - ## catboostEvaluate(path_to_model, feature_1, feature_2, …, feature_n) :::note From a7350299396d5ba3f2322584195554a7d946562f Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Mar 2024 12:50:54 +0000 Subject: [PATCH 153/374] Fix tests --- src/Common/DateLUTImpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/DateLUTImpl.h b/src/Common/DateLUTImpl.h index 4087e77d588..082127e717c 100644 --- a/src/Common/DateLUTImpl.h +++ b/src/Common/DateLUTImpl.h @@ -255,7 +255,7 @@ private: static LUTIndex toLUTIndex(ExtendedDayNum d) { - return normalizeLUTIndex(static_cast(d) + daynum_offset_epoch); + return normalizeLUTIndex(static_cast(d + daynum_offset_epoch)); /// NOLINT } LUTIndex toLUTIndex(Time t) const From 802bae9661a6f22a8c42a6f88f9816e3497d9355 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Mar 2024 12:54:34 +0000 Subject: [PATCH 154/374] GCC --> clang pragmas --- base/base/coverage.cpp | 2 +- base/base/sort.h | 6 +++--- programs/client/Client.cpp | 4 ++-- programs/copier/ClusterCopierApp.cpp | 4 ++-- programs/extract-from-config/ExtractFromConfig.cpp | 4 ++-- programs/format/Format.cpp | 4 ++-- programs/local/LocalServer.cpp | 4 ++-- programs/obfuscator/Obfuscator.cpp | 4 ++-- src/Common/SSH/Wrappers.cpp | 8 ++++---- src/Functions/GatherUtils/Sources.h | 8 ++++---- 10 files changed, 24 insertions(+), 24 deletions(-) diff --git a/base/base/coverage.cpp b/base/base/coverage.cpp index 99b897c4571..d96b3ea1e9a 100644 --- a/base/base/coverage.cpp +++ b/base/base/coverage.cpp @@ -1,7 +1,7 @@ #include "coverage.h" #include -#pragma GCC diagnostic ignored "-Wreserved-identifier" +#pragma clang diagnostic ignored "-Wreserved-identifier" /// WITH_COVERAGE enables the default implementation of code coverage, diff --git a/base/base/sort.h b/base/base/sort.h index 99bf8a0830e..e46c388d185 100644 --- a/base/base/sort.h +++ b/base/base/sort.h @@ -59,8 +59,8 @@ using ComparatorWrapper = Comparator; #endif -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wold-style-cast" #include @@ -115,7 +115,7 @@ void partial_sort(RandomIt first, RandomIt middle, RandomIt last) ::partial_sort(first, middle, last, comparator()); } -#pragma GCC diagnostic pop +#pragma clang diagnostic pop template void sort(RandomIt first, RandomIt last, Compare compare) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index a2bd6b6016a..1c7e57dac76 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1377,8 +1377,8 @@ void Client::readArguments( } -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored 
"-Wmissing-declarations" int mainEntryClickHouseClient(int argc, char ** argv) { diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index fdf07dec61a..ed748a17a55 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -232,8 +232,8 @@ int ClusterCopierApp::main(const std::vector &) } -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wmissing-declarations" int mainEntryClickHouseClusterCopier(int argc, char ** argv) { diff --git a/programs/extract-from-config/ExtractFromConfig.cpp b/programs/extract-from-config/ExtractFromConfig.cpp index 56041ee382f..61d451664e3 100644 --- a/programs/extract-from-config/ExtractFromConfig.cpp +++ b/programs/extract-from-config/ExtractFromConfig.cpp @@ -109,8 +109,8 @@ static std::vector extractFromConfig( return {configuration->getString(key)}; } -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wmissing-declarations" int mainEntryClickHouseExtractFromConfig(int argc, char ** argv) { diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index a1c51565ae3..c92106e2f90 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -70,8 +70,8 @@ void skipSpacesAndComments(const char*& pos, const char* end, bool print_comment } -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wmissing-declarations" extern const char * auto_time_zones[]; diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 68f0e52ce08..99639d5e604 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -944,8 +944,8 @@ void LocalServer::readArguments(int argc, char ** argv, Arguments & common_argum } -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wmissing-declarations" int mainEntryClickHouseLocal(int argc, char ** argv) { diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 242e995e466..317d93aaf0c 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1204,8 +1204,8 @@ public: } -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" +#pragma clang diagnostic ignored "-Wunused-function" +#clang GCC diagnostic ignored "-Wmissing-declarations" int mainEntryClickHouseObfuscator(int argc, char ** argv) try diff --git a/src/Common/SSH/Wrappers.cpp b/src/Common/SSH/Wrappers.cpp index 463338dbe3f..a9b9f758c6e 100644 --- a/src/Common/SSH/Wrappers.cpp +++ b/src/Common/SSH/Wrappers.cpp @@ -2,13 +2,13 @@ # if USE_SSH # include -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wreserved-macro-identifier" -# pragma GCC diagnostic ignored "-Wreserved-identifier" +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wreserved-macro-identifier" +# pragma clang diagnostic ignored "-Wreserved-identifier" # include -# pragma GCC diagnostic pop +# pragma clang diagnostic pop namespace { diff --git 
a/src/Functions/GatherUtils/Sources.h b/src/Functions/GatherUtils/Sources.h index 222f9f19168..80fb9ce3900 100644 --- a/src/Functions/GatherUtils/Sources.h +++ b/src/Functions/GatherUtils/Sources.h @@ -140,9 +140,9 @@ struct NumericArraySource : public ArraySourceImpl> /// The methods can be virtual or not depending on the template parameter. See IStringSource. -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wsuggest-override" -#pragma GCC diagnostic ignored "-Wsuggest-destructor-override" +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wsuggest-override" +#pragma clang diagnostic ignored "-Wsuggest-destructor-override" template struct ConstSource : public Base @@ -231,7 +231,7 @@ struct ConstSource : public Base } }; -#pragma GCC diagnostic pop +#pragma clang diagnostic pop struct StringSource { From a7568ddbfaf107e9dafa6fa83c9d5f747a0e7153 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Mar 2024 12:58:20 +0000 Subject: [PATCH 155/374] Fix spelling --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 3614bcb7452..473907b45ae 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1846,6 +1846,7 @@ linearized lineasstring linefeeds lineorder +linestring linux llvm loadDefaultCAFile @@ -2204,7 +2205,9 @@ rankCorr rapidjson rawblob readWKTMultiPolygon +readWKTPoint readWKTPolygon +readWKTRing readahead readline readme From c4f29af8ec0927c09b9d12d83767607020915c30 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 11 Mar 2024 13:35:05 +0000 Subject: [PATCH 156/374] CI: fix nightly job issue #do_not_test --- tests/ci/ci.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 1eec9a6771b..9d57f161be3 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1756,7 +1756,8 @@ def main() -> int: result["build"] = build_digest result["docs"] = docs_digest result["ci_flags"] = ci_flags - result["stages_data"] = _generate_ci_stage_config(jobs_data) + if not args.skip_jobs: + result["stages_data"] = _generate_ci_stage_config(jobs_data) result["jobs_data"] = jobs_data result["docker_data"] = docker_data ### CONFIGURE action: end From 19d8256fa83a4e8353dcad372067085ec8f0828d Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 11 Mar 2024 14:44:19 +0100 Subject: [PATCH 157/374] Update test.py --- tests/integration/test_backup_restore_s3/test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index f3f4837c317..d65fc1f09d6 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -130,11 +130,11 @@ def check_system_tables(backup_query_id=None): if disk ] expected_disks = ( - ("default", "local", "None", "None"), - ("disk_s3", "object_storage", "s3", "local"), - ("disk_s3_cache", "object_storage", "s3", "local"), - ("disk_s3_other_bucket", "object_storage", "s3", "local"), - ("disk_s3_plain", "object_storage", "s3", "plain"), + ("default", "Local", "None", "None"), + ("disk_s3", "ObjectStorage", "S3", "Local"), + ("disk_s3_cache", "ObjectStorage", "S3", "Local"), + ("disk_s3_other_bucket", "ObjectStorage", "S3", "Local"), + ("disk_s3_plain", 
"ObjectStorage", "S3", "Plain"), ) assert len(expected_disks) == len(disks) for expected_disk in expected_disks: From 9bada70f45654495a30e394d94a374a862c24fb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Mar 2024 14:52:32 +0100 Subject: [PATCH 158/374] Remove a bunch of transitive dependencies --- src/Backups/BackupCoordinationRemote.cpp | 2 ++ src/Formats/ReadSchemaUtils.cpp | 9 +++++---- src/Interpreters/DatabaseCatalog.h | 7 +++---- src/Interpreters/GraceHashJoin.cpp | 15 ++++++--------- src/Interpreters/TemporaryDataOnDisk.h | 6 +++--- src/Planner/PlannerExpressionAnalysis.cpp | 2 ++ src/Processors/QueryPlan/AggregatingStep.cpp | 1 + src/Processors/QueryPlan/CubeStep.cpp | 7 ++++--- src/Processors/QueryPlan/SortingStep.cpp | 1 + 9 files changed, 27 insertions(+), 23 deletions(-) diff --git a/src/Backups/BackupCoordinationRemote.cpp b/src/Backups/BackupCoordinationRemote.cpp index 9c509858b2a..ec652f20069 100644 --- a/src/Backups/BackupCoordinationRemote.cpp +++ b/src/Backups/BackupCoordinationRemote.cpp @@ -14,6 +14,8 @@ #include #include +#include + namespace DB { diff --git a/src/Formats/ReadSchemaUtils.cpp b/src/Formats/ReadSchemaUtils.cpp index 5badf4301bf..b05b768899b 100644 --- a/src/Formats/ReadSchemaUtils.cpp +++ b/src/Formats/ReadSchemaUtils.cpp @@ -1,10 +1,11 @@ #include -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include namespace DB { diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 4fe114cc493..6f05a3cea0f 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -1,15 +1,14 @@ #pragma once #include +#include +#include #include #include -#include #include #include -#include "Common/NamePrompter.h" +#include #include -#include "Storages/IStorage.h" -#include "Databases/IDatabase.h" #include #include diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index 5fb92a68a29..53d1f48c291 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -1,21 +1,18 @@ +#include +#include +#include +#include #include #include #include - -#include #include - -#include +#include #include #include #include -#include -#include - -#include - #include +#include namespace CurrentMetrics diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index e57d9130369..8b0649be1b1 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -2,11 +2,11 @@ #include -#include -#include +#include #include -#include +#include #include +#include namespace CurrentMetrics diff --git a/src/Planner/PlannerExpressionAnalysis.cpp b/src/Planner/PlannerExpressionAnalysis.cpp index 52001eb27c5..30d90a68072 100644 --- a/src/Planner/PlannerExpressionAnalysis.cpp +++ b/src/Planner/PlannerExpressionAnalysis.cpp @@ -3,6 +3,8 @@ #include #include +#include + #include #include #include diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp index f374a7b7b10..a76bacdd97b 100644 --- a/src/Processors/QueryPlan/AggregatingStep.cpp +++ b/src/Processors/QueryPlan/AggregatingStep.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Processors/QueryPlan/CubeStep.cpp b/src/Processors/QueryPlan/CubeStep.cpp index 0c632c346c7..d010a3327a6 100644 --- a/src/Processors/QueryPlan/CubeStep.cpp +++ b/src/Processors/QueryPlan/CubeStep.cpp @@ 
-1,10 +1,11 @@ +#include +#include +#include +#include #include #include #include -#include #include -#include -#include namespace DB { diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 641b9036d4c..d0491cb4b82 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include From 91de3825171eefb8f828c2907181b6a5e6b0f017 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 11 Mar 2024 14:00:01 +0000 Subject: [PATCH 159/374] Spit `DictionaryPipelineExecutor` into separate file --- src/Dictionaries/CacheDictionary.cpp | 2 +- .../DictionaryPipelineExecutor.cpp | 42 +++++++++++++++++++ src/Dictionaries/DictionaryPipelineExecutor.h | 27 ++++++++++++ src/Dictionaries/DictionarySourceHelpers.cpp | 29 ------------- src/Dictionaries/DictionarySourceHelpers.h | 17 -------- src/Dictionaries/FlatDictionary.cpp | 2 +- src/Dictionaries/HashedArrayDictionary.cpp | 1 + src/Dictionaries/HashedDictionary.h | 2 +- src/Dictionaries/IPAddressDictionary.cpp | 1 + src/Dictionaries/PolygonDictionary.cpp | 3 +- src/Dictionaries/RangeHashedDictionary.h | 6 +-- src/Dictionaries/RegExpTreeDictionary.cpp | 1 + .../registerRangeHashedDictionary.cpp | 5 ++- 13 files changed, 82 insertions(+), 56 deletions(-) create mode 100644 src/Dictionaries/DictionaryPipelineExecutor.cpp create mode 100644 src/Dictionaries/DictionaryPipelineExecutor.h diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index 8444042db9e..b136d5ebd71 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include diff --git a/src/Dictionaries/DictionaryPipelineExecutor.cpp b/src/Dictionaries/DictionaryPipelineExecutor.cpp new file mode 100644 index 00000000000..30d1ab95f53 --- /dev/null +++ b/src/Dictionaries/DictionaryPipelineExecutor.cpp @@ -0,0 +1,42 @@ +#include + +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ +extern const int LOGICAL_ERROR; +} + +DictionaryPipelineExecutor::DictionaryPipelineExecutor(QueryPipeline & pipeline_, bool async) + : async_executor(async ? std::make_unique(pipeline_) : nullptr) + , executor(async ? 
nullptr : std::make_unique(pipeline_)) +{ +} + +bool DictionaryPipelineExecutor::pull(Block & block) +{ + if (async_executor) + { + while (true) + { + bool has_data = async_executor->pull(block); + if (has_data && !block) + continue; + return has_data; + } + } + else if (executor) + return executor->pull(block); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "DictionaryPipelineExecutor is not initialized"); +} + +DictionaryPipelineExecutor::~DictionaryPipelineExecutor() = default; + +} diff --git a/src/Dictionaries/DictionaryPipelineExecutor.h b/src/Dictionaries/DictionaryPipelineExecutor.h new file mode 100644 index 00000000000..601213e5039 --- /dev/null +++ b/src/Dictionaries/DictionaryPipelineExecutor.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace DB +{ + +class Block; +class QueryPipeline; +class PullingAsyncPipelineExecutor; +class PullingPipelineExecutor; + +/// Wrapper for `Pulling(Async)PipelineExecutor` to dynamically dispatch calls to the right executor +class DictionaryPipelineExecutor +{ +public: + DictionaryPipelineExecutor(QueryPipeline & pipeline_, bool async); + bool pull(Block & block); + + ~DictionaryPipelineExecutor(); + +private: + std::unique_ptr async_executor; + std::unique_ptr executor; +}; + +} diff --git a/src/Dictionaries/DictionarySourceHelpers.cpp b/src/Dictionaries/DictionarySourceHelpers.cpp index d9a4d9ccbcf..f0e1bc4109a 100644 --- a/src/Dictionaries/DictionarySourceHelpers.cpp +++ b/src/Dictionaries/DictionarySourceHelpers.cpp @@ -9,15 +9,11 @@ #include #include -#include -#include - namespace DB { namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; } @@ -135,29 +131,4 @@ String TransformWithAdditionalColumns::getName() const return "TransformWithAdditionalColumns"; } -DictionaryPipelineExecutor::DictionaryPipelineExecutor(QueryPipeline & pipeline_, bool async) - : async_executor(async ? std::make_unique(pipeline_) : nullptr) - , executor(async ? 
nullptr : std::make_unique(pipeline_)) -{} - -bool DictionaryPipelineExecutor::pull(Block & block) -{ - if (async_executor) - { - while (true) - { - bool has_data = async_executor->pull(block); - if (has_data && !block) - continue; - return has_data; - } - } - else if (executor) - return executor->pull(block); - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "DictionaryPipelineExecutor is not initialized"); -} - -DictionaryPipelineExecutor::~DictionaryPipelineExecutor() = default; - } diff --git a/src/Dictionaries/DictionarySourceHelpers.h b/src/Dictionaries/DictionarySourceHelpers.h index a545b5cdac7..39c6e7b3c42 100644 --- a/src/Dictionaries/DictionarySourceHelpers.h +++ b/src/Dictionaries/DictionarySourceHelpers.h @@ -16,10 +16,6 @@ namespace DB struct DictionaryStructure; class SettingsChanges; -class PullingPipelineExecutor; -class PullingAsyncPipelineExecutor; -class QueryPipeline; - /// For simple key Block blockForIds( @@ -55,17 +51,4 @@ private: size_t current_range_index = 0; }; -/// Wrapper for `Pulling(Async)PipelineExecutor` to dynamically dispatch calls to the right executor -class DictionaryPipelineExecutor -{ -public: - DictionaryPipelineExecutor(QueryPipeline & pipeline_, bool async); - bool pull(Block & block); - - ~DictionaryPipelineExecutor(); -private: - std::unique_ptr async_executor; - std::unique_ptr executor; -}; - } diff --git a/src/Dictionaries/FlatDictionary.cpp b/src/Dictionaries/FlatDictionary.cpp index efb7d0a176c..fc58ff525bd 100644 --- a/src/Dictionaries/FlatDictionary.cpp +++ b/src/Dictionaries/FlatDictionary.cpp @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include diff --git a/src/Dictionaries/HashedArrayDictionary.cpp b/src/Dictionaries/HashedArrayDictionary.cpp index d09f402143e..2420c07277c 100644 --- a/src/Dictionaries/HashedArrayDictionary.cpp +++ b/src/Dictionaries/HashedArrayDictionary.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/src/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h index b3b8cc56868..46a0af487f5 100644 --- a/src/Dictionaries/HashedDictionary.h +++ b/src/Dictionaries/HashedDictionary.h @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index e1c9572e607..1bc6d16c932 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/src/Dictionaries/PolygonDictionary.cpp b/src/Dictionaries/PolygonDictionary.cpp index 485b48d9d81..1456a0db750 100644 --- a/src/Dictionaries/PolygonDictionary.cpp +++ b/src/Dictionaries/PolygonDictionary.cpp @@ -1,6 +1,5 @@ #include "PolygonDictionary.h" -#include #include #include @@ -15,7 +14,7 @@ #include #include #include -#include +#include namespace DB diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index 1a6ee7e81d4..509b991b30c 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include @@ -29,10 +31,6 @@ #include #include -#include -#include -#include - namespace DB { diff --git a/src/Dictionaries/RegExpTreeDictionary.cpp b/src/Dictionaries/RegExpTreeDictionary.cpp index 4d82aa9ca0e..8930074bbe0 100644 --- a/src/Dictionaries/RegExpTreeDictionary.cpp +++ 
b/src/Dictionaries/RegExpTreeDictionary.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include diff --git a/src/Dictionaries/registerRangeHashedDictionary.cpp b/src/Dictionaries/registerRangeHashedDictionary.cpp index 4e20abfdb79..8123b811198 100644 --- a/src/Dictionaries/registerRangeHashedDictionary.cpp +++ b/src/Dictionaries/registerRangeHashedDictionary.cpp @@ -1,5 +1,8 @@ -#include "RangeHashedDictionary.h" +#include + #include +#include +#include #include namespace DB From 8b5ccb4735365ef81af4debcc3180f296452268d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Mar 2024 15:53:46 +0100 Subject: [PATCH 160/374] Remove one template --- src/Interpreters/Aggregator.cpp | 65 +++++++++++++-------------------- src/Interpreters/Aggregator.h | 4 +- 2 files changed, 28 insertions(+), 41 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 80a98683867..69625dbd57d 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -2609,8 +2609,9 @@ void NO_INLINE Aggregator::mergeDataNullKey( } } -template -void NO_INLINE Aggregator::mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena) const +template +void NO_INLINE +Aggregator::mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]]) const { if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization) mergeDataNullKey(table_dst, table_src, arena); @@ -2637,7 +2638,7 @@ void NO_INLINE Aggregator::mergeDataImpl(Table & table_dst, Table & table_src, A table_src.clearAndShrink(); #if USE_EMBEDDED_COMPILER - if constexpr (use_compiled_functions) + if (use_compiled_functions) { const auto & compiled_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions; compiled_functions.merge_aggregate_states_function(dst_places.data(), src_places.data(), dst_places.size()); @@ -2787,26 +2788,16 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl( if (!no_more_keys) { + bool use_compiled_functions = false; #if USE_EMBEDDED_COMPILER - if (compiled_aggregate_functions_holder) - { - if (prefetch) - mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool); - else - mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool); - } - else + use_compiled_functions = compiled_aggregate_functions_holder != nullptr; #endif - { - if (prefetch) - mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool); - else - mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool); - } + if (prefetch) + mergeDataImpl( + getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, use_compiled_functions); + else + mergeDataImpl( + getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, use_compiled_functions); } else if (res->without_key) { @@ -2851,26 +2842,22 @@ void NO_INLINE Aggregator::mergeBucketImpl( return; AggregatedDataVariants & current = *data[result_num]; + bool use_compiled_functions = false; #if USE_EMBEDDED_COMPILER - if (compiled_aggregate_functions_holder) - { - if (prefetch) - mergeDataImpl( - getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena); - else - mergeDataImpl( - getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena); - } - else + use_compiled_functions = 
compiled_aggregate_functions_holder != nullptr; #endif - { - if (prefetch) - mergeDataImpl( - getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena); - else - mergeDataImpl( - getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena); - } + if (prefetch) + mergeDataImpl( + getDataVariant(*res).data.impls[bucket], + getDataVariant(current).data.impls[bucket], + arena, + use_compiled_functions); + else + mergeDataImpl( + getDataVariant(*res).data.impls[bucket], + getDataVariant(current).data.impls[bucket], + arena, + use_compiled_functions); } } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 375b8986101..67e82cdd784 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1429,8 +1429,8 @@ private: Arena * arena) const; /// Merge data from hash table `src` into `dst`. - template - void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena) const; + template + void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions) const; /// Merge data from hash table `src` into `dst`, but only for keys that already exist in dst. In other cases, merge the data into `overflows`. template From 724cc903afb9283a8369a62a836d04eceae42e57 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 11 Mar 2024 15:56:02 +0100 Subject: [PATCH 161/374] Restart CI --- tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh b/tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh index 1bf21dfc53b..6cd5c3b486c 100755 --- a/tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh +++ b/tests/queries/0_stateless/03001_parallel_parsing_deadlock.sh @@ -9,4 +9,3 @@ DATA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.csv $CLICKHOUSE_LOCAL -q "select number > 1000000 ? 
'error' : toString(number) from numbers(2000000) format CSV" > $DATA_FILE $CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CSV, 'x UInt64') format Null settings input_format_allow_errors_ratio=1" rm $DATA_FILE - From ecc30448baa1c6283f3f0f13c83cfd1bf4428b9b Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 11 Mar 2024 15:26:29 +0000 Subject: [PATCH 162/374] Fix filtering when optimize_use_implicit_projections=1 --- .../optimizeUseAggregateProjection.cpp | 4 ++ src/Storages/VirtualColumnUtils.cpp | 2 +- src/Storages/VirtualColumnUtils.h | 3 + ...ions_non_deterministoc_functions.reference | 55 +++++++++++++++++++ ...rojections_non_deterministoc_functions.sql | 28 ++++++++++ 5 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.reference create mode 100644 tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.sql diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 91f4213ff43..b40fea47b3c 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -464,6 +465,9 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( // LOG_TRACE(getLogger("optimizeUseProjections"), "Query DAG: {}", dag.dag->dumpDAG()); candidates.has_filter = dag.filter_node; + /// We can't use minmax projection if filter has non-deterministic functions. + if (dag.filter_node && !VirtualColumnUtils::isDeterministicInScopeOfQuery(dag.filter_node)) + can_use_minmax_projection = false; if (can_use_minmax_projection) { diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 897090223d6..c3ac27903c9 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -238,7 +238,7 @@ static bool canEvaluateSubtree(const ActionsDAG::Node * node, const Block & allo return true; } -static bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) +bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) { for (const auto * child : node->children) { diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index b5526fc5c7f..83494872cac 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -25,6 +25,9 @@ void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, /// Just filters block. Block should contain all the required columns. void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context); +/// Recursively checks if all functions used in DAG are deterministic in scope of query. +bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); + /// Extract a part of predicate that can be evaluated using only columns from input_names. 
ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs); diff --git a/tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.reference b/tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.reference new file mode 100644 index 00000000000..8233925d609 --- /dev/null +++ b/tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.reference @@ -0,0 +1,55 @@ +-- count +100000 all_10_10_0 +100000 all_1_1_0 +100000 all_2_2_0 +100000 all_3_3_0 +100000 all_4_4_0 +100000 all_5_5_0 +100000 all_6_6_0 +100000 all_7_7_0 +100000 all_8_8_0 +100000 all_9_9_0 +-- rand()%2=0: +1 all_10_10_0 +1 all_1_1_0 +1 all_2_2_0 +1 all_3_3_0 +1 all_4_4_0 +1 all_5_5_0 +1 all_6_6_0 +1 all_7_7_0 +1 all_8_8_0 +1 all_9_9_0 +-- optimize_use_implicit_projections=0 +1 all_10_10_0 +1 all_1_1_0 +1 all_2_2_0 +1 all_3_3_0 +1 all_4_4_0 +1 all_5_5_0 +1 all_6_6_0 +1 all_7_7_0 +1 all_8_8_0 +1 all_9_9_0 +-- optimize_trivial_count_query=0 +1 all_10_10_0 +1 all_1_1_0 +1 all_2_2_0 +1 all_3_3_0 +1 all_4_4_0 +1 all_5_5_0 +1 all_6_6_0 +1 all_7_7_0 +1 all_8_8_0 +1 all_9_9_0 +-- optimize_trivial_count_query=0, optimize_use_implicit_projections=0 +1 all_10_10_0 +1 all_1_1_0 +1 all_2_2_0 +1 all_3_3_0 +1 all_4_4_0 +1 all_5_5_0 +1 all_6_6_0 +1 all_7_7_0 +1 all_8_8_0 +1 all_9_9_0 diff --git a/tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.sql b/tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.sql new file mode 100644 index 00000000000..3be9bc3982a --- /dev/null +++ b/tests/queries/0_stateless/03008_filter_projections_non_deterministoc_functions.sql @@ -0,0 +1,28 @@ +create table test (number UInt64) engine=MergeTree order by number; +system stop merges test; +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); + +select '-- count'; +SELECT count(), _part FROM test GROUP BY _part ORDER BY _part; + +select '-- rand()%2=0:'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(1)%2=1 GROUP BY _part ORDER BY _part; + +select '-- optimize_use_implicit_projections=0'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(2)%2=1 GROUP BY _part ORDER BY _part settings optimize_use_implicit_projections=0; + +select '-- optimize_trivial_count_query=0'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(3)%2=1 GROUP BY _part ORDER BY _part settings optimize_trivial_count_query=0; + +select '-- optimize_trivial_count_query=0, optimize_use_implicit_projections=0'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(4)%2=1 GROUP BY _part ORDER BY _part settings optimize_trivial_count_query=0,optimize_use_implicit_projections=0; + From 879f7f2f8c862aae51ddc5a8faebb8d07b5d4493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Mar 2024 16:28:25 +0100 Subject: [PATCH 163/374] Remove more templates for JIT --- src/Interpreters/Aggregator.cpp | 185 ++++++++++++++------------------ src/Interpreters/Aggregator.h | 
21 ++-- 2 files changed, 96 insertions(+), 110 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 69625dbd57d..c7ce3e46446 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1111,6 +1111,7 @@ void NO_INLINE Aggregator::executeImpl( bool all_keys_are_const, AggregateDataPtr overflow_row) const { + bool use_compiled_functions = false; if (!no_more_keys) { /// Prefetching doesn't make sense for small hash tables, because they fit in caches entirely. @@ -1118,33 +1119,47 @@ void NO_INLINE Aggregator::executeImpl( && (method.data.getBufferSizeInBytes() > min_bytes_for_prefetch); #if USE_EMBEDDED_COMPILER - if (compiled_aggregate_functions_holder && !hasSparseArguments(aggregate_instructions)) - { - if (prefetch) - executeImplBatch( - method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row); - else - executeImplBatch( - method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row); - } - else + use_compiled_functions = compiled_aggregate_functions_holder && !hasSparseArguments(aggregate_instructions); #endif - { - if (prefetch) - executeImplBatch( - method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row); - else - executeImplBatch( - method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row); - } + if (prefetch) + executeImplBatch( + method, + state, + aggregates_pool, + row_begin, + row_end, + aggregate_instructions, + all_keys_are_const, + use_compiled_functions, + overflow_row); + else + executeImplBatch( + method, + state, + aggregates_pool, + row_begin, + row_end, + aggregate_instructions, + all_keys_are_const, + use_compiled_functions, + overflow_row); } else { - executeImplBatch(method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row); + executeImplBatch( + method, + state, + aggregates_pool, + row_begin, + row_end, + aggregate_instructions, + all_keys_are_const, + use_compiled_functions, + overflow_row); } } -template +template void NO_INLINE Aggregator::executeImplBatch( Method & method, State & state, @@ -1153,6 +1168,7 @@ void NO_INLINE Aggregator::executeImplBatch( size_t row_end, AggregateFunctionInstruction * aggregate_instructions, bool all_keys_are_const, + bool use_compiled_functions [[maybe_unused]], AggregateDataPtr overflow_row) const { using KeyHolder = decltype(state.getKeyHolder(0, std::declval())); @@ -1284,7 +1300,7 @@ void NO_INLINE Aggregator::executeImplBatch( aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); #if USE_EMBEDDED_COMPILER - if constexpr (use_compiled_functions) + if (use_compiled_functions) { const auto & compiled_aggregate_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions; compiled_aggregate_functions.create_aggregate_states_function(aggregate_data); @@ -1293,20 +1309,6 @@ void NO_INLINE Aggregator::executeImplBatch( static constexpr bool skip_compiled_aggregate_functions = true; createAggregateStates(aggregate_data); } - -#if defined(MEMORY_SANITIZER) - - /// We compile only functions that do not allocate some data in Arena. Only store necessary state in AggregateData place. 
- for (size_t aggregate_function_index = 0; aggregate_function_index < aggregate_functions.size(); ++aggregate_function_index) - { - if (!is_aggregate_function_compiled[aggregate_function_index]) - continue; - - auto aggregate_data_with_offset = aggregate_data + offsets_of_aggregate_states[aggregate_function_index]; - auto data_size = params.aggregates[aggregate_function_index].function->sizeOfData(); - __msan_unpoison(aggregate_data_with_offset, data_size); - } -#endif } else #endif @@ -1339,7 +1341,7 @@ void NO_INLINE Aggregator::executeImplBatch( } #if USE_EMBEDDED_COMPILER - if constexpr (use_compiled_functions) + if (use_compiled_functions) { std::vector columns_data; @@ -1372,9 +1374,8 @@ void NO_INLINE Aggregator::executeImplBatch( for (size_t i = 0; i < aggregate_functions.size(); ++i) { #if USE_EMBEDDED_COMPILER - if constexpr (use_compiled_functions) - if (is_aggregate_function_compiled[i]) - continue; + if (use_compiled_functions && is_aggregate_function_compiled[i]) + continue; #endif AggregateFunctionInstruction * inst = aggregate_instructions + i; @@ -1387,18 +1388,19 @@ void NO_INLINE Aggregator::executeImplBatch( } -template void NO_INLINE Aggregator::executeWithoutKeyImpl( AggregatedDataWithoutKey & res, - size_t row_begin, size_t row_end, + size_t row_begin, + size_t row_end, AggregateFunctionInstruction * aggregate_instructions, - Arena * arena) const + Arena * arena, + bool use_compiled_functions [[maybe_unused]]) const { if (row_begin == row_end) return; #if USE_EMBEDDED_COMPILER - if constexpr (use_compiled_functions) + if (use_compiled_functions) { std::vector columns_data; @@ -1418,20 +1420,6 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl( auto add_into_aggregate_states_function_single_place = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function_single_place; add_into_aggregate_states_function_single_place(row_begin, row_end, columns_data.data(), res); - -#if defined(MEMORY_SANITIZER) - - /// We compile only functions that do not allocate some data in Arena. Only store necessary state in AggregateData place. 
- for (size_t aggregate_function_index = 0; aggregate_function_index < aggregate_functions.size(); ++aggregate_function_index) - { - if (!is_aggregate_function_compiled[aggregate_function_index]) - continue; - - auto aggregate_data_with_offset = res + offsets_of_aggregate_states[aggregate_function_index]; - auto data_size = params.aggregates[aggregate_function_index].function->sizeOfData(); - __msan_unpoison(aggregate_data_with_offset, data_size); - } -#endif } #endif @@ -1439,13 +1427,10 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl( for (size_t i = 0; i < aggregate_functions.size(); ++i) { AggregateFunctionInstruction * inst = aggregate_instructions + i; - #if USE_EMBEDDED_COMPILER - if constexpr (use_compiled_functions) - if (is_aggregate_function_compiled[i]) - continue; + if (use_compiled_functions && is_aggregate_function_compiled[i]) + continue; #endif - addBatchSinglePlace(row_begin, row_end, inst, res + inst->state_offset, arena); } } @@ -1704,16 +1689,14 @@ bool Aggregator::executeOnBlock(Columns columns, if (result.type == AggregatedDataVariants::Type::without_key) { /// TODO: Enable compilation after investigation -// #if USE_EMBEDDED_COMPILER -// if (compiled_aggregate_functions_holder) -// { -// executeWithoutKeyImpl(result.without_key, row_begin, row_end, aggregate_functions_instructions.data(), result.aggregates_pool); -// } -// else -// #endif - { - executeWithoutKeyImpl(result.without_key, row_begin, row_end, aggregate_functions_instructions.data(), result.aggregates_pool); - } + bool use_compiled_functions = false; + executeWithoutKeyImpl( + result.without_key, + row_begin, + row_end, + aggregate_functions_instructions.data(), + result.aggregates_pool, + use_compiled_functions); } else { @@ -1965,19 +1948,13 @@ Aggregator::convertToBlockImpl(Method & method, Table & data, Arena * arena, Are ConvertToBlockRes res; + bool use_compiled_functions = false; if (final) { #if USE_EMBEDDED_COMPILER - if (compiled_aggregate_functions_holder) - { - static constexpr bool use_compiled_functions = !Method::low_cardinality_optimization; - res = convertToBlockImplFinal(method, data, arena, aggregates_pools, rows); - } - else + use_compiled_functions = compiled_aggregate_functions_holder != nullptr && !Method::low_cardinality_optimization; #endif - { - res = convertToBlockImplFinal(method, data, arena, aggregates_pools, rows); - } + res = convertToBlockImplFinal(method, data, arena, aggregates_pools, use_compiled_functions, rows); } else { @@ -2059,8 +2036,12 @@ inline void Aggregator::insertAggregatesIntoColumns(Mapped & mapped, MutableColu } -template -Block Aggregator::insertResultsIntoColumns(PaddedPODArray & places, OutputBlockColumns && out_cols, Arena * arena, bool has_null_key_data [[maybe_unused]]) const +Block Aggregator::insertResultsIntoColumns( + PaddedPODArray & places, + OutputBlockColumns && out_cols, + Arena * arena, + bool has_null_key_data [[maybe_unused]], + bool use_compiled_functions [[maybe_unused]]) const { std::exception_ptr exception; size_t aggregate_functions_destroy_index = 0; @@ -2068,7 +2049,7 @@ Block Aggregator::insertResultsIntoColumns(PaddedPODArray & pl try { #if USE_EMBEDDED_COMPILER - if constexpr (use_compiled_functions) + if (use_compiled_functions) { /** For JIT compiled functions we need to resize columns before pass them into compiled code. * insert_aggregates_into_columns_function function does not throw exception. 
@@ -2098,14 +2079,13 @@ Block Aggregator::insertResultsIntoColumns(PaddedPODArray & pl for (; aggregate_functions_destroy_index < params.aggregates_size;) { - if constexpr (use_compiled_functions) +#if USE_EMBEDDED_COMPILER + if (use_compiled_functions && is_aggregate_function_compiled[aggregate_functions_destroy_index]) { - if (is_aggregate_function_compiled[aggregate_functions_destroy_index]) - { - ++aggregate_functions_destroy_index; - continue; - } + ++aggregate_functions_destroy_index; + continue; } +#endif auto & final_aggregate_column = out_cols.final_aggregate_columns[aggregate_functions_destroy_index]; size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; @@ -2127,14 +2107,13 @@ Block Aggregator::insertResultsIntoColumns(PaddedPODArray & pl for (; aggregate_functions_destroy_index < params.aggregates_size; ++aggregate_functions_destroy_index) { - if constexpr (use_compiled_functions) +#if USE_EMBEDDED_COMPILER + if (use_compiled_functions && is_aggregate_function_compiled[aggregate_functions_destroy_index]) { - if (is_aggregate_function_compiled[aggregate_functions_destroy_index]) - { - ++aggregate_functions_destroy_index; - continue; - } + ++aggregate_functions_destroy_index; + continue; } +#endif size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(0, places.size(), places.data(), offset); @@ -2146,9 +2125,9 @@ Block Aggregator::insertResultsIntoColumns(PaddedPODArray & pl return finalizeBlock(params, getHeader(/* final */ true), std::move(out_cols), /* final */ true, places.size()); } -template -Aggregator::ConvertToBlockRes NO_INLINE -Aggregator::convertToBlockImplFinal(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, size_t) const +template +Aggregator::ConvertToBlockRes NO_INLINE Aggregator::convertToBlockImplFinal( + Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, bool use_compiled_functions [[maybe_unused]], size_t) const { /// +1 for nullKeyData, if `data` doesn't have it - not a problem, just some memory for one excessive row will be preallocated const size_t max_block_size = (return_single_block ? 
data.size() : std::min(params.max_block_size, data.size())) + 1; @@ -2204,7 +2183,8 @@ Aggregator::convertToBlockImplFinal(Method & method, Table & data, Arena * arena { if (places.size() >= max_block_size) { - res.emplace_back(insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data)); + res.emplace_back( + insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions)); places.clear(); out_cols.reset(); has_null_key_data = false; @@ -2214,12 +2194,13 @@ Aggregator::convertToBlockImplFinal(Method & method, Table & data, Arena * arena if constexpr (return_single_block) { - return insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data); + return insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions); } else { if (out_cols.has_value()) - res.emplace_back(insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data)); + res.emplace_back( + insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions)); return res; } } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 67e82cdd784..d7bbe5950a0 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1395,7 +1395,7 @@ private: AggregateDataPtr overflow_row) const; /// Specialization for a particular value no_more_keys. - template + template void executeImplBatch( Method & method, State & state, @@ -1404,16 +1404,17 @@ private: size_t row_end, AggregateFunctionInstruction * aggregate_instructions, bool all_keys_are_const, + bool use_compiled_functions, AggregateDataPtr overflow_row) const; /// For case when there are no keys (all aggregate into one row). 
- template void executeWithoutKeyImpl( AggregatedDataWithoutKey & res, size_t row_begin, size_t row_end, AggregateFunctionInstruction * aggregate_instructions, - Arena * arena) const; + Arena * arena, + bool use_compiled_functions) const; template void writeToTemporaryFileImpl( @@ -1467,12 +1468,16 @@ private: MutableColumns & final_aggregate_columns, Arena * arena) const; - template - Block insertResultsIntoColumns(PaddedPODArray & places, OutputBlockColumns && out_cols, Arena * arena, bool has_null_key_data) const; + Block insertResultsIntoColumns( + PaddedPODArray & places, + OutputBlockColumns && out_cols, + Arena * arena, + bool has_null_key_data, + bool use_compiled_functions) const; - template - ConvertToBlockRes - convertToBlockImplFinal(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, size_t rows) const; + template + ConvertToBlockRes convertToBlockImplFinal( + Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, bool use_compiled_functions, size_t rows) const; template ConvertToBlockRes From 81b2a1f621d9bd64fde2c4e4f6a83c9b3b0c461a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Mar 2024 15:34:02 +0000 Subject: [PATCH 164/374] Fix build --- programs/obfuscator/Obfuscator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 317d93aaf0c..b2bf942af4e 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1205,7 +1205,7 @@ public: } #pragma clang diagnostic ignored "-Wunused-function" -#clang GCC diagnostic ignored "-Wmissing-declarations" +#pragma clang diagnostic ignored "-Wmissing-declarations" int mainEntryClickHouseObfuscator(int argc, char ** argv) try From 38f41ee311d0a36d194965e5815489a25c60e449 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 11 Mar 2024 16:55:30 +0100 Subject: [PATCH 165/374] Fix integration test --- tests/integration/test_disk_types/test.py | 10 +++++----- .../test_endpoint_macro_substitution/test.py | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py index af482b97be3..86579911b3e 100644 --- a/tests/integration/test_disk_types/test.py +++ b/tests/integration/test_disk_types/test.py @@ -3,10 +3,10 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV disk_types = { - "default": "local", - "disk_s3": "s3", - "disk_hdfs": "hdfs", - "disk_encrypted": "s3", + "default": "Local", + "disk_s3": "S3", + "disk_hdfs": "HDFS", + "disk_encrypted": "S3", } @@ -55,7 +55,7 @@ def test_different_types(cluster): def test_select_by_type(cluster): node = cluster.instances["node"] for name, disk_type in list(disk_types.items()): - if disk_type != "s3": + if disk_type != "S3": assert ( node.query( "SELECT name FROM system.disks WHERE type='" + disk_type + "'" diff --git a/tests/integration/test_endpoint_macro_substitution/test.py b/tests/integration/test_endpoint_macro_substitution/test.py index 42a8ddbda84..bec3d9de0e3 100644 --- a/tests/integration/test_endpoint_macro_substitution/test.py +++ b/tests/integration/test_endpoint_macro_substitution/test.py @@ -4,10 +4,10 @@ from helpers.test_tools import TSV from pyhdfs import HdfsClient disk_types = { - "default": "local", - "disk_s3": "s3", - "disk_hdfs": "hdfs", - "disk_encrypted": "s3", + "default": "Local", + "disk_s3": "S3", + "disk_hdfs": "HDFS", + "disk_encrypted": "S3", } @@ -63,7 +63,7 @@ 
def test_select_by_type(cluster): fs = HdfsClient(hosts=cluster.hdfs_ip) for name, disk_type in list(disk_types.items()): - if disk_type != "s3": + if disk_type != "S3": assert ( node.query( "SELECT name FROM system.disks WHERE type='" + disk_type + "'" From 5db08292455fb0c6f47fc0344382ab7cf3508e91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Mar 2024 17:20:53 +0100 Subject: [PATCH 166/374] Remove another template --- src/Interpreters/Aggregator.cpp | 55 ++++++++++++++------------------- src/Interpreters/Aggregator.h | 3 +- 2 files changed, 26 insertions(+), 32 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index c7ce3e46446..a9578b5540f 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -2906,11 +2906,12 @@ ManyAggregatedDataVariants Aggregator::prepareVariantsToMerge(ManyAggregatedData return non_empty_data; } -template +template void NO_INLINE Aggregator::mergeStreamsImplCase( Arena * aggregates_pool, State & state, Table & data, + bool no_more_keys, AggregateDataPtr overflow_row, size_t row_begin, size_t row_end, @@ -2922,36 +2923,34 @@ void NO_INLINE Aggregator::mergeStreamsImplCase( if (!arena_for_keys) arena_for_keys = aggregates_pool; - for (size_t i = row_begin; i < row_end; ++i) + if (no_more_keys) { - AggregateDataPtr aggregate_data = nullptr; - - if constexpr (!no_more_keys) + for (size_t i = row_begin; i < row_end; i++) { - auto emplace_result = state.emplaceKey(data, i, *arena_for_keys); // NOLINT - if (emplace_result.isInserted()) + auto find_result = state.findKey(data, i, *arena_for_keys); + /// aggregate_date == nullptr means that the new key did not fit in the hash table because of no_more_keys. + AggregateDataPtr value = find_result.isFound() ? find_result.getMapped() : overflow_row; + places[i] = value; + } + } + else + { + for (size_t i = row_begin; i < row_end; i++) + { + auto emplace_result = state.emplaceKey(data, i, *arena_for_keys); + if (!emplace_result.isInserted()) + places[i] = emplace_result.getMapped(); + else { emplace_result.setMapped(nullptr); - aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + AggregateDataPtr aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); createAggregateStates(aggregate_data); emplace_result.setMapped(aggregate_data); + places[i] = aggregate_data; } - else - aggregate_data = emplace_result.getMapped(); } - else - { - auto find_result = state.findKey(data, i, *arena_for_keys); - if (find_result.isFound()) - aggregate_data = find_result.getMapped(); - } - - /// aggregate_date == nullptr means that the new key did not fit in the hash table because of no_more_keys. - - AggregateDataPtr value = aggregate_data ? 
aggregate_data : overflow_row; - places[i] = value; } for (size_t j = 0; j < params.aggregates_size; ++j) @@ -3005,22 +3004,16 @@ void NO_INLINE Aggregator::mergeStreamsImpl( if (use_cache) { typename Method::State state(key_columns, key_sizes, aggregation_state_cache); - - if (!no_more_keys) - mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); - else - mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); + mergeStreamsImplCase( + aggregates_pool, state, data, no_more_keys, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); consecutive_keys_cache_stats.update(row_end - row_begin, state.getCacheMissesSinceLastReset()); } else { typename Method::StateNoCache state(key_columns, key_sizes, aggregation_state_cache); - - if (!no_more_keys) - mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); - else - mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); + mergeStreamsImplCase( + aggregates_pool, state, data, no_more_keys, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); } } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index d7bbe5950a0..6c357623003 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1513,11 +1513,12 @@ private: bool final, ThreadPool * thread_pool) const; - template + template void mergeStreamsImplCase( Arena * aggregates_pool, State & state, Table & data, + bool no_more_keys, AggregateDataPtr overflow_row, size_t row_begin, size_t row_end, From 74281b0a4e3d9328d01379129ace85e03c145fa4 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 11 Mar 2024 16:42:45 +0000 Subject: [PATCH 167/374] Avoid some logical errors in experimantal Object type --- src/Columns/ColumnObject.cpp | 8 ++-- src/Common/ErrorCodes.cpp | 1 + src/DataTypes/ObjectUtils.cpp | 41 ++++++++++++++----- .../Serializations/SerializationObject.cpp | 6 +-- 4 files changed, 38 insertions(+), 18 deletions(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index ac2ee309e87..25a62440629 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -20,12 +20,12 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int ILLEGAL_COLUMN; extern const int DUPLICATE_COLUMN; extern const int NUMBER_OF_DIMENSIONS_MISMATCHED; extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; extern const int ARGUMENT_OUT_OF_BOUND; + extern const int EXPERIMENTAL_FEATURE_ERROR; } namespace @@ -247,7 +247,7 @@ void ColumnObject::Subcolumn::checkTypes() const prefix_types.push_back(current_type); auto prefix_common_type = getLeastSupertype(prefix_types); if (!prefix_common_type->equals(*current_type)) - throw Exception(ErrorCodes::LOGICAL_ERROR, + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Data type {} of column at position {} cannot represent all columns from i-th prefix", current_type->getName(), i); } @@ -635,7 +635,7 @@ void ColumnObject::checkConsistency() const { if (num_rows != leaf->data.size()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Sizes of subcolumns are inconsistent in ColumnObject." + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Sizes of subcolumns are inconsistent in ColumnObject." 
" Subcolumn '{}' has {} rows, but expected size is {}", leaf->path.getPath(), leaf->data.size(), num_rows); } @@ -919,7 +919,7 @@ void ColumnObject::addSubcolumn(const PathInData & key, size_t new_size) void ColumnObject::addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size) { if (!key.hasNested()) - throw Exception(ErrorCodes::LOGICAL_ERROR, + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Cannot add Nested subcolumn, because path doesn't contain Nested"); bool inserted = false; diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 44463f7f437..9f2572cbfc6 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -598,6 +598,7 @@ M(714, UNEXPECTED_CLUSTER) \ M(715, CANNOT_DETECT_FORMAT) \ M(716, CANNOT_FORGET_PARTITION) \ + M(717, EXPERIMENTAL_FEATURE_ERROR) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 23d29136c85..48392a614a5 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -28,9 +28,9 @@ namespace DB namespace ErrorCodes { extern const int TYPE_MISMATCH; - extern const int LOGICAL_ERROR; extern const int INCOMPATIBLE_COLUMNS; extern const int NOT_IMPLEMENTED; + extern const int EXPERIMENTAL_FEATURE_ERROR; } size_t getNumberOfDimensions(const IDataType & type) @@ -92,7 +92,7 @@ ColumnPtr createArrayOfColumn(ColumnPtr column, size_t num_dimensions) Array createEmptyArrayField(size_t num_dimensions) { if (num_dimensions == 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot create array field with 0 dimensions"); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Cannot create array field with 0 dimensions"); Array array; Array * current_array = &array; @@ -231,7 +231,7 @@ static std::pair recursivlyConvertDynamicColumnToTuple( }; } - throw Exception(ErrorCodes::LOGICAL_ERROR, "Type {} unexpectedly has dynamic columns", type->getName()); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Type {} unexpectedly has dynamic columns", type->getName()); } void convertDynamicColumnsToTuples(Block & block, const StorageSnapshotPtr & storage_snapshot) @@ -247,7 +247,7 @@ void convertDynamicColumnsToTuples(Block & block, const StorageSnapshotPtr & sto GetColumnsOptions options(GetColumnsOptions::AllPhysical); auto storage_column = storage_snapshot->tryGetColumn(options, column.name); if (!storage_column) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column '{}' not found in storage", column.name); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Column '{}' not found in storage", column.name); auto storage_column_concrete = storage_snapshot->getColumn(options.withExtendedObjects(), column.name); @@ -315,7 +315,7 @@ static DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool che { const auto * type_tuple = typeid_cast(type.get()); if (!type_tuple) - throw Exception(ErrorCodes::LOGICAL_ERROR, + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Least common type for object can be deduced only from tuples, but {} given", type->getName()); auto [tuple_paths, tuple_types] = flattenTuple(type); @@ -427,7 +427,7 @@ static DataTypePtr getLeastCommonTypeForDynamicColumnsImpl( if (const auto * type_tuple = typeid_cast(type_in_storage.get())) return getLeastCommonTypeForTuple(*type_tuple, concrete_types, check_ambiguos_paths); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Type {} unexpectedly has dynamic columns", type_in_storage->getName()); + throw 
Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Type {} unexpectedly has dynamic columns", type_in_storage->getName()); } DataTypePtr getLeastCommonTypeForDynamicColumns( @@ -481,7 +481,7 @@ DataTypePtr createConcreteEmptyDynamicColumn(const DataTypePtr & type_in_storage return recreateTupleWithElements(*type_tuple, new_elements); } - throw Exception(ErrorCodes::LOGICAL_ERROR, "Type {} unexpectedly has dynamic columns", type_in_storage->getName()); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Type {} unexpectedly has dynamic columns", type_in_storage->getName()); } bool hasDynamicSubcolumns(const ColumnsDescription & columns) @@ -613,7 +613,7 @@ DataTypePtr reduceNumberOfDimensions(DataTypePtr type, size_t dimensions_to_redu { const auto * type_array = typeid_cast(type.get()); if (!type_array) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Not enough dimensions to reduce"); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Not enough dimensions to reduce"); type = type_array->getNestedType(); } @@ -627,7 +627,7 @@ ColumnPtr reduceNumberOfDimensions(ColumnPtr column, size_t dimensions_to_reduce { const auto * column_array = typeid_cast(column.get()); if (!column_array) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Not enough dimensions to reduce"); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Not enough dimensions to reduce"); column = column_array->getDataPtr(); } @@ -705,6 +705,7 @@ ColumnWithTypeAndDimensions createTypeFromNode(const Node & node) size_t num_elements = tuple_columns.size(); Columns tuple_elements_columns(num_elements); DataTypes tuple_elements_types(num_elements); + size_t last_offset = assert_cast(*offsets_columns.back()).getData().back(); /// Reduce extra array dimensions to get columns and types of Nested elements. for (size_t i = 0; i < num_elements; ++i) @@ -712,6 +713,14 @@ ColumnWithTypeAndDimensions createTypeFromNode(const Node & node) assert(tuple_columns[i].array_dimensions == tuple_columns[0].array_dimensions); tuple_elements_columns[i] = reduceNumberOfDimensions(tuple_columns[i].column, tuple_columns[i].array_dimensions); tuple_elements_types[i] = reduceNumberOfDimensions(tuple_columns[i].type, tuple_columns[i].array_dimensions); + if (tuple_elements_columns[i]->size() != last_offset) + throw Exception( + ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Cannot create a type for subcolumn {} in Object data type: offsets_column has data inconsistent with nested_column. " + "Data size: {}, last offset: {}", + node.path.getPath(), + tuple_elements_columns[i]->size(), + last_offset); } auto result_column = ColumnArray::create(ColumnTuple::create(tuple_elements_columns), offsets_columns.back()); @@ -720,6 +729,16 @@ ColumnWithTypeAndDimensions createTypeFromNode(const Node & node) /// Recreate result Array type and Array column. for (auto it = offsets_columns.rbegin() + 1; it != offsets_columns.rend(); ++it) { + last_offset = assert_cast((**it)).getData().back(); + if (result_column->size() != last_offset) + throw Exception( + ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Cannot create a type for subcolumn {} in Object data type: offsets_column has data inconsistent with nested_column. 
" + "Data size: {}, last offset: {}", + node.path.getPath(), + result_column->size(), + last_offset); + result_column = ColumnArray::create(result_column, *it); result_type = std::make_shared(result_type); } @@ -822,7 +841,7 @@ std::pair unflattenTuple( assert(paths.size() == tuple_columns.size()); if (paths.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot unflatten empty Tuple"); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Cannot unflatten empty Tuple"); /// We add all paths to the subcolumn tree and then create a type from it. /// The tree stores column, type and number of array dimensions @@ -841,7 +860,7 @@ std::pair unflattenTuple( tree.add(paths[i], [&](Node::Kind kind, bool exists) -> std::shared_ptr { if (pos >= num_parts) - throw Exception(ErrorCodes::LOGICAL_ERROR, + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Not enough name parts for path {}. Expected at least {}, got {}", paths[i].getPath(), pos + 1, num_parts); diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index e6dc16ef5a0..1bdc29daebd 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -29,7 +29,7 @@ namespace ErrorCodes extern const int INCORRECT_DATA; extern const int CANNOT_READ_ALL_DATA; extern const int ARGUMENT_OUT_OF_BOUND; - extern const int LOGICAL_ERROR; + extern const int EXPERIMENTAL_FEATURE_ERROR; } template @@ -177,7 +177,7 @@ void SerializationObject::serializeBinaryBulkStatePrefix( auto * stream = settings.getter(settings.path); if (!stream) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for kind of binary serialization"); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Missing stream for kind of binary serialization"); auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); @@ -288,7 +288,7 @@ void SerializationObject::serializeBinaryBulkWithMultipleStreams( if (!state_object->nested_type->equals(*tuple_type)) { - throw Exception(ErrorCodes::LOGICAL_ERROR, + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Types of internal column of Object mismatched. 
Expected: {}, Got: {}", state_object->nested_type->getName(), tuple_type->getName()); } From 260c6387555b3823e56db135f80adaa5e0c300ab Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 11 Mar 2024 16:59:01 +0000 Subject: [PATCH 168/374] Replace forgotten logical error --- src/DataTypes/ObjectUtils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 48392a614a5..c565930f49a 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -653,7 +653,7 @@ ColumnWithTypeAndDimensions createTypeFromNode(const Node & node) auto collect_tuple_elemets = [](const auto & children) { if (children.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot create type from empty Tuple or Nested node"); + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Cannot create type from empty Tuple or Nested node"); std::vector> tuple_elements; tuple_elements.reserve(children.size()); From 16e01eb93ad449c61417dcaccd570439364b0714 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 11 Mar 2024 18:05:51 +0100 Subject: [PATCH 169/374] Fix style --- src/Core/Settings.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index fb456b46d89..8257b94cd9f 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -15,6 +15,7 @@ namespace ErrorCodes extern const int THERE_IS_NO_PROFILE; extern const int NO_ELEMENTS_IN_CONFIG; extern const int UNKNOWN_ELEMENT_IN_CONFIG; + extern const int BAD_ARGUMENTS; } IMPLEMENT_SETTINGS_TRAITS(SettingsTraits, LIST_OF_SETTINGS) From f656a015385898602cb651b419b46927f99ab602 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 11 Mar 2024 16:39:13 +0000 Subject: [PATCH 170/374] CI: fix sync build issue with reuse #do_not_test --- tests/ci/ci.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 9d57f161be3..cc4d0b11eef 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -140,7 +140,7 @@ class CiCache: self.s3 = s3 self.job_digests = job_digests self.cache_s3_paths = { - job_type: f"{self._S3_CACHE_PREFIX}/{job_type.value}-{self.job_digests[self._get_reference_job_name(job_type)]}/" + job_type: f"{self._S3_CACHE_PREFIX}/{job_type.value}-{self._get_digest_for_job_type(self.job_digests, job_type)}/" for job_type in self.JobType } self.s3_record_prefixes = { @@ -155,14 +155,23 @@ class CiCache: if not self._LOCAL_CACHE_PATH.exists(): self._LOCAL_CACHE_PATH.mkdir(parents=True, exist_ok=True) - def _get_reference_job_name(self, job_type: JobType) -> str: - res = Build.PACKAGE_RELEASE + def _get_digest_for_job_type( + self, job_digests: Dict[str, str], job_type: JobType + ) -> str: if job_type == self.JobType.DOCS: - res = JobNames.DOCS_CHECK + res = job_digests[JobNames.DOCS_CHECK] elif job_type == self.JobType.SRCS: - res = Build.PACKAGE_RELEASE + # any build type job has the same digest - pick up Build.PACKAGE_RELEASE or Build.PACKAGE_ASAN as a failover + # Build.PACKAGE_RELEASE may not exist in the list if we have reduced CI pipeline + if Build.PACKAGE_RELEASE in job_digests: + res = job_digests[Build.PACKAGE_RELEASE] + elif Build.PACKAGE_ASAN in job_digests: + # failover, if failover does not work - fix it! + res = job_digests[Build.PACKAGE_ASAN] + else: + assert False, "BUG, no build job in digest' list" else: - assert False + assert False, "BUG, New JobType? 
- please update func" return res def _get_record_file_name( From f973e405eeb4f28a6a937c26d19cad54acd00eb4 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Wed, 7 Feb 2024 19:36:28 +0000 Subject: [PATCH 171/374] CI: fixing ARM integration tests #do_not_test --- tests/ci/ci.py | 41 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 9d57f161be3..898d23be843 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1183,13 +1183,13 @@ def _configure_jobs( if batches_to_do: jobs_to_do.append(job) + jobs_params[job] = { + "batches": batches_to_do, + "num_batches": num_batches, + } elif add_to_skip: # treat job as being skipped only if it's controlled by digest jobs_to_skip.append(job) - jobs_params[job] = { - "batches": batches_to_do, - "num_batches": num_batches, - } if not pr_info.is_release_branch(): # randomization bucket filtering (pick one random job from each bucket, for jobs with configured random_bucket property) @@ -1268,6 +1268,33 @@ def _configure_jobs( jobs_to_do = list( set(job for job in jobs_to_do_requested if job not in jobs_to_skip) ) + # if requested job does not have params in jobs_params (it happens for "run_by_label" job) + # we need to add params - otherwise it won't run as "batches" list will be empty + for job in jobs_to_do: + if job not in jobs_params: + num_batches = CI_CONFIG.get_job_config(job).num_batches + jobs_params[job] = { + "batches": list(range(num_batches)), + "num_batches": num_batches, + } + + requested_batches = set() + for token in commit_tokens: + if token.startswith("batch_"): + try: + batches = [ + int(batch) for batch in token.removeprefix("batch_").split("_") + ] + except Exception: + print(f"ERROR: failed to parse commit tag [{token}]") + requested_batches.update(batches) + if requested_batches: + print( + f"NOTE: Only specific job batches were requested [{list(requested_batches)}]" + ) + for job, params in jobs_params.items(): + if params["num_batches"] > 1: + params["batches"] = list(requested_batches) return { "digests": digests, @@ -1372,7 +1399,11 @@ def _update_gh_statuses_action(indata: Dict, s3: S3Helper) -> None: def _fetch_commit_tokens(message: str) -> List[str]: pattern = r"#[\w-]+" matches = [match[1:] for match in re.findall(pattern, message)] - res = [match for match in matches if match in Labels or match.startswith("job_")] + res = [ + match + for match in matches + if match in Labels or match.startswith("job_") or match.startswith("batch_") + ] return res From 4b94bcd54ebd8713db8aefc453edaf15a1aa55ab Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 11 Mar 2024 18:41:17 +0100 Subject: [PATCH 172/374] Update ReadSettings.h --- src/IO/ReadSettings.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index fb8cbaf4a98..c0a63bf51b1 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -99,7 +99,6 @@ struct ReadSettings bool enable_filesystem_cache = true; bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false; bool enable_filesystem_cache_log = false; - bool force_read_through_cache_merges = false; size_t filesystem_cache_segments_batch_size = 20; bool use_page_cache_for_disks_without_file_cache = false; From fd68fed0470214d5c8734b0c89fbb779cf9b821f Mon Sep 17 00:00:00 2001 From: Han Fei Date: Mon, 11 Mar 2024 19:21:35 +0100 Subject: [PATCH 173/374] fix flaky 02949_ttl_group_by_bug --- 
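A minimal, self-contained sketch of the batch-tag parsing that the hunk below adds around `_configure_jobs` and `_fetch_commit_tokens`: a `#batch_N[_M…]` tag in the commit message restricts multi-batch jobs to the listed batches. The helper name here is hypothetical and only for illustration; the real logic lives inline in `ci.py`.

```python
def parse_batch_tokens(tokens):
    """Collect requested batch numbers from tags such as 'batch_0_2' (leading '#' already stripped)."""
    requested = set()
    for token in tokens:
        if not token.startswith("batch_"):
            continue
        try:
            batches = [int(part) for part in token.removeprefix("batch_").split("_")]
        except ValueError:
            print(f"ERROR: failed to parse commit tag [{token}]")
            continue
        requested.update(batches)
    return requested


# e.g. a commit message containing "#batch_0_2" limits multi-batch jobs to batches 0 and 2
assert parse_batch_tokens(["batch_0_2", "job_Style_check"]) == {0, 2}
```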
tests/queries/0_stateless/02949_ttl_group_by_bug.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02949_ttl_group_by_bug.sql b/tests/queries/0_stateless/02949_ttl_group_by_bug.sql index 2888f6e7d66..a3d0794c897 100644 --- a/tests/queries/0_stateless/02949_ttl_group_by_bug.sql +++ b/tests/queries/0_stateless/02949_ttl_group_by_bug.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS ttl_group_by_bug; CREATE TABLE ttl_group_by_bug (key UInt32, ts DateTime, value UInt32, min_value UInt32 default value, max_value UInt32 default value) -ENGINE = MergeTree() PARTITION BY toYYYYMM(ts) +ENGINE = MergeTree() ORDER BY (key, toStartOfInterval(ts, toIntervalMinute(3)), ts) TTL ts + INTERVAL 5 MINUTE GROUP BY key, toStartOfInterval(ts, toIntervalMinute(3)) SET value = sum(value), min_value = min(min_value), max_value = max(max_value), ts=min(toStartOfInterval(ts, toIntervalMinute(3))); From a1e5161cee50650a5c4e87ca60e7ed9eb61451b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Mar 2024 19:25:34 +0100 Subject: [PATCH 174/374] Disable sanitizers with 02784_parallel_replicas_automatic_decision_join --- .../02784_parallel_replicas_automatic_decision_join.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh b/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh index ef3e6000903..801cd22b79f 100755 --- a/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh +++ b/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -## Note: The analyzer doesn't support JOIN with parallel replicas yet +# Tags: no-tsan, no-asan, no-msan +# It's not clear why distributed aggregation is much slower with sanitizers (https://github.com/ClickHouse/ClickHouse/issues/60625) CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 55a82047613c607dedb592fed019d04455e8c8e8 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 11 Mar 2024 19:43:30 +0100 Subject: [PATCH 175/374] Fix test --- .../0_stateless/03003_compatibility_setting_bad_value.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql index 9a6f4e7944a..48e98798c51 100644 --- a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql +++ b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql @@ -1,2 +1,2 @@ -select 42 settings compatibility=NULL; -- {clientError BAD_GET} +select 42 settings compatibility=NULL; -- {clientError BAD_ARGUMENTS} From 5a71636411cb358c94e58b7caac18c22104b0e1c Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 11 Mar 2024 19:44:52 +0100 Subject: [PATCH 176/374] Fxi --- tests/integration/test_disk_types/test.py | 30 +++++++++++++++++++ .../test_endpoint_macro_substitution/test.py | 9 +++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py index 86579911b3e..5047cdc605e 100644 --- a/tests/integration/test_disk_types/test.py +++ b/tests/integration/test_disk_types/test.py @@ -50,6 +50,36 @@ def test_different_types(cluster): assert ( fields[encrypted_col_ix] == "0" ), f"{fields[name_col_ix]} expected to be non-encrypted!" 
+def test_different_types(cluster): + node = cluster.instances["node"] + response = TSV.toMat(node.query("SELECT * FROM system.disks FORMAT TSVWithNames")) + + assert len(response) > len(disk_types) # at least one extra line for header + + name_col_ix = response[0].index("name") + type_col_ix = response[0].index("type") + encrypted_col_ix = response[0].index("is_encrypted") + + for fields in response[1:]: # skip header + assert len(fields) >= 7 + expected_disk_type = disk_types.get(fields[name_col_ix], "UNKNOWN") + + if expected_disk_type != "Local": + disk_type = fields[response[0].index("object_storage_type")] + else: + disk_type = fields[type_col_ix] + + assert ( + expected_disk_type == disk_type + ), f"Wrong type ({fields[type_col_ix]}) for disk {fields[name_col_ix]}!" + if "encrypted" in fields[name_col_ix]: + assert ( + fields[encrypted_col_ix] == "1" + ), f"{fields[name_col_ix]} expected to be encrypted!" + else: + assert ( + fields[encrypted_col_ix] == "0" + ), f"{fields[name_col_ix]} expected to be non-encrypted!" def test_select_by_type(cluster): diff --git a/tests/integration/test_endpoint_macro_substitution/test.py b/tests/integration/test_endpoint_macro_substitution/test.py index bec3d9de0e3..e161d8e82ff 100644 --- a/tests/integration/test_endpoint_macro_substitution/test.py +++ b/tests/integration/test_endpoint_macro_substitution/test.py @@ -45,8 +45,15 @@ def test_different_types(cluster): for fields in response[1:]: # skip header assert len(fields) >= 7 + expected_disk_type = disk_types.get(fields[name_col_ix], "UNKNOWN") + + if expected_disk_type != "Local": + disk_type = fields[response[0].index("object_storage_type")] + else: + disk_type = fields[type_col_ix] + assert ( - disk_types.get(fields[name_col_ix], "UNKNOWN") == fields[type_col_ix] + expected_disk_type == disk_type ), f"Wrong type ({fields[type_col_ix]}) for disk {fields[name_col_ix]}!" if "encrypted" in fields[name_col_ix]: assert ( From b7b83085d1722b192d802ffc3677ea6d0f03c85a Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 11 Mar 2024 18:49:45 +0000 Subject: [PATCH 177/374] CI: ci.py hot style fix #do_not_test --- tests/ci/ci.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index a699642691b..29906e6571f 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -55,6 +55,8 @@ from report import ERROR, SUCCESS, BuildResult, JobReport from s3_helper import S3Helper from version_helper import get_version_from_repo +# pylint: disable=too-many-lines + @dataclass class PendingState: From 5a400c181b818cf3bbf371eb388a937ee816a66c Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 11 Mar 2024 09:49:25 +0100 Subject: [PATCH 178/374] Fix llvm symbolizer on CI In #61011 the whole toolchain installation had been removed from the base image to reduce image sizes, and this is a good thing indeed. 
However it also breaks the symbolizer for sanitizers, which makes stacktraces unreadable, so let's fix this by getting back llvm package, this should be OK, since it's size is not gigabytes, but only 48MiB (at least for llvm-14): # dpkg -L llvm-14| xargs file | grep -v directory | cut -d: -f1 | xargs du -sch | grep total 48M total Signed-off-by: Azat Khuzhin --- docker/test/base/Dockerfile | 3 +++ docker/test/fasttest/Dockerfile | 3 --- docker/test/util/Dockerfile | 2 ++ 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 55229e893de..2317f84e0cb 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -33,6 +33,9 @@ ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_m ENV UBSAN_OPTIONS='print_stacktrace=1' ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1' +# for external_symbolizer_path +RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer + RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8 ENV LC_ALL en_US.UTF-8 diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 62cdcc3f830..912ff191e57 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -14,7 +14,6 @@ RUN apt-get update \ libclang-${LLVM_VERSION}-dev \ libclang-rt-${LLVM_VERSION}-dev \ lld-${LLVM_VERSION} \ - llvm-${LLVM_VERSION} \ llvm-${LLVM_VERSION}-dev \ lsof \ ninja-build \ @@ -37,8 +36,6 @@ RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3 # This symlink is required by gcc to find the lld linker RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld -# for external_symbolizer_path -RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer # FIXME: workaround for "The imported target "merge-fdata" references the file" error # https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index 4f2dc9df849..5446adf3793 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -26,6 +26,8 @@ RUN apt-get update \ && export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \ && echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \ /etc/apt/sources.list \ + && apt-get update \ + && apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION} \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* From 9b055c3a43039387b42e755efddd83b9a8934ca6 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 11 Mar 2024 20:38:30 +0100 Subject: [PATCH 179/374] Use assert_cast to prevent nullptr dereference on bad column types in FunctionsConversion --- src/Functions/FunctionsConversion.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 1522e76893e..f338af28240 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -4561,7 +4561,7 @@ arguments, result_type, input_rows_count); \ if (from_low_cardinality) { - const auto * col_low_cardinality = typeid_cast(arguments[0].column.get()); + const auto * col_low_cardinality 
= assert_cast(arguments[0].column.get()); if (skip_not_null_check && col_low_cardinality->containsNull()) throw Exception(ErrorCodes::CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN, "Cannot convert NULL value to non-Nullable type"); @@ -4586,7 +4586,7 @@ arguments, result_type, input_rows_count); \ if (to_low_cardinality) { auto res_column = to_low_cardinality->createColumn(); - auto * col_low_cardinality = typeid_cast(res_column.get()); + auto * col_low_cardinality = assert_cast(res_column.get()); if (from_low_cardinality && !src_converted_to_full_column) { From 3a26b9c89ee3083884fde341c2af418bcde2f4cf Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 11 Mar 2024 19:42:25 +0000 Subject: [PATCH 180/374] impl --- .../0_stateless/02887_mutations_subcolumns.reference | 6 +++--- tests/queries/0_stateless/02887_mutations_subcolumns.sql | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02887_mutations_subcolumns.reference b/tests/queries/0_stateless/02887_mutations_subcolumns.reference index c2d6cbbd225..1ccc83b48a3 100644 --- a/tests/queries/0_stateless/02887_mutations_subcolumns.reference +++ b/tests/queries/0_stateless/02887_mutations_subcolumns.reference @@ -5,6 +5,6 @@ 4 ttt 5 ttt 6 ttt -{"a":"1","obj":{"k1":1,"k2":null,"k3":null}} -{"a":"3","obj":{"k1":null,"k2":null,"k3":1}} -{"a":"1","obj":{"k1":1,"k2":null,"k3":null}} +1 [('k1',1)] +3 [('k3',1)] +1 [('k1',1)] diff --git a/tests/queries/0_stateless/02887_mutations_subcolumns.sql b/tests/queries/0_stateless/02887_mutations_subcolumns.sql index a01158e1b06..87b3009e929 100644 --- a/tests/queries/0_stateless/02887_mutations_subcolumns.sql +++ b/tests/queries/0_stateless/02887_mutations_subcolumns.sql @@ -40,9 +40,9 @@ INSERT INTO t_mutations_subcolumns VALUES (2, '{"k2": 1}'); INSERT INTO t_mutations_subcolumns VALUES (3, '{"k3": 1}'); ALTER TABLE t_mutations_subcolumns DELETE WHERE obj.k2 = 1; -SELECT * FROM t_mutations_subcolumns ORDER BY a FORMAT JSONEachRow; +SELECT a, arrayFilter(x -> not isNull(x.2), tupleToNameValuePairs(obj)) FROM t_mutations_subcolumns ORDER BY a; ALTER TABLE t_mutations_subcolumns DELETE WHERE isNull(obj.k1); -SELECT * FROM t_mutations_subcolumns ORDER BY a FORMAT JSONEachRow; +SELECT a, arrayFilter(x -> not isNull(x.2), tupleToNameValuePairs(obj)) FROM t_mutations_subcolumns ORDER BY a; DROP TABLE t_mutations_subcolumns; From 2e74685ba6ea8a3cc32ff0e21d0ee657517ef5a4 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 11 Mar 2024 19:58:43 +0000 Subject: [PATCH 181/374] Make variant tests a bit faster --- .../02941_variant_type_2.reference | 80 +++++++++---------- .../0_stateless/02941_variant_type_2.sh | 12 +-- ...different_local_and_global_order.reference | 30 +++---- ...e_with_different_local_and_global_order.sh | 8 +- 4 files changed, 65 insertions(+), 65 deletions(-) diff --git a/tests/queries/0_stateless/02941_variant_type_2.reference b/tests/queries/0_stateless/02941_variant_type_2.reference index 4b6d53c52ac..20a5176cb5e 100644 --- a/tests/queries/0_stateless/02941_variant_type_2.reference +++ b/tests/queries/0_stateless/02941_variant_type_2.reference @@ -1,51 +1,51 @@ Memory test4 insert test4 select -1000000 -200000 -200000 -200000 -200000 -200000 -200000 -200000 +500000 +100000 +100000 +100000 +100000 +100000 +100000 +100000 MergeTree compact test4 insert test4 select -1000000 -200000 -200000 -200000 -200000 -200000 -200000 -200000 +500000 +100000 +100000 +100000 +100000 +100000 +100000 +100000 test4 select -1000000 -200000 -200000 -200000 -200000 -200000 -200000 
-200000 +500000 +100000 +100000 +100000 +100000 +100000 +100000 +100000 MergeTree wide test4 insert test4 select -1000000 -200000 -200000 -200000 -200000 -200000 -200000 -200000 +500000 +100000 +100000 +100000 +100000 +100000 +100000 +100000 test4 select -1000000 -200000 -200000 -200000 -200000 -200000 -200000 -200000 +500000 +100000 +100000 +100000 +100000 +100000 +100000 +100000 diff --git a/tests/queries/0_stateless/02941_variant_type_2.sh b/tests/queries/0_stateless/02941_variant_type_2.sh index 509c537e7fc..d1fa0a777c9 100755 --- a/tests/queries/0_stateless/02941_variant_type_2.sh +++ b/tests/queries/0_stateless/02941_variant_type_2.sh @@ -12,12 +12,12 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic function test4_insert() { echo "test4 insert" - $CH_CLIENT -nmq "insert into test select number, NULL from numbers(200000); -insert into test select number + 200000, number from numbers(200000); -insert into test select number + 400000, 'str_' || toString(number) from numbers(200000); -insert into test select number + 600000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(200000); -insert into test select number + 800000, tuple(number, number + 1)::Tuple(a UInt32, b UInt32) from numbers(200000); -insert into test select number + 1000000, range(number % 20 + 1)::Array(UInt64) from numbers(200000);" + $CH_CLIENT -nmq "insert into test select number, NULL from numbers(100000); +insert into test select number + 100000, number from numbers(100000); +insert into test select number + 200000, 'str_' || toString(number) from numbers(100000); +insert into test select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000); +insert into test select number + 400000, tuple(number, number + 1)::Tuple(a UInt32, b UInt32) from numbers(100000); +insert into test select number + 500000, range(number % 20 + 1)::Array(UInt64) from numbers(100000);" } function test4_select diff --git a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.reference b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.reference index 1736a307c42..4109a88997c 100644 --- a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.reference +++ b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.reference @@ -44,9 +44,9 @@ str_38 str_38 \N ----------------------------------------------------------------------------------------------------------- test2 insert test2 select -2500000 -750000 -1750000 +500000 +100000 +400000 ----------------------------------------------------------------------------------------------------------- MergeTree compact test1 insert @@ -136,14 +136,14 @@ str_38 str_38 \N ----------------------------------------------------------------------------------------------------------- test2 insert test2 select -2500000 -750000 -1750000 +500000 +100000 +400000 ----------------------------------------------------------------------------------------------------------- test2 select -2500000 -750000 -1750000 +500000 +100000 +400000 ----------------------------------------------------------------------------------------------------------- MergeTree wide test1 insert @@ -233,12 +233,12 @@ str_38 str_38 \N ----------------------------------------------------------------------------------------------------------- test2 insert test2 select -2500000 -750000 -1750000 +500000 +100000 +400000 
----------------------------------------------------------------------------------------------------------- test2 select -2500000 -750000 -1750000 +500000 +100000 +400000 ----------------------------------------------------------------------------------------------------------- diff --git a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh index 3bb37719a3f..1d88757a5d6 100755 --- a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh +++ b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh @@ -29,10 +29,10 @@ function test1_select() function test2_insert() { echo "test2 insert" - $CH_CLIENT -q "insert into test select number, number::Variant(UInt64)::Variant(UInt64, Array(UInt64)) from numbers(1000000) settings max_insert_block_size = 100000, min_insert_block_size_rows=100000" - $CH_CLIENT -q "insert into test select number, if(number % 2, NULL, number)::Variant(UInt64)::Variant(UInt64, String, Array(UInt64)) as res from numbers(1000000, 1000000) settings max_insert_block_size = 100000, min_insert_block_size_rows=100000" - $CH_CLIENT -q "insert into test select number, if(number % 2, NULL, 'str_' || toString(number))::Variant(String)::Variant(UInt64, String, Array(UInt64)) as res from numbers(2000000, 1000000) settings max_insert_block_size = 100000, min_insert_block_size_rows=100000" - $CH_CLIENT -q "insert into test select number, if(number < 3500000, if(number % 2, NULL, number)::Variant(UInt64)::Variant(UInt64, String, Array(UInt64)), if(number % 2, NULL, 'str_' || toString(number))::Variant(String)::Variant(UInt64, String, Array(UInt64))) from numbers(3000000, 1000000) settings max_insert_block_size = 100000, min_insert_block_size_rows=100000" + $CH_CLIENT -q "insert into test select number, number::Variant(UInt64)::Variant(UInt64, Array(UInt64)) from numbers(200000) settings max_insert_block_size = 10000, min_insert_block_size_rows=10000" + $CH_CLIENT -q "insert into test select number, if(number % 2, NULL, number)::Variant(UInt64)::Variant(UInt64, String, Array(UInt64)) as res from numbers(200000, 200000) settings max_insert_block_size = 10000, min_insert_block_size_rows=10000" + $CH_CLIENT -q "insert into test select number, if(number % 2, NULL, 'str_' || toString(number))::Variant(String)::Variant(UInt64, String, Array(UInt64)) as res from numbers(400000, 200000) settings max_insert_block_size = 10000, min_insert_block_size_rows=10000" + $CH_CLIENT -q "insert into test select number, if(number < 3500000, if(number % 2, NULL, number)::Variant(UInt64)::Variant(UInt64, String, Array(UInt64)), if(number % 2, NULL, 'str_' || toString(number))::Variant(String)::Variant(UInt64, String, Array(UInt64))) from numbers(600000, 200000) settings max_insert_block_size = 10000, min_insert_block_size_rows=10000" } function test2_select() From a90a6e9a271515dec58e4d4f716bcd591f245c00 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 11 Mar 2024 21:05:44 +0100 Subject: [PATCH 182/374] Revert "Don't allow to set max_parallel_replicas to 0 as it doesn't make sense" --- src/Client/ConnectionPoolWithFailover.cpp | 10 ---------- src/Client/HedgedConnectionsFactory.cpp | 6 +----- src/Client/HedgedConnectionsFactory.h | 2 +- src/Interpreters/InterpreterSelectQuery.cpp | 2 +- src/Planner/PlannerJoinTree.cpp | 4 ++-- .../03001_max_parallel_replicas_zero_value.reference | 
0 .../03001_max_parallel_replicas_zero_value.sql | 5 ----- 7 files changed, 5 insertions(+), 24 deletions(-) delete mode 100644 tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.reference delete mode 100644 tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index ad8ed0067d8..492fd4ae9e2 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -21,7 +21,6 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int ALL_CONNECTION_TRIES_FAILED; - extern const int BAD_ARGUMENTS; } @@ -192,20 +191,11 @@ std::vector ConnectionPoolWithFailover::g max_entries = nested_pools.size(); } else if (pool_mode == PoolMode::GET_ONE) - { max_entries = 1; - } else if (pool_mode == PoolMode::GET_MANY) - { - if (settings.max_parallel_replicas == 0) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of the setting max_parallel_replicas must be greater than 0"); - max_entries = settings.max_parallel_replicas; - } else - { throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode"); - } if (!priority_func) priority_func = makeGetPriorityFunc(settings); diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index 703cc1f8821..f5b074a0257 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -19,7 +19,6 @@ namespace ErrorCodes extern const int ALL_CONNECTION_TRIES_FAILED; extern const int ALL_REPLICAS_ARE_STALE; extern const int LOGICAL_ERROR; - extern const int BAD_ARGUMENTS; } HedgedConnectionsFactory::HedgedConnectionsFactory( @@ -83,10 +82,7 @@ std::vector HedgedConnectionsFactory::getManyConnections(PoolMode } case PoolMode::GET_MANY: { - if (max_parallel_replicas == 0) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of the setting max_parallel_replicas must be greater than 0"); - - max_entries = std::min(max_parallel_replicas, shuffled_pools.size()); + max_entries = max_parallel_replicas; break; } } diff --git a/src/Client/HedgedConnectionsFactory.h b/src/Client/HedgedConnectionsFactory.h index dd600d58e1e..ce7b553acdd 100644 --- a/src/Client/HedgedConnectionsFactory.h +++ b/src/Client/HedgedConnectionsFactory.h @@ -158,7 +158,7 @@ private: /// checking the number of requested replicas that are still in process). 
size_t requested_connections_count = 0; - const size_t max_parallel_replicas = 1; + const size_t max_parallel_replicas = 0; const bool skip_unavailable_shards = 0; }; diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index e28d8366aa7..bcedba7346d 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -947,7 +947,7 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() if (number_of_replicas_to_use <= 1) { context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); - context->setSetting("max_parallel_replicas", UInt64{1}); + context->setSetting("max_parallel_replicas", UInt64{0}); LOG_DEBUG(log, "Disabling parallel replicas because there aren't enough rows to read"); return true; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 0fe943e0bc7..7b3fb0c5c91 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -295,7 +295,7 @@ bool applyTrivialCountIfPossible( /// The query could use trivial count if it didn't use parallel replicas, so let's disable it query_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); - query_context->setSetting("max_parallel_replicas", UInt64{1}); + query_context->setSetting("max_parallel_replicas", UInt64{0}); LOG_TRACE(getLogger("Planner"), "Disabling parallel replicas to be able to use a trivial count optimization"); } @@ -756,7 +756,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres { planner_context->getMutableQueryContext()->setSetting( "allow_experimental_parallel_reading_from_replicas", Field(0)); - planner_context->getMutableQueryContext()->setSetting("max_parallel_replicas", UInt64{1}); + planner_context->getMutableQueryContext()->setSetting("max_parallel_replicas", UInt64{0}); LOG_DEBUG(getLogger("Planner"), "Disabling parallel replicas because there aren't enough rows to read"); } else if (number_of_replicas_to_use < settings.max_parallel_replicas) diff --git a/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.reference b/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql b/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql deleted file mode 100644 index 611aa4777ba..00000000000 --- a/tests/queries/0_stateless/03001_max_parallel_replicas_zero_value.sql +++ /dev/null @@ -1,5 +0,0 @@ -drop table if exists test_d; -create table test_d engine=Distributed(test_cluster_two_shard_three_replicas_localhost, system, numbers); -select * from test_d limit 10 settings max_parallel_replicas = 0, prefer_localhost_replica = 0; --{serverError BAD_ARGUMENTS} -drop table test_d; - From 120a1fdb5f817b442bf659da243407fb7003eaa1 Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Mon, 11 Mar 2024 17:24:33 -0300 Subject: [PATCH 183/374] Improves varPop docs. Adds varPopStable. 
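As a quick sketch of the single-argument form documented below — `varPop(x)` computes the dispersion `Σ((x - x̅)^2) / n`, and `varPopStable(x)` is its numerically stable counterpart — the table and values here are hypothetical and not part of the patch:

```sql
-- Hypothetical demo table; both calls return 2: the mean is 3 and Σ((x - 3)^2) / 5 = 10 / 5 = 2.
DROP TABLE IF EXISTS var_demo;
CREATE TABLE var_demo (x Float64) ENGINE = Memory;
INSERT INTO var_demo VALUES (1), (2), (3), (4), (5);

SELECT varPop(x) AS var_pop, varPopStable(x) AS var_pop_stable FROM var_demo;
```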
--- .../aggregate-functions/reference/varpop.md | 99 +++++++++++++++++-- 1 file changed, 91 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpop.md b/docs/en/sql-reference/aggregate-functions/reference/varpop.md index 751688b0830..5f18bdc30f6 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varpop.md @@ -1,16 +1,99 @@ --- -slug: /en/sql-reference/aggregate-functions/reference/varpop +title: "varPop" +slug: "/en/sql-reference/aggregate-functions/reference/varpop" sidebar_position: 32 --- -# varPop(x) +This page covers the `varPop` and `varPopStable` functions available in ClickHouse. -Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅`is the average value of `x`. +## varPop -In other words, dispersion for a set of values. Returns `Float64`. +Calculates the population covariance between two data columns. The population covariance measures the degree to which two variables vary together. Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅`is the average value of `x`. -Alias: `VAR_POP`. +**Syntax** -:::note -This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `varPopStable` function. It works slower but provides a lower computational error. -::: \ No newline at end of file +```sql +covarPop(x, y) +``` + +**Parameters** + +- `x`: The first data column. [Numeric](../../../native-protocol/columns.md) +- `y`: The second data column. [Numeric](../../../native-protocol/columns.md) + +**Returned value** + +Returns an integer of type `Float64`. + +**Implementation details** + +This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varPopStable` function](#varPopStable). + +**Example** + +```sql +DROP TABLE IF EXISTS test_data; +CREATE TABLE test_data +( + x Int32, + y Int32 +) +ENGINE = Memory; + +INSERT INTO test_data VALUES (1, 2), (2, 3), (3, 5), (4, 6), (5, 8); + +SELECT + covarPop(x, y) AS covar_pop +FROM test_data; +``` + +```response +3 +``` + +## varPopStable + +Calculates population covariance between two data columns using a stable, numerically accurate method to calculate the variance. This function is designed to provide reliable results even with large datasets or values that might cause numerical instability in other implementations. + +**Syntax** + +```sql +covarPopStable(x, y) +``` + +**Parameters** + +- `x`: The first data column. [String literal](../syntax#syntax-string-literal) +- `y`: The second data column. [Expression](../syntax#syntax-expressions) + +**Returned value** + +Returns an integer of type `Float64`. + +**Implementation details** + +Unlike [`varPop()`](#varPop), this function uses a stable, numerically accurate algorithm to calculate the population variance to avoid issues like catastrophic cancellation or loss of precision. This function also handles `NaN` and `Inf` values correctly, excluding them from calculations. 
+ +**Example** + +Query: + +```sql +DROP TABLE IF EXISTS test_data; +CREATE TABLE test_data +( + x Int32, + y Int32 +) +ENGINE = Memory; + +INSERT INTO test_data VALUES (1, 2), (2, 9), (9, 5), (4, 6), (5, 8); + +SELECT + covarPopStable(x, y) AS covar_pop_stable +FROM test_data; +``` + +```response +0.5999999999999999 +``` From 281dc8d29deba2980e6b191edefa3b62114d38a7 Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Mon, 11 Mar 2024 17:48:12 -0300 Subject: [PATCH 184/374] Improves varSamp docs. Adds varSampStable docs. --- .../aggregate-functions/reference/varsamp.md | 126 ++++++++++++++++-- 1 file changed, 118 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md index 9b2b94936ec..e75cb075ff8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md @@ -1,18 +1,128 @@ --- +title: "varSamp" slug: /en/sql-reference/aggregate-functions/reference/varsamp sidebar_position: 33 --- -# varSamp +This page contains information on the `varSamp` and `varSampStable` ClickHouse functions. -Calculates the amount `Σ((x - x̅)^2) / (n - 1)`, where `n` is the sample size and `x̅`is the average value of `x`. +## varSamp -It represents an unbiased estimate of the variance of a random variable if passed values from its sample. +Calculate the sample variance of a data set. -Returns `Float64`. When `n <= 1`, returns `+∞`. +**Syntax** -Alias: `VAR_SAMP`. +```sql +varSamp(expr) +``` -:::note -This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `varSampStable` function. It works slower but provides a lower computational error. -::: +**Parameters** + +- `expr`: An expression representing the data set for which you want to calculate the sample variance. [Expression](../syntax#syntax-expressions) + +**Returned value** + +Returns a Float64 value representing the sample variance of the input data set. + +**Implementation details** + +The `varSamp()` function calculates the sample variance using the following formula: + +```plaintext +∑(x - mean(x))^2 / (n - 1) +``` + +Where: + +- `x` is each individual data point in the data set. +- `mean(x)` is the arithmetic mean of the data set. +- `n` is the number of data points in the data set. + +The function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use the [`varPop()` function](./varpop#varpop) instead. + +This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varSampStable` function](#varSampStable). + +**Example** + +Query: + +```sql +CREATE TABLE example_table +( + id UInt64, + value Float64 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO example_table VALUES (1, 10.5), (2, 12.3), (3, 9.8), (4, 11.2), (5, 10.7); + +SELECT varSamp(value) FROM example_table; +``` + +Response: + +```response +0.8650000000000091 +``` + +## varSampStable + +Calculate the sample variance of a data set using a numerically stable algorithm. 
+ +**Syntax** + +```sql +varSampStable(expr) +``` + +**Parameters** + +- `expr`: An expression representing the data set for which you want to calculate the sample variance. [Expression](../syntax#syntax-expressions) + +**Returned value** + +The `varSampStable()` function returns a Float64 value representing the sample variance of the input data set. + +**Implementation details** + +The `varSampStable()` function calculates the sample variance using the same formula as the [`varSamp()`](#varSamp function): + +```plaintext +∑(x - mean(x))^2 / (n - 1) +``` + +Where: +- `x` is each individual data point in the data set. +- `mean(x)` is the arithmetic mean of the data set. +- `n` is the number of data points in the data set. + +The difference between `varSampStable()` and `varSamp()` is that `varSampStable()` is designed to provide a more deterministic and stable result when dealing with floating-point arithmetic. It uses an algorithm that minimizes the accumulation of rounding errors, which can be particularly important when dealing with large data sets or data with a wide range of values. + +Like `varSamp()`, the `varSampStable()` function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use the [`varPopStable()` function](./varpop#varpopstable) instead. + +**Example** + +Query: + +```sql +CREATE TABLE example_table +( + id UInt64, + value Float64 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO example_table VALUES (1, 10.5), (2, 12.3), (3, 9.8), (4, 11.2), (5, 10.7); + +SELECT varSampStable(value) FROM example_table; +``` + +Response: + +```response +0.865 +``` + +This query calculates the sample variance of the `value` column in the `example_table` using the `varSampStable()` function. The result shows that the sample variance of the values `[10.5, 12.3, 9.8, 11.2, 10.7]` is approximately 0.865, which may differ slightly from the result of `varSamp()` due to the more precise handling of floating-point arithmetic. 
From 563df9bdcb425810a0c2d3ecb11302e22039c048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Mar 2024 22:49:18 +0100 Subject: [PATCH 185/374] Fix multiple bugs in groupArraySorted --- .../AggregateFunctionGroupArraySorted.cpp | 8 +++++--- .../0_stateless/03008_groupSortedArray_field.reference | 3 +++ .../queries/0_stateless/03008_groupSortedArray_field.sql | 6 ++++++ 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03008_groupSortedArray_field.reference create mode 100644 tests/queries/0_stateless/03008_groupSortedArray_field.sql diff --git a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp index 0e9856cfab9..0692ff28f18 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp @@ -310,10 +310,12 @@ public: { for (Field & element : values) { - UInt8 is_null = 0; - readBinary(is_null, buf); - if (!is_null) + bool has_value = 0; + readBinary(has_value, buf); + if (has_value) serialization->deserializeBinary(element, buf, {}); + else + element = Field{}; } } else diff --git a/tests/queries/0_stateless/03008_groupSortedArray_field.reference b/tests/queries/0_stateless/03008_groupSortedArray_field.reference new file mode 100644 index 00000000000..a7f89ebcf58 --- /dev/null +++ b/tests/queries/0_stateless/03008_groupSortedArray_field.reference @@ -0,0 +1,3 @@ +0A01003C79A557B3C43400C4865AA84C3B4B01000650BC18F7DE0B00FAAF43E708213401008ED706EA0A9F13007228F915F5602C0100C692CA8FB81405003A6D357047EB1A01008416B7C3239EE3FF7BE9483CDC61DC01003E133A7C081AF5FFC1ECC583F7E5EA01000000000000000000000000000000000100C4865AA84C3BCBFF3B79A557B3C4B4010024C46EF500F1ECFFDB3B910AFF0ED301005E2FC14EBAEAE5FFA1D03EB14515DA +070109000000010600000001080000000103000000010500000001040000000107000000 AggregateFunction(groupArraySorted(10), Nullable(Decimal(3, 0))) +[3,4,5,6,7,8,9] diff --git a/tests/queries/0_stateless/03008_groupSortedArray_field.sql b/tests/queries/0_stateless/03008_groupSortedArray_field.sql new file mode 100644 index 00000000000..6d2aea641a5 --- /dev/null +++ b/tests/queries/0_stateless/03008_groupSortedArray_field.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/61186 +SELECT hex(CAST(unhex('0A01003C79A557B3C43400C4865AA84C3B4B01000650BC18F7DE0B00FAAF43E708213401008ED706EA0A9F13007228F915F5602C0100C692CA8FB81405003A6D357047EB1A01008416B7C3239EE3FF7BE9483CDC61DC01003E133A7C081AF5FFC1ECC583F7E5EA01000000000000000000000000000000000100C4865AA84C3BCBFF3B79A557B3C4B4010024C46EF500F1ECFFDB3B910AFF0ED301005E2FC14EBAEAE5FFA1D03EB14515DA'), + 'AggregateFunction(groupArraySorted(10), Decimal(38, 38))')); + +Select hex(groupArraySortedState(10)((number < 3 ? 
NULL : number)::Nullable(Decimal(3))) as t), toTypeName(t) from numbers(10); +Select finalizeAggregation(unhex('070109000000010600000001080000000103000000010500000001040000000107000000')::AggregateFunction(groupArraySorted(10), Nullable(Decimal(3, 0)))); From 1b04cc0b4da6d32fd4741ea953dfed060f846d0b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 12 Mar 2024 03:56:10 +0100 Subject: [PATCH 186/374] Fix strange log message --- src/Loggers/Loggers.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Loggers/Loggers.cpp b/src/Loggers/Loggers.cpp index 1d17585cc96..cc6e4691737 100644 --- a/src/Loggers/Loggers.cpp +++ b/src/Loggers/Loggers.cpp @@ -304,6 +304,9 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log log_settings.turn_off_logger = DB::TextLog::shouldTurnOffLogger(); + log_settings.database = config.getString("text_log.database", "system"); + log_settings.table = config.getString("text_log.table", "text_log"); + split->addTextLog(DB::TextLog::getLogQueue(log_settings), text_log_level); } #endif From c628eaca8ba19584fe36067dee8e6ec3e8f5cc4b Mon Sep 17 00:00:00 2001 From: Zhuo Qiu Date: Tue, 26 Dec 2023 14:13:07 +0800 Subject: [PATCH 187/374] Consider deleted rows when selecting parts to merge --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 90 +++++++++++++++++++ src/Storages/MergeTree/IMergeTreeDataPart.h | 13 +++ .../MergeTree/MergeFromLogEntryTask.cpp | 2 +- src/Storages/MergeTree/MergeTreeData.cpp | 1 + .../MergeTree/MergeTreeDataMergerMutator.cpp | 11 ++- .../MergeTree/MergeTreeDataMergerMutator.h | 2 +- .../MergeTree/MergeTreeDataWriter.cpp | 1 + src/Storages/MergeTree/MergeTreeSettings.h | 2 + .../MergeTree/MergedBlockOutputStream.cpp | 5 ++ .../MergeTree/MutateFromLogEntryTask.cpp | 2 +- src/Storages/MergeTree/MutateTask.cpp | 54 +++++++++++ .../MergeTree/ReplicatedMergeTreeQueue.cpp | 5 +- src/Storages/StorageMergeTree.cpp | 4 +- .../03001_consider_lwd_when_merge.reference | 3 + .../03001_consider_lwd_when_merge.sql | 23 +++++ 15 files changed, 208 insertions(+), 10 deletions(-) create mode 100644 tests/queries/0_stateless/03001_consider_lwd_when_merge.reference create mode 100644 tests/queries/0_stateless/03001_consider_lwd_when_merge.sql diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 3fea6d04944..c099512d636 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -609,6 +609,15 @@ UInt64 IMergeTreeDataPart::getMarksCount() const return index_granularity.getMarksCount(); } +UInt64 IMergeTreeDataPart::getExistingBytesOnDisk() const +{ + if (storage.getSettings()->exclude_deleted_rows_for_part_size_in_merge && supportLightweightDeleteMutate() && hasLightweightDelete() + && existing_rows_count.has_value() && existing_rows_count.value() < rows_count && rows_count > 0) + return bytes_on_disk * existing_rows_count.value() / rows_count; + else + return bytes_on_disk; +} + size_t IMergeTreeDataPart::getFileSizeOrZero(const String & file_name) const { auto checksum = checksums.files.find(file_name); @@ -691,6 +700,7 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks calculateColumnsAndSecondaryIndicesSizesOnDisk(); loadRowsCount(); /// Must be called after loadIndexGranularity() as it uses the value of `index_granularity`. + loadExistingRowsCount(); /// Must be called after loadRowsCount() as it uses the value of `rows_count`. 
loadPartitionAndMinMaxIndex(); if (!parent_part) { @@ -1313,6 +1323,86 @@ void IMergeTreeDataPart::loadRowsCount() } } +void IMergeTreeDataPart::loadExistingRowsCount() +{ + if (existing_rows_count.has_value()) + return; + + if (!rows_count || !storage.getSettings()->load_existing_rows_count_for_old_parts || !supportLightweightDeleteMutate() + || !hasLightweightDelete()) + existing_rows_count = rows_count; + else + existing_rows_count = readExistingRowsCount(); +} + +UInt64 IMergeTreeDataPart::readExistingRowsCount() +{ + const size_t total_mark = getMarksCount(); + if (!total_mark) + return rows_count; + + NamesAndTypesList cols; + cols.push_back(LightweightDeleteDescription::FILTER_COLUMN); + + StorageMetadataPtr metadata_ptr = storage.getInMemoryMetadataPtr(); + StorageSnapshotPtr storage_snapshot_ptr = std::make_shared(storage, metadata_ptr); + + MergeTreeReaderPtr reader = getReader( + cols, + storage_snapshot_ptr, + MarkRanges{MarkRange(0, total_mark)}, + nullptr, + storage.getContext()->getMarkCache().get(), + std::make_shared(), + MergeTreeReaderSettings{}, + ValueSizeMap{}, + ReadBufferFromFileBase::ProfileCallback{}); + + if (!reader) + { + LOG_WARNING(storage.log, "Create reader failed while reading existing rows count"); + return rows_count; + } + + size_t current_mark = 0; + bool continue_reading = false; + size_t current_row = 0; + size_t existing_count = 0; + + while (current_row < rows_count) + { + size_t rows_to_read = index_granularity.getMarkRows(current_mark); + continue_reading = (current_mark != 0); + + Columns result; + result.resize(1); + + size_t rows_read = reader->readRows(current_mark, total_mark, continue_reading, rows_to_read, result); + if (!rows_read) + { + LOG_WARNING(storage.log, "Part {} has lightweight delete, but _row_exists column not found", name); + return rows_count; + } + + current_row += rows_read; + current_mark += (rows_to_read == rows_read); + + const ColumnUInt8 * row_exists_col = typeid_cast(result[0].get()); + if (!row_exists_col) + { + LOG_WARNING(storage.log, "Part {} _row_exists column type is not UInt8", name); + return rows_count; + } + + for (UInt8 row_exists : row_exists_col->getData()) + if (row_exists) + existing_count++; + } + + LOG_DEBUG(storage.log, "Part {} existing_rows_count = {}", name, existing_count); + return existing_count; +} + void IMergeTreeDataPart::appendFilesOfRowsCount(Strings & files) { files.push_back("count.txt"); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index aaae64a5970..8bd32e777bc 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -231,6 +231,9 @@ public: size_t rows_count = 0; + /// Existing rows count (excluding lightweight deleted rows) + std::optional existing_rows_count; + time_t modification_time = 0; /// When the part is removed from the working set. Changes once. 
mutable std::atomic remove_time { std::numeric_limits::max() }; @@ -373,6 +376,10 @@ public: void setBytesOnDisk(UInt64 bytes_on_disk_) { bytes_on_disk = bytes_on_disk_; } void setBytesUncompressedOnDisk(UInt64 bytes_uncompressed_on_disk_) { bytes_uncompressed_on_disk = bytes_uncompressed_on_disk_; } + /// Returns estimated size of existing rows if setting exclude_deleted_rows_for_part_size_in_merge is true + /// Otherwise returns bytes_on_disk + UInt64 getExistingBytesOnDisk() const; + size_t getFileSizeOrZero(const String & file_name) const; auto getFilesChecksums() const { return checksums.files; } @@ -499,6 +506,9 @@ public: /// True if here is lightweight deleted mask file in part. bool hasLightweightDelete() const; + /// Read existing rows count from _row_exists column + UInt64 readExistingRowsCount(); + void writeChecksums(const MergeTreeDataPartChecksums & checksums_, const WriteSettings & settings); /// Checks the consistency of this data part. @@ -664,6 +674,9 @@ private: /// For the older format version calculates rows count from the size of a column with a fixed size. void loadRowsCount(); + /// Load existing rows count from _row_exists column if load_existing_rows_count_for_old_parts is true. + void loadExistingRowsCount(); + static void appendFilesOfRowsCount(Strings & files); /// Loads ttl infos in json format from file ttl.txt. If file doesn't exists assigns ttl infos with all zeros diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index ae6e398026d..5ef004ec019 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -174,7 +174,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() } /// Start to make the main work - size_t estimated_space_for_merge = MergeTreeDataMergerMutator::estimateNeededDiskSpace(parts); + size_t estimated_space_for_merge = MergeTreeDataMergerMutator::estimateNeededDiskSpace(parts, true); /// Can throw an exception while reserving space. 
IMergeTreeDataPart::TTLInfos ttl_infos; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index d56cf761cf4..5e05f75c1c5 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -8261,6 +8261,7 @@ std::pair MergeTreeData::createE new_data_part->setColumns(columns, {}, metadata_snapshot->getMetadataVersion()); new_data_part->rows_count = block.rows(); + new_data_part->existing_rows_count = block.rows(); new_data_part->partition = partition; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 1bf1d4a3c29..90144a8cc8f 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -405,7 +405,7 @@ MergeTreeDataMergerMutator::MergeSelectingInfo MergeTreeDataMergerMutator::getPo } IMergeSelector::Part part_info; - part_info.size = part->getBytesOnDisk(); + part_info.size = part->getExistingBytesOnDisk(); part_info.age = res.current_time - part->modification_time; part_info.level = part->info.level; part_info.data = ∂ @@ -611,7 +611,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectAllPartsToMergeWithinParti return SelectPartsDecision::CANNOT_SELECT; } - sum_bytes += (*it)->getBytesOnDisk(); + sum_bytes += (*it)->getExistingBytesOnDisk(); prev_it = it; ++it; @@ -793,7 +793,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart } -size_t MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts) +size_t MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts, const bool & is_merge) { size_t res = 0; time_t current_time = std::time(nullptr); @@ -804,7 +804,10 @@ size_t MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData:: if (part_max_ttl && part_max_ttl <= current_time) continue; - res += part->getBytesOnDisk(); + if (is_merge) + res += part->getExistingBytesOnDisk(); + else + res += part->getBytesOnDisk(); } return static_cast(res * DISK_USAGE_COEFFICIENT_TO_RESERVE); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index f3a3f51b6c3..731c5e1d176 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -193,7 +193,7 @@ public: /// The approximate amount of disk space needed for merge or mutation. With a surplus. - static size_t estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts); + static size_t estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts, const bool & is_merge); private: /** Select all parts belonging to the same partition. 
diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index fdac16ae19a..2ba74e44b40 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -537,6 +537,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( new_data_part->setColumns(columns, infos, metadata_snapshot->getMetadataVersion()); new_data_part->rows_count = block.rows(); + new_data_part->existing_rows_count = block.rows(); new_data_part->partition = std::move(partition); new_data_part->minmax_idx = std::move(minmax_idx); new_data_part->is_temp = true; diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 925dc973dc3..ea54f61b4b6 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -42,6 +42,7 @@ struct Settings; M(UInt64, compact_parts_max_bytes_to_buffer, 128 * 1024 * 1024, "Only available in ClickHouse Cloud", 0) \ M(UInt64, compact_parts_max_granules_to_buffer, 128, "Only available in ClickHouse Cloud", 0) \ M(UInt64, compact_parts_merge_max_bytes_to_prefetch_part, 16 * 1024 * 1024, "Only available in ClickHouse Cloud", 0) \ + M(Bool, load_existing_rows_count_for_old_parts, false, "Whether to load existing_rows_count for existing parts. If false, existing_rows_count will be equal to rows_count for existing parts.", 0) \ \ /** Merge settings. */ \ M(UInt64, merge_max_block_size, 8192, "How many rows in blocks should be formed for merge operations. By default has the same value as `index_granularity`.", 0) \ @@ -79,6 +80,7 @@ struct Settings; M(UInt64, number_of_mutations_to_throw, 1000, "If table has at least that many unfinished mutations, throw 'Too many mutations' exception. Disabled if set to 0", 0) \ M(UInt64, min_delay_to_mutate_ms, 10, "Min delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ M(UInt64, max_delay_to_mutate_ms, 1000, "Max delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ + M(Bool, exclude_deleted_rows_for_part_size_in_merge, false, "Use an estimated source part size (excluding lightweight deleted rows) when selecting parts to merge", 0) \ \ /** Inserts settings. */ \ M(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. 
Disabled if set to 0", 0) \ diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index f2fe2e0f255..d8555d69788 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -188,6 +188,11 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->index_granularity = writer->getIndexGranularity(); new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(); + /// In mutation, existing_rows_count is already calculated in PartMergerWriter + /// In merge situation, lightweight deleted rows was physically deleted, existing_rows_count equals rows_count + if (!new_part->existing_rows_count.has_value()) + new_part->existing_rows_count = rows_count; + if (default_codec != nullptr) new_part->default_codec = default_codec; diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index a9ff687fe4d..620b0e34c6a 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -49,7 +49,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() } /// TODO - some better heuristic? - size_t estimated_space_for_result = MergeTreeDataMergerMutator::estimateNeededDiskSpace({source_part}); + size_t estimated_space_for_result = MergeTreeDataMergerMutator::estimateNeededDiskSpace({source_part}, false); if (entry.create_time + storage_settings_ptr->prefer_fetch_merged_part_time_threshold.totalSeconds() <= time(nullptr) && estimated_space_for_result >= storage_settings_ptr->prefer_fetch_merged_part_size_threshold) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 150cc27c369..3d31d2f05db 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -60,6 +60,26 @@ static bool checkOperationIsNotCanceled(ActionBlocker & merges_blocker, MergeLis return true; } +static UInt64 getExistingRowsCount(const Block & block) +{ + auto column = block.getByName(LightweightDeleteDescription::FILTER_COLUMN.name).column; + const ColumnUInt8 * row_exists_col = typeid_cast(column.get()); + + if (!row_exists_col) + { + LOG_WARNING(&Poco::Logger::get("MutationHelpers::getExistingRowsCount"), "_row_exists column type is not UInt8"); + return block.rows(); + } + + UInt64 existing_count = 0; + + for (UInt8 row_exists : row_exists_col->getData()) + if (row_exists) + existing_count++; + + return existing_count; +} + /** Split mutation commands into two parts: * First part should be executed by mutations interpreter. * Other is just simple drop/renames, so they can be executed without interpreter. @@ -997,6 +1017,9 @@ struct MutationContext bool need_prefix = true; scope_guard temporary_directory_lock; + + /// Whether this mutation contains lightweight delete + bool has_lightweight_delete; }; using MutationContextPtr = std::shared_ptr; @@ -1191,6 +1214,7 @@ public: } case State::SUCCESS: { + finalize(); return false; } } @@ -1226,6 +1250,11 @@ private: const ProjectionsDescription & projections; ExecutableTaskPtr merge_projection_parts_task_ptr; + + /// Existing rows count calculated during part writing. 
+ /// It is initialized in prepare(), calculated in mutateOriginalPartAndPrepareProjections() + /// and set to new_data_part in finalize() + size_t existing_rows_count; }; @@ -1238,6 +1267,8 @@ void PartMergerWriter::prepare() // We split the materialization into multiple stages similar to the process of INSERT SELECT query. projection_squashes.emplace_back(settings.min_insert_block_size_rows, settings.min_insert_block_size_bytes); } + + existing_rows_count = 0; } @@ -1251,6 +1282,9 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() ctx->out->write(cur_block); + if (ctx->has_lightweight_delete) + existing_rows_count += MutationHelpers::getExistingRowsCount(cur_block); + for (size_t i = 0, size = ctx->projections_to_build.size(); i < size; ++i) { const auto & projection = *ctx->projections_to_build[i]; @@ -1340,6 +1374,12 @@ bool PartMergerWriter::iterateThroughAllProjections() return true; } +void PartMergerWriter::finalize() +{ + if (ctx->has_lightweight_delete) + ctx->new_data_part->existing_rows_count = existing_rows_count; +} + class MutateAllPartColumnsTask : public IExecutableTask { public: @@ -2185,6 +2225,20 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); + if (ctx->updated_header.has(LightweightDeleteDescription::FILTER_COLUMN.name)) + { + /// This mutation contains lightweight delete, reset existing_rows_count of new data part to 0 + /// It will be updated while writing _row_exists column + ctx->has_lightweight_delete = true; + } + else + { + ctx->has_lightweight_delete = false; + + /// This mutation does not contains lightweight delete, copy existing_rows_count from source part + ctx->new_data_part->existing_rows_count = ctx->source_part->existing_rows_count.value_or(ctx->source_part->rows_count); + } + /// All columns from part are changed and may be some more that were missing before in part /// TODO We can materialize compact part without copying data if (!isWidePart(ctx->source_part) || !isFullPartStorage(ctx->source_part->getDataPartStorage()) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 858eae4afd9..42f564f40da 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1350,7 +1350,10 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( auto part = data.getPartIfExists(name, {MergeTreeDataPartState::PreActive, MergeTreeDataPartState::Active, MergeTreeDataPartState::Outdated}); if (part) { - sum_parts_size_in_bytes += part->getBytesOnDisk(); + if (entry.type == LogEntry::MERGE_PARTS) + sum_parts_size_in_bytes += part->getExistingBytesOnDisk(); + else + sum_parts_size_in_bytes += part->getBytesOnDisk(); if (entry.type == LogEntry::MUTATE_PART && !storage.mutation_backoff_policy.partCanBeMutated(part->name)) { diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 663e7f435b7..c816a6f0dce 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1113,7 +1113,7 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMerge( if (isTTLMergeType(future_part->merge_type)) getContext()->getMergeList().bookMergeWithTTL(); - merging_tagger = std::make_unique(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace(future_part->parts), *this, metadata_snapshot, false); + merging_tagger 
= std::make_unique(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace(future_part->parts, true), *this, metadata_snapshot, false); return std::make_shared(future_part, std::move(merging_tagger), std::make_shared()); } @@ -1336,7 +1336,7 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( future_part->name = part->getNewName(new_part_info); future_part->part_format = part->getFormat(); - tagger = std::make_unique(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace({part}), *this, metadata_snapshot, true); + tagger = std::make_unique(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace({part}, false), *this, metadata_snapshot, true); return std::make_shared(future_part, std::move(tagger), commands, txn); } } diff --git a/tests/queries/0_stateless/03001_consider_lwd_when_merge.reference b/tests/queries/0_stateless/03001_consider_lwd_when_merge.reference new file mode 100644 index 00000000000..19920de3d3c --- /dev/null +++ b/tests/queries/0_stateless/03001_consider_lwd_when_merge.reference @@ -0,0 +1,3 @@ +2 +2 +1 diff --git a/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql b/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql new file mode 100644 index 00000000000..a65e8877020 --- /dev/null +++ b/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS lwd_merge; + +CREATE TABLE lwd_merge (id UInt64 CODEC(NONE)) + ENGINE = MergeTree ORDER BY id +SETTINGS max_bytes_to_merge_at_max_space_in_pool = 80000, exclude_deleted_rows_for_part_size_in_merge = 0; + +INSERT INTO lwd_merge SELECT number FROM numbers(10000); +INSERT INTO lwd_merge SELECT number FROM numbers(10000, 10000); + +OPTIMIZE TABLE lwd_merge; +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1; + +DELETE FROM lwd_merge WHERE id % 10 > 0; + +OPTIMIZE TABLE lwd_merge; +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1; + +ALTER TABLE lwd_merge MODIFY SETTING exclude_deleted_rows_for_part_size_in_merge = 1; + +OPTIMIZE TABLE lwd_merge; +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1; + +DROP TABLE IF EXISTS lwd_merge; From 4ad8141a162b3b7735e2f08c069e98b9c2ba2382 Mon Sep 17 00:00:00 2001 From: Zhuo Qiu Date: Wed, 28 Feb 2024 19:54:21 -0600 Subject: [PATCH 188/374] Maintain compatibility of estimateNeededDiskSpace() Co-authored-by: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 4 ++-- src/Storages/MergeTree/MergeTreeDataMergerMutator.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 90144a8cc8f..53d49b51e8f 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -793,7 +793,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart } -size_t MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts, const bool & is_merge) +size_t MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts, const bool & account_for_deleted) { size_t res = 0; time_t current_time = std::time(nullptr); @@ -804,7 +804,7 @@ size_t 
MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData:: if (part_max_ttl && part_max_ttl <= current_time) continue; - if (is_merge) + if (account_for_deleted) res += part->getExistingBytesOnDisk(); else res += part->getBytesOnDisk(); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 731c5e1d176..669ee040af3 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -193,7 +193,7 @@ public: /// The approximate amount of disk space needed for merge or mutation. With a surplus. - static size_t estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts, const bool & is_merge); + static size_t estimateNeededDiskSpace(const MergeTreeData::DataPartsVector & source_parts, const bool & account_for_deleted = false); private: /** Select all parts belonging to the same partition. From 05969a39f390445c8d0df43b7077e0eb81db3538 Mon Sep 17 00:00:00 2001 From: Zhuo Qiu Date: Tue, 12 Mar 2024 14:45:25 +0800 Subject: [PATCH 189/374] resolve conflicts --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 5 +++-- src/Storages/MergeTree/MutateTask.cpp | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index c099512d636..5fede923252 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1342,7 +1342,7 @@ UInt64 IMergeTreeDataPart::readExistingRowsCount() return rows_count; NamesAndTypesList cols; - cols.push_back(LightweightDeleteDescription::FILTER_COLUMN); + cols.emplace_back(RowExistsColumn::name, RowExistsColumn::type); StorageMetadataPtr metadata_ptr = storage.getInMemoryMetadataPtr(); StorageSnapshotPtr storage_snapshot_ptr = std::make_shared(storage, metadata_ptr); @@ -1351,7 +1351,8 @@ UInt64 IMergeTreeDataPart::readExistingRowsCount() cols, storage_snapshot_ptr, MarkRanges{MarkRange(0, total_mark)}, - nullptr, + /*virtual_fields=*/ {}, + /*uncompressed_cache=*/{}, storage.getContext()->getMarkCache().get(), std::make_shared(), MergeTreeReaderSettings{}, diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 3d31d2f05db..4d1e60f450e 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -62,7 +62,7 @@ static bool checkOperationIsNotCanceled(ActionBlocker & merges_blocker, MergeLis static UInt64 getExistingRowsCount(const Block & block) { - auto column = block.getByName(LightweightDeleteDescription::FILTER_COLUMN.name).column; + auto column = block.getByName(RowExistsColumn::name).column; const ColumnUInt8 * row_exists_col = typeid_cast(column.get()); if (!row_exists_col) @@ -2225,7 +2225,7 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - if (ctx->updated_header.has(LightweightDeleteDescription::FILTER_COLUMN.name)) + if (ctx->updated_header.has(RowExistsColumn::name)) { /// This mutation contains lightweight delete, reset existing_rows_count of new data part to 0 /// It will be updated while writing _row_exists column From fb1e5923a333bad5336ccb2631c81c95f4eb57dc Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 12 Mar 2024 09:12:25 +0000 Subject: [PATCH 190/374] CI: fix runner for arm perf tests #do_not_test --- 
 tests/ci/ci_config.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py
index df8bfb1c2a8..7c213da27ec 100644
--- a/tests/ci/ci_config.py
+++ b/tests/ci/ci_config.py
@@ -629,7 +629,9 @@ class CIConfig:
 
         assert result, f"BUG, no runner for [{check_name}]"
 
-        if ("aarch" in check_name or "arm" in check_name) and "aarch" not in result:
+        if (
+            "aarch" in check_name.lower() or "arm64" in check_name.lower()
+        ) and "aarch" not in result:
             if result == Runners.STRESS_TESTER:
                 # FIXME: no arm stress tester group atm
                 result = Runners.FUNC_TESTER_ARM

From f6abe68b607dd1893e838b55f85245c716d41f8b Mon Sep 17 00:00:00 2001
From: Pablo Musa
Date: Tue, 12 Mar 2024 10:52:22 +0100
Subject: [PATCH 191/374] Clarify sentence about clusterAllReplicas

The explanation about `clusterAllReplicas` is not clear. This change
tries to simplify the sentence and clarify the explanation.

---
 docs/en/sql-reference/table-functions/cluster.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/sql-reference/table-functions/cluster.md b/docs/en/sql-reference/table-functions/cluster.md
index ad92ab39183..136ff72e4a9 100644
--- a/docs/en/sql-reference/table-functions/cluster.md
+++ b/docs/en/sql-reference/table-functions/cluster.md
@@ -5,7 +5,7 @@ sidebar_label: cluster
 title: "cluster, clusterAllReplicas"
 ---
 
-Allows to access all shards in an existing cluster which configured in `remote_servers` section without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. One replica of each shard is queried.
+Allows to access all shards (configured in the `remote_servers` section) of a cluster without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. Only one replica of each shard is queried.
 
 `clusterAllReplicas` function — same as `cluster`, but all replicas are queried. Each replica in a cluster is used as a separate shard/connection.
 

From ccd47126fdcc82e57d0c648b82613b5672847702 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Tue, 12 Mar 2024 11:01:47 +0100
Subject: [PATCH 192/374] Fix 01603_insert_select_too_many_parts flakiness

CI: https://s3.amazonaws.com/clickhouse-test-reports/60695/048a042dc4963631a23358d3e454dcd8a9eaafa2/stateless_tests__aarch64_.html
Signed-off-by: Azat Khuzhin
---
 .../queries/0_stateless/01603_insert_select_too_many_parts.sql | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql b/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql
index a56b680e212..0c33c1d6b18 100644
--- a/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql
+++ b/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql
@@ -3,6 +3,8 @@ CREATE TABLE too_many_parts (x UInt64) ENGINE = MergeTree ORDER BY tuple() SETTI
 SYSTEM STOP MERGES too_many_parts;
 
 SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
+-- Avoid concurrent partse check to avoid flakiness
+SET max_threads=1, max_insert_threads=1;
 
 -- exception is not thrown if threshold is exceeded when multi-block INSERT is already started.
INSERT INTO too_many_parts SELECT * FROM numbers(10); From 47a4ce8a4e629f2d6321be1411d93c0bfabbbc95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 12 Mar 2024 10:02:33 +0000 Subject: [PATCH 193/374] Move `RangeHashedDictionary::getColumn` into source file --- src/Dictionaries/RangeHashedDictionary.cpp | 448 +++++++++++++++++++++ src/Dictionaries/RangeHashedDictionary.h | 421 ------------------- 2 files changed, 448 insertions(+), 421 deletions(-) create mode 100644 src/Dictionaries/RangeHashedDictionary.cpp diff --git a/src/Dictionaries/RangeHashedDictionary.cpp b/src/Dictionaries/RangeHashedDictionary.cpp new file mode 100644 index 00000000000..203561fc23d --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionary.cpp @@ -0,0 +1,448 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int TYPE_MISMATCH; +} + +template +ColumnPtr RangeHashedDictionary::getColumn( + const std::string & attribute_name, + const DataTypePtr & attribute_type, + const Columns & key_columns, + const DataTypes & key_types, + DefaultOrFilter default_or_filter) const +{ + bool is_short_circuit = std::holds_alternative(default_or_filter); + assert(is_short_circuit || std::holds_alternative(default_or_filter)); + + if (dictionary_key_type == DictionaryKeyType::Complex) + { + auto key_types_copy = key_types; + key_types_copy.pop_back(); + dict_struct.validateKeyTypes(key_types_copy); + } + + ColumnPtr result; + + const auto & dictionary_attribute = dict_struct.getAttribute(attribute_name, attribute_type); + const size_t attribute_index = dict_struct.attribute_name_to_index.find(attribute_name)->second; + const auto & attribute = attributes[attribute_index]; + + /// Cast range column to storage type + Columns modified_key_columns = key_columns; + const ColumnPtr & range_storage_column = key_columns.back(); + ColumnWithTypeAndName column_to_cast = {range_storage_column->convertToFullColumnIfConst(), key_types.back(), ""}; + modified_key_columns.back() = castColumnAccurate(column_to_cast, dict_struct.range_min->type); + + size_t keys_size = key_columns.front()->size(); + bool is_attribute_nullable = attribute.is_value_nullable.has_value(); + + ColumnUInt8::MutablePtr col_null_map_to; + ColumnUInt8::Container * vec_null_map_to = nullptr; + if (is_attribute_nullable) + { + col_null_map_to = ColumnUInt8::create(keys_size, false); + vec_null_map_to = &col_null_map_to->getData(); + } + + auto type_call = [&](const auto & dictionary_attribute_type) + { + using Type = std::decay_t; + using AttributeType = typename Type::AttributeType; + using ValueType = DictionaryValueType; + using ColumnProvider = DictionaryAttributeColumnProvider; + + auto column = ColumnProvider::getColumn(dictionary_attribute, keys_size); + + if (is_short_circuit) + { + IColumn::Filter & default_mask = std::get(default_or_filter).get(); + size_t keys_found = 0; + + if constexpr (std::is_same_v) + { + auto * out = column.get(); + + keys_found = getItemsShortCircuitImpl( + attribute, + modified_key_columns, + [&](size_t, const Array & value, bool) + { + out->insert(value); + }, + default_mask); + } + else if constexpr (std::is_same_v) + { + auto * out = column.get(); + + if (is_attribute_nullable) + keys_found = getItemsShortCircuitImpl( + attribute, + modified_key_columns, + [&](size_t row, StringRef value, bool is_null) + { + (*vec_null_map_to)[row] = is_null; + out->insertData(value.data, value.size); + }, + default_mask); + else + keys_found = getItemsShortCircuitImpl( + attribute, + 
modified_key_columns, + [&](size_t, StringRef value, bool) + { + out->insertData(value.data, value.size); + }, + default_mask); + } + else + { + auto & out = column->getData(); + + if (is_attribute_nullable) + keys_found = getItemsShortCircuitImpl( + attribute, + modified_key_columns, + [&](size_t row, const auto value, bool is_null) + { + (*vec_null_map_to)[row] = is_null; + out[row] = value; + }, + default_mask); + else + keys_found = getItemsShortCircuitImpl( + attribute, + modified_key_columns, + [&](size_t row, const auto value, bool) + { + out[row] = value; + }, + default_mask); + + out.resize(keys_found); + } + + if (is_attribute_nullable) + vec_null_map_to->resize(keys_found); + } + else + { + const ColumnPtr & default_values_column = std::get(default_or_filter).get(); + + DictionaryDefaultValueExtractor default_value_extractor( + dictionary_attribute.null_value, default_values_column); + + if constexpr (std::is_same_v) + { + auto * out = column.get(); + + getItemsImpl( + attribute, + modified_key_columns, + [&](size_t, const Array & value, bool) + { + out->insert(value); + }, + default_value_extractor); + } + else if constexpr (std::is_same_v) + { + auto * out = column.get(); + + if (is_attribute_nullable) + getItemsImpl( + attribute, + modified_key_columns, + [&](size_t row, StringRef value, bool is_null) + { + (*vec_null_map_to)[row] = is_null; + out->insertData(value.data, value.size); + }, + default_value_extractor); + else + getItemsImpl( + attribute, + modified_key_columns, + [&](size_t, StringRef value, bool) + { + out->insertData(value.data, value.size); + }, + default_value_extractor); + } + else + { + auto & out = column->getData(); + + if (is_attribute_nullable) + getItemsImpl( + attribute, + modified_key_columns, + [&](size_t row, const auto value, bool is_null) + { + (*vec_null_map_to)[row] = is_null; + out[row] = value; + }, + default_value_extractor); + else + getItemsImpl( + attribute, + modified_key_columns, + [&](size_t row, const auto value, bool) + { + out[row] = value; + }, + default_value_extractor); + } + } + + result = std::move(column); + }; + + callOnDictionaryAttributeType(attribute.type, type_call); + + if (is_attribute_nullable) + result = ColumnNullable::create(result, std::move(col_null_map_to)); + + return result; +} + +template +template +void RangeHashedDictionary::getItemsImpl( + const Attribute & attribute, + const Columns & key_columns, + ValueSetter && set_value, + DefaultValueExtractor & default_value_extractor) const +{ + const auto & attribute_container = std::get>(attribute.container); + + size_t keys_found = 0; + + const ColumnPtr & range_column = key_columns.back(); + auto key_columns_copy = key_columns; + key_columns_copy.pop_back(); + + DictionaryKeysArenaHolder arena_holder; + DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); + const size_t keys_size = keys_extractor.getKeysSize(); + + callOnRangeType(dict_struct.range_min->type, [&](const auto & types) + { + using Types = std::decay_t; + using RangeColumnType = typename Types::LeftType; + using RangeStorageType = typename RangeColumnType::ValueType; + using RangeInterval = Interval; + + const auto * range_column_typed = typeid_cast(range_column.get()); + if (!range_column_typed) + throw Exception(ErrorCodes::TYPE_MISMATCH, + "Dictionary {} range column type should be equal to {}", + getFullName(), + dict_struct.range_min->type->getName()); + + const auto & range_column_data = range_column_typed->getData(); + + const auto & 
key_attribute_container = std::get>(key_attribute.container); + + for (size_t key_index = 0; key_index < keys_size; ++key_index) + { + auto key = keys_extractor.extractCurrentKey(); + const auto it = key_attribute_container.find(key); + + if (it) + { + const auto date = range_column_data[key_index]; + const auto & interval_tree = it->getMapped(); + + size_t value_index = 0; + std::optional range; + + interval_tree.find(date, [&](auto & interval, auto & interval_value_index) + { + if (range) + { + if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) + { + range = interval; + value_index = interval_value_index; + } + else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > * range) + { + range = interval; + value_index = interval_value_index; + } + } + else + { + range = interval; + value_index = interval_value_index; + } + + return true; + }); + + if (range.has_value()) + { + ++keys_found; + + AttributeType value = attribute_container[value_index]; + + if constexpr (is_nullable) + { + bool is_null = (*attribute.is_value_nullable)[value_index]; + set_value(key_index, value, is_null); + } + else + { + set_value(key_index, value, false); + } + + keys_extractor.rollbackCurrentKey(); + continue; + } + } + + if constexpr (is_nullable) + set_value(key_index, default_value_extractor[key_index], default_value_extractor.isNullAt(key_index)); + else + set_value(key_index, default_value_extractor[key_index], false); + + keys_extractor.rollbackCurrentKey(); + } + }); + + query_count.fetch_add(keys_size, std::memory_order_relaxed); + found_count.fetch_add(keys_found, std::memory_order_relaxed); +} + +template +template +size_t RangeHashedDictionary::getItemsShortCircuitImpl( + const Attribute & attribute, + const Columns & key_columns, + ValueSetter && set_value, + IColumn::Filter & default_mask) const +{ + const auto & attribute_container = std::get>(attribute.container); + + size_t keys_found = 0; + + const ColumnPtr & range_column = key_columns.back(); + auto key_columns_copy = key_columns; + key_columns_copy.pop_back(); + + DictionaryKeysArenaHolder arena_holder; + DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); + const size_t keys_size = keys_extractor.getKeysSize(); + default_mask.resize(keys_size); + + callOnRangeType(dict_struct.range_min->type, [&](const auto & types) + { + using Types = std::decay_t; + using RangeColumnType = typename Types::LeftType; + using RangeStorageType = typename RangeColumnType::ValueType; + using RangeInterval = Interval; + + const auto * range_column_typed = typeid_cast(range_column.get()); + if (!range_column_typed) + throw Exception(ErrorCodes::TYPE_MISMATCH, + "Dictionary {} range column type should be equal to {}", + getFullName(), + dict_struct.range_min->type->getName()); + + const auto & range_column_data = range_column_typed->getData(); + + const auto & key_attribute_container = std::get>(key_attribute.container); + + for (size_t key_index = 0; key_index < keys_size; ++key_index) + { + auto key = keys_extractor.extractCurrentKey(); + const auto it = key_attribute_container.find(key); + + if (it) + { + const auto date = range_column_data[key_index]; + const auto & interval_tree = it->getMapped(); + + size_t value_index = 0; + std::optional range; + + interval_tree.find(date, [&](auto & interval, auto & interval_value_index) + { + if (range) + { + if (likely(configuration.lookup_strategy == 
RangeHashedDictionaryLookupStrategy::min) && interval < *range) + { + range = interval; + value_index = interval_value_index; + } + else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > * range) + { + range = interval; + value_index = interval_value_index; + } + } + else + { + range = interval; + value_index = interval_value_index; + } + + return true; + }); + + if (range.has_value()) + { + default_mask[key_index] = 0; + ++keys_found; + + AttributeType value = attribute_container[value_index]; + + if constexpr (is_nullable) + { + bool is_null = (*attribute.is_value_nullable)[value_index]; + set_value(key_index, value, is_null); + } + else + { + set_value(key_index, value, false); + } + + keys_extractor.rollbackCurrentKey(); + continue; + } + } + + default_mask[key_index] = 1; + + keys_extractor.rollbackCurrentKey(); + } + }); + + query_count.fetch_add(keys_size, std::memory_order_relaxed); + found_count.fetch_add(keys_found, std::memory_order_relaxed); + return keys_found; +} + +template +ColumnPtr RangeHashedDictionary::getColumn( + const std::string & attribute_name, + const DataTypePtr & attribute_type, + const Columns & key_columns, + const DataTypes & key_types, + DefaultOrFilter default_or_filter) const; + +template +ColumnPtr RangeHashedDictionary::getColumn( + const std::string & attribute_name, + const DataTypePtr & attribute_type, + const Columns & key_columns, + const DataTypes & key_types, + DefaultOrFilter default_or_filter) const; + +} diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index 509b991b30c..4a8008b9051 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -339,209 +339,6 @@ RangeHashedDictionary::RangeHashedDictionary( calculateBytesAllocated(); } -template -ColumnPtr RangeHashedDictionary::getColumn( - const std::string & attribute_name, - const DataTypePtr & attribute_type, - const Columns & key_columns, - const DataTypes & key_types, - DefaultOrFilter default_or_filter) const -{ - bool is_short_circuit = std::holds_alternative(default_or_filter); - assert(is_short_circuit || std::holds_alternative(default_or_filter)); - - if (dictionary_key_type == DictionaryKeyType::Complex) - { - auto key_types_copy = key_types; - key_types_copy.pop_back(); - dict_struct.validateKeyTypes(key_types_copy); - } - - ColumnPtr result; - - const auto & dictionary_attribute = dict_struct.getAttribute(attribute_name, attribute_type); - const size_t attribute_index = dict_struct.attribute_name_to_index.find(attribute_name)->second; - const auto & attribute = attributes[attribute_index]; - - /// Cast range column to storage type - Columns modified_key_columns = key_columns; - const ColumnPtr & range_storage_column = key_columns.back(); - ColumnWithTypeAndName column_to_cast = {range_storage_column->convertToFullColumnIfConst(), key_types.back(), ""}; - modified_key_columns.back() = castColumnAccurate(column_to_cast, dict_struct.range_min->type); - - size_t keys_size = key_columns.front()->size(); - bool is_attribute_nullable = attribute.is_value_nullable.has_value(); - - ColumnUInt8::MutablePtr col_null_map_to; - ColumnUInt8::Container * vec_null_map_to = nullptr; - if (is_attribute_nullable) - { - col_null_map_to = ColumnUInt8::create(keys_size, false); - vec_null_map_to = &col_null_map_to->getData(); - } - - auto type_call = [&](const auto & dictionary_attribute_type) - { - using Type = std::decay_t; - using AttributeType = typename 
Type::AttributeType; - using ValueType = DictionaryValueType; - using ColumnProvider = DictionaryAttributeColumnProvider; - - auto column = ColumnProvider::getColumn(dictionary_attribute, keys_size); - - if (is_short_circuit) - { - IColumn::Filter & default_mask = std::get(default_or_filter).get(); - size_t keys_found = 0; - - if constexpr (std::is_same_v) - { - auto * out = column.get(); - - keys_found = getItemsShortCircuitImpl( - attribute, - modified_key_columns, - [&](size_t, const Array & value, bool) - { - out->insert(value); - }, - default_mask); - } - else if constexpr (std::is_same_v) - { - auto * out = column.get(); - - if (is_attribute_nullable) - keys_found = getItemsShortCircuitImpl( - attribute, - modified_key_columns, - [&](size_t row, StringRef value, bool is_null) - { - (*vec_null_map_to)[row] = is_null; - out->insertData(value.data, value.size); - }, - default_mask); - else - keys_found = getItemsShortCircuitImpl( - attribute, - modified_key_columns, - [&](size_t, StringRef value, bool) - { - out->insertData(value.data, value.size); - }, - default_mask); - } - else - { - auto & out = column->getData(); - - if (is_attribute_nullable) - keys_found = getItemsShortCircuitImpl( - attribute, - modified_key_columns, - [&](size_t row, const auto value, bool is_null) - { - (*vec_null_map_to)[row] = is_null; - out[row] = value; - }, - default_mask); - else - keys_found = getItemsShortCircuitImpl( - attribute, - modified_key_columns, - [&](size_t row, const auto value, bool) - { - out[row] = value; - }, - default_mask); - - out.resize(keys_found); - } - - if (is_attribute_nullable) - vec_null_map_to->resize(keys_found); - } - else - { - const ColumnPtr & default_values_column = std::get(default_or_filter).get(); - - DictionaryDefaultValueExtractor default_value_extractor( - dictionary_attribute.null_value, default_values_column); - - if constexpr (std::is_same_v) - { - auto * out = column.get(); - - getItemsImpl( - attribute, - modified_key_columns, - [&](size_t, const Array & value, bool) - { - out->insert(value); - }, - default_value_extractor); - } - else if constexpr (std::is_same_v) - { - auto * out = column.get(); - - if (is_attribute_nullable) - getItemsImpl( - attribute, - modified_key_columns, - [&](size_t row, StringRef value, bool is_null) - { - (*vec_null_map_to)[row] = is_null; - out->insertData(value.data, value.size); - }, - default_value_extractor); - else - getItemsImpl( - attribute, - modified_key_columns, - [&](size_t, StringRef value, bool) - { - out->insertData(value.data, value.size); - }, - default_value_extractor); - } - else - { - auto & out = column->getData(); - - if (is_attribute_nullable) - getItemsImpl( - attribute, - modified_key_columns, - [&](size_t row, const auto value, bool is_null) - { - (*vec_null_map_to)[row] = is_null; - out[row] = value; - }, - default_value_extractor); - else - getItemsImpl( - attribute, - modified_key_columns, - [&](size_t row, const auto value, bool) - { - out[row] = value; - }, - default_value_extractor); - } - } - - result = std::move(column); - }; - - callOnDictionaryAttributeType(attribute.type, type_call); - - if (is_attribute_nullable) - result = ColumnNullable::create(result, std::move(col_null_map_to)); - - return result; -} - template ColumnPtr RangeHashedDictionary::getColumnInternal( const std::string & attribute_name, @@ -840,224 +637,6 @@ typename RangeHashedDictionary::Attribute RangeHashedDictio return attribute; } -template -template -void RangeHashedDictionary::getItemsImpl( - const Attribute & attribute, 
- const Columns & key_columns, - ValueSetter && set_value, - DefaultValueExtractor & default_value_extractor) const -{ - const auto & attribute_container = std::get>(attribute.container); - - size_t keys_found = 0; - - const ColumnPtr & range_column = key_columns.back(); - auto key_columns_copy = key_columns; - key_columns_copy.pop_back(); - - DictionaryKeysArenaHolder arena_holder; - DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); - const size_t keys_size = keys_extractor.getKeysSize(); - - callOnRangeType(dict_struct.range_min->type, [&](const auto & types) - { - using Types = std::decay_t; - using RangeColumnType = typename Types::LeftType; - using RangeStorageType = typename RangeColumnType::ValueType; - using RangeInterval = Interval; - - const auto * range_column_typed = typeid_cast(range_column.get()); - if (!range_column_typed) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Dictionary {} range column type should be equal to {}", - getFullName(), - dict_struct.range_min->type->getName()); - - const auto & range_column_data = range_column_typed->getData(); - - const auto & key_attribute_container = std::get>(key_attribute.container); - - for (size_t key_index = 0; key_index < keys_size; ++key_index) - { - auto key = keys_extractor.extractCurrentKey(); - const auto it = key_attribute_container.find(key); - - if (it) - { - const auto date = range_column_data[key_index]; - const auto & interval_tree = it->getMapped(); - - size_t value_index = 0; - std::optional range; - - interval_tree.find(date, [&](auto & interval, auto & interval_value_index) - { - if (range) - { - if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) - { - range = interval; - value_index = interval_value_index; - } - else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > * range) - { - range = interval; - value_index = interval_value_index; - } - } - else - { - range = interval; - value_index = interval_value_index; - } - - return true; - }); - - if (range.has_value()) - { - ++keys_found; - - AttributeType value = attribute_container[value_index]; - - if constexpr (is_nullable) - { - bool is_null = (*attribute.is_value_nullable)[value_index]; - set_value(key_index, value, is_null); - } - else - { - set_value(key_index, value, false); - } - - keys_extractor.rollbackCurrentKey(); - continue; - } - } - - if constexpr (is_nullable) - set_value(key_index, default_value_extractor[key_index], default_value_extractor.isNullAt(key_index)); - else - set_value(key_index, default_value_extractor[key_index], false); - - keys_extractor.rollbackCurrentKey(); - } - }); - - query_count.fetch_add(keys_size, std::memory_order_relaxed); - found_count.fetch_add(keys_found, std::memory_order_relaxed); -} - -template -template -size_t RangeHashedDictionary::getItemsShortCircuitImpl( - const Attribute & attribute, - const Columns & key_columns, - ValueSetter && set_value, - IColumn::Filter & default_mask) const -{ - const auto & attribute_container = std::get>(attribute.container); - - size_t keys_found = 0; - - const ColumnPtr & range_column = key_columns.back(); - auto key_columns_copy = key_columns; - key_columns_copy.pop_back(); - - DictionaryKeysArenaHolder arena_holder; - DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); - const size_t keys_size = keys_extractor.getKeysSize(); - default_mask.resize(keys_size); - - 
callOnRangeType(dict_struct.range_min->type, [&](const auto & types) - { - using Types = std::decay_t; - using RangeColumnType = typename Types::LeftType; - using RangeStorageType = typename RangeColumnType::ValueType; - using RangeInterval = Interval; - - const auto * range_column_typed = typeid_cast(range_column.get()); - if (!range_column_typed) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Dictionary {} range column type should be equal to {}", - getFullName(), - dict_struct.range_min->type->getName()); - - const auto & range_column_data = range_column_typed->getData(); - - const auto & key_attribute_container = std::get>(key_attribute.container); - - for (size_t key_index = 0; key_index < keys_size; ++key_index) - { - auto key = keys_extractor.extractCurrentKey(); - const auto it = key_attribute_container.find(key); - - if (it) - { - const auto date = range_column_data[key_index]; - const auto & interval_tree = it->getMapped(); - - size_t value_index = 0; - std::optional range; - - interval_tree.find(date, [&](auto & interval, auto & interval_value_index) - { - if (range) - { - if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) - { - range = interval; - value_index = interval_value_index; - } - else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > * range) - { - range = interval; - value_index = interval_value_index; - } - } - else - { - range = interval; - value_index = interval_value_index; - } - - return true; - }); - - if (range.has_value()) - { - default_mask[key_index] = 0; - ++keys_found; - - AttributeType value = attribute_container[value_index]; - - if constexpr (is_nullable) - { - bool is_null = (*attribute.is_value_nullable)[value_index]; - set_value(key_index, value, is_null); - } - else - { - set_value(key_index, value, false); - } - - keys_extractor.rollbackCurrentKey(); - continue; - } - } - - default_mask[key_index] = 1; - - keys_extractor.rollbackCurrentKey(); - } - }); - - query_count.fetch_add(keys_size, std::memory_order_relaxed); - found_count.fetch_add(keys_found, std::memory_order_relaxed); - return keys_found; -} - template template void RangeHashedDictionary::getItemsInternalImpl( From f1f388d9d26dbaf0775e90fdc26f9085b83b1c26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 11 Mar 2024 16:33:12 +0000 Subject: [PATCH 194/374] Remove RangeHashedDictionary as exception from large objects check --- utils/check-style/check-large-objects.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/utils/check-style/check-large-objects.sh b/utils/check-style/check-large-objects.sh index 5c1276e5732..5b0e8e88df5 100755 --- a/utils/check-style/check-large-objects.sh +++ b/utils/check-style/check-large-objects.sh @@ -7,8 +7,6 @@ TU_EXCLUDES=( AggregateFunctionUniq FunctionsConversion - RangeHashedDictionary - Aggregator ) From c78a029d15f3b8d4796b69ea370af0e3b8abf8e2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 12 Mar 2024 11:12:27 +0100 Subject: [PATCH 195/374] tests: fix typo in 01603_insert_select_too_many_parts.sql MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: János Benjamin Antal --- .../queries/0_stateless/01603_insert_select_too_many_parts.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql b/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql index 
0c33c1d6b18..9408ccf2bbf 100644
--- a/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql
+++ b/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql
@@ -3,7 +3,7 @@ CREATE TABLE too_many_parts (x UInt64) ENGINE = MergeTree ORDER BY tuple() SETTI
 SYSTEM STOP MERGES too_many_parts;
 
 SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
--- Avoid concurrent partse check to avoid flakiness
+-- Avoid concurrent parts check to avoid flakiness
 SET max_threads=1, max_insert_threads=1;
 
 -- exception is not thrown if threshold is exceeded when multi-block INSERT is already started.

From c9c73378eb169e3de7e7579c5ec7edd573e1b72e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?=
Date: Tue, 12 Mar 2024 11:52:17 +0100
Subject: [PATCH 196/374] Fix problem detected by ubsan

---
 .../AggregateFunctionGroupArraySorted.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp
index 0692ff28f18..1ee29c362cd 100644
--- a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp
+++ b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp
@@ -310,12 +310,12 @@ public:
         {
             for (Field & element : values)
             {
-                bool has_value = 0;
+                /// We must initialize the Field type since some internal functions (like operator=) use them
+                new (&element) Field;
+                bool has_value = false;
                 readBinary(has_value, buf);
                 if (has_value)
                     serialization->deserializeBinary(element, buf, {});
-                else
-                    element = Field{};
             }
         }
         else

From 4ce5245157418217b3f7e41724df3b8cf3dd3272 Mon Sep 17 00:00:00 2001
From: robot-clickhouse
Date: Tue, 12 Mar 2024 11:03:39 +0000
Subject: [PATCH 197/374] Automatic style fix

---
 tests/integration/test_disk_types/test.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py
index 5047cdc605e..a53d073d30b 100644
--- a/tests/integration/test_disk_types/test.py
+++ b/tests/integration/test_disk_types/test.py
@@ -50,6 +50,8 @@ def test_different_types(cluster):
         assert (
             fields[encrypted_col_ix] == "0"
         ), f"{fields[name_col_ix]} expected to be non-encrypted!"
+ + def test_different_types(cluster): node = cluster.instances["node"] response = TSV.toMat(node.query("SELECT * FROM system.disks FORMAT TSVWithNames")) From c1cefe18744d06bebd67b6425ae5afc9cb922e78 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 12 Mar 2024 11:41:36 +0000 Subject: [PATCH 198/374] Fix keeper reconfig for standalone binary --- src/Coordination/Standalone/Context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Coordination/Standalone/Context.cpp b/src/Coordination/Standalone/Context.cpp index 264cf118501..75b81187973 100644 --- a/src/Coordination/Standalone/Context.cpp +++ b/src/Coordination/Standalone/Context.cpp @@ -374,7 +374,7 @@ void Context::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::Abstr if (!shared->keeper_dispatcher) return; - shared->keeper_dispatcher->updateConfiguration(getConfigRef(), getMacros()); + shared->keeper_dispatcher->updateConfiguration(config_, getMacros()); } std::shared_ptr Context::getZooKeeper() const From d431276045f6600af92562abf2b6387bd37d068c Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 12 Mar 2024 12:43:48 +0100 Subject: [PATCH 199/374] Fix usage of session_token in S3 --- src/Storages/StorageS3.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 11da394feec..ff055508aa6 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -1451,7 +1451,8 @@ void StorageS3::Configuration::connect(const ContextPtr & context) auth_settings.expiration_window_seconds.value_or( context->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS)), auth_settings.no_sign_request.value_or(context->getConfigRef().getBool("s3.no_sign_request", false)), - }); + }, + credentials.GetSessionToken()); } void StorageS3::processNamedCollectionResult(StorageS3::Configuration & configuration, const NamedCollection & collection) From 2e803f29f8f89eec0d9eb95b089ecad5dd6e18b7 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 12 Mar 2024 12:45:02 +0100 Subject: [PATCH 200/374] Fix KeeperSnapshotManagerS3 --- src/Coordination/KeeperSnapshotManagerS3.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp index 80345db2524..796506a07db 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.cpp +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -121,7 +121,8 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo auth_settings.use_insecure_imds_request.value_or(false), auth_settings.expiration_window_seconds.value_or(S3::DEFAULT_EXPIRATION_WINDOW_SECONDS), auth_settings.no_sign_request.value_or(false), - }); + }, + credentials.GetSessionToken()); auto new_client = std::make_shared(std::move(new_uri), std::move(auth_settings), std::move(client)); From 612ff3c5bc1f4878f869bd6e49cc1e60e58e3afd Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Tue, 12 Mar 2024 12:35:33 +0100 Subject: [PATCH 201/374] Make every style-checker runner types scaling-out very quickly --- tests/ci/autoscale_runners_lambda/app.py | 8 ++++++-- tests/ci/autoscale_runners_lambda/test_autoscale.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/ci/autoscale_runners_lambda/app.py b/tests/ci/autoscale_runners_lambda/app.py index 1aa2e424320..a36c508482c 100644 --- a/tests/ci/autoscale_runners_lambda/app.py +++ b/tests/ci/autoscale_runners_lambda/app.py @@ -51,7 +51,7 @@ class Queue: label: str -def get_scales() -> Tuple[int, int]: +def get_scales(runner_type: str) -> Tuple[int, int]: "returns the multipliers for scaling down and up ASG by types" # Scaling down is quicker on the lack of running jobs than scaling up on # queue @@ -63,8 +63,12 @@ def get_scales() -> Tuple[int, int]: # 10. I am trying 7 now. # 7 still looks a bit slow, so I try 6 # Let's have it the same as the other ASG + # + # All type of style-checkers should be added very quickly to not block the workflows # UPDATE THE COMMENT ON CHANGES scale_up = 3 + if "style" in runner_type: + scale_up = 1 return scale_down, scale_up @@ -95,7 +99,7 @@ def set_capacity( continue raise ValueError("Queue status is not in ['in_progress', 'queued']") - scale_down, scale_up = get_scales() + scale_down, scale_up = get_scales(runner_type) # With lyfecycle hooks some instances are actually free because some of # them are in 'Terminating:Wait' state effective_capacity = max( diff --git a/tests/ci/autoscale_runners_lambda/test_autoscale.py b/tests/ci/autoscale_runners_lambda/test_autoscale.py index 21a407276f9..75f178ac394 100644 --- a/tests/ci/autoscale_runners_lambda/test_autoscale.py +++ b/tests/ci/autoscale_runners_lambda/test_autoscale.py @@ -80,7 +80,7 @@ class TestSetCapacity(unittest.TestCase): ), TestCase("increase-1", 1, 13, 20, [Queue("queued", 23, "increase-1")], 17), TestCase( - "style-checker", 1, 13, 20, [Queue("queued", 33, "style-checker")], 20 + "style-checker", 1, 13, 20, [Queue("queued", 19, "style-checker")], 19 ), TestCase("increase-2", 1, 13, 20, [Queue("queued", 18, "increase-2")], 15), TestCase("increase-3", 1, 13, 20, [Queue("queued", 183, "increase-3")], 20), From 50aab0b69f75c027b0b6dd53ff747492485eed53 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Tue, 12 Mar 2024 14:06:17 +0100 Subject: [PATCH 202/374] Fix annoying typo --- tests/ci/autoscale_runners_lambda/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/autoscale_runners_lambda/app.py b/tests/ci/autoscale_runners_lambda/app.py index a36c508482c..6c3d71708e9 100644 --- a/tests/ci/autoscale_runners_lambda/app.py +++ b/tests/ci/autoscale_runners_lambda/app.py @@ -142,7 +142,7 @@ def set_capacity( logging.info( "The ASG %s capacity will be increased to %s, current capacity=%s, " - "effective capacity=%sm maximum capacity=%s, running jobs=%s, queue size=%s", + "effective capacity=%s, maximum capacity=%s, running jobs=%s, queue size=%s", asg["AutoScalingGroupName"], desired_capacity, effective_capacity, From c011901d01709dff91e8afe53121af7388842f33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 12 Mar 2024 14:28:28 +0100 Subject: [PATCH 203/374] Fix coverage --- src/Functions/coverage.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Functions/coverage.cpp b/src/Functions/coverage.cpp index f4cac26df78..a1a43d0cf58 100644 --- a/src/Functions/coverage.cpp +++ b/src/Functions/coverage.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include From c27c1e1035772cff76f318a361153ca21f9adc55 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 12 Mar 2024 14:36:07 +0100 Subject: [PATCH 204/374] Fix build --- src/Functions/FunctionsConversion.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 94fd960a99e..e2d9c4a173e 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -3391,9 +3391,9 @@ arguments, result_type, input_rows_count); \ case IntervalKind::Kind::INTERVAL_KIND: \ return createFunctionAdaptor(FunctionConvert::create(), from_type); - static WrapperType createIntervalWrapper(const DataTypePtr & from_type, IntervalKind::Kind kind) + static WrapperType createIntervalWrapper(const DataTypePtr & from_type, IntervalKind kind) { - switch (kind) + switch (kind.kind) { GENERATE_INTERVAL_CASE(Nanosecond) GENERATE_INTERVAL_CASE(Microsecond) From b5489ac9620311b160b8c5fa23c02afcc7271114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 12 Mar 2024 14:05:39 +0000 Subject: [PATCH 205/374] Print out all queries to have better insights --- ...materialized_views_ignore_errors.reference | 26 +++++++++++++++++-- ..._logs_materialized_views_ignore_errors.sql | 8 +++--- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.reference b/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.reference index 029f80b46b0..596a047c104 100644 --- a/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.reference +++ b/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.reference @@ -1,2 +1,24 @@ -11 queryfinish OK -11 querystart OK +"-- Attach MV to system.query_log and check that writing query_log will not fail\n\nset log_queries=1;","querystart","OK" +"-- Attach MV to system.query_log and check that writing query_log will not fail\n\nset log_queries=1;","queryfinish","OK" +"drop table if exists log_proxy_02572;","querystart","OK" +"drop table if exists log_proxy_02572;","queryfinish","OK" +"drop table if exists push_to_logs_proxy_mv_02572;","querystart","OK" 
+"drop table if exists push_to_logs_proxy_mv_02572;","queryfinish","OK" +"-- create log tables\nsystem flush logs;","querystart","OK" +"-- create log tables\nsystem flush logs;","queryfinish","OK" +"create table log_proxy_02572 as system.query_log engine=Distributed('test_shard_localhost', currentDatabase(), 'receiver_02572');","querystart","OK" +"create table log_proxy_02572 as system.query_log engine=Distributed('test_shard_localhost', currentDatabase(), 'receiver_02572');","queryfinish","OK" +"create materialized view push_to_logs_proxy_mv_02572 to log_proxy_02572 as select * from system.query_log;","querystart","OK" +"create materialized view push_to_logs_proxy_mv_02572 to log_proxy_02572 as select * from system.query_log;","queryfinish","OK" +"select 1 format Null;","querystart","OK" +"select 1 format Null;","queryfinish","OK" +"system flush logs;","querystart","OK" +"system flush logs;","queryfinish","OK" +"system flush logs;","querystart","OK" +"system flush logs;","queryfinish","OK" +"drop table log_proxy_02572;","querystart","OK" +"drop table log_proxy_02572;","queryfinish","OK" +"drop table push_to_logs_proxy_mv_02572;","querystart","OK" +"drop table push_to_logs_proxy_mv_02572;","queryfinish","OK" +"set log_queries=0;","querystart","OK" +"set log_queries=0;","queryfinish","OK" diff --git a/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.sql b/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.sql index a7a74190821..2381639fba0 100644 --- a/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.sql +++ b/tests/queries/0_stateless/02572_system_logs_materialized_views_ignore_errors.sql @@ -21,10 +21,12 @@ system flush logs; drop table log_proxy_02572; drop table push_to_logs_proxy_mv_02572; +set log_queries=0; + system flush logs; -- lower() to pass through clickhouse-test "exception" check -select count(), lower(type::String), errorCodeToName(exception_code) +select replaceAll(query, '\n', '\\n'), lower(type::String), errorCodeToName(exception_code) from system.query_log where current_database = currentDatabase() - group by 2, 3 - order by 2; + order by event_time_microseconds + format CSV; From 0b588480f5165af38675911ebba043fe410465db Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 12 Mar 2024 14:34:15 +0000 Subject: [PATCH 206/374] Fix lazy execution in dictGetOrDefault for RangeHashedDictionary --- src/Functions/FunctionsExternalDictionaries.h | 7 ++-- .../03009_range_dict_get_or_default.reference | 1 + .../03009_range_dict_get_or_default.sql | 34 +++++++++++++++++++ 3 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03009_range_dict_get_or_default.reference create mode 100644 tests/queries/0_stateless/03009_range_dict_get_or_default.sql diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index 261c728e9e1..011772baab9 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -324,12 +324,15 @@ public: String getName() const override { return name; } bool isVariadic() const override { return true; } - bool isShortCircuit(ShortCircuitSettings & settings, size_t /*number_of_arguments*/) const override + bool isShortCircuit(ShortCircuitSettings & settings, size_t number_of_arguments) const override { if constexpr (dictionary_get_function_type != DictionaryGetFunctionType::getOrDefault) return false; - 
settings.arguments_with_disabled_lazy_execution.insert({0, 1, 2}); + /// We execute lazily only last argument with default expression. + for (size_t i = 0; i != number_of_arguments - 1; ++i) + settings.arguments_with_disabled_lazy_execution.insert(i); + settings.enable_lazy_execution_for_common_descendants_of_arguments = false; settings.force_enable_lazy_execution = false; return true; diff --git a/tests/queries/0_stateless/03009_range_dict_get_or_default.reference b/tests/queries/0_stateless/03009_range_dict_get_or_default.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/03009_range_dict_get_or_default.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/03009_range_dict_get_or_default.sql b/tests/queries/0_stateless/03009_range_dict_get_or_default.sql new file mode 100644 index 00000000000..1f4b4073b9f --- /dev/null +++ b/tests/queries/0_stateless/03009_range_dict_get_or_default.sql @@ -0,0 +1,34 @@ +DROP DICTIONARY IF EXISTS range_dictionary; +DROP TABLE IF EXISTS range_dictionary_nullable_source_table; + + +CREATE TABLE range_dictionary_nullable_source_table +( + key UInt64, + start_date Date, + end_date Date, + value Nullable(UInt64) +) +ENGINE = TinyLog; + +INSERT INTO range_dictionary_nullable_source_table VALUES (0, toDate('2019-05-05'), toDate('2019-05-20'), 0), (1, toDate('2019-05-05'), toDate('2019-05-20'), NULL); + +CREATE DICTIONARY range_dictionary +( + key UInt64, + start_date Date, + end_date Date, + value Nullable(UInt64) DEFAULT NULL +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_dictionary_nullable_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start_date MAX end_date); + +SELECT dictGetOrDefault('range_dictionary', 'value', toUInt64(2), toDate(toLowCardinality(materialize('2019-05-15'))), 2); + + +DROP DICTIONARY IF EXISTS range_dictionary; +DROP TABLE IF EXISTS range_dictionary_nullable_source_table; + From 94f15dd5e058fc9f68bf3644c895908bba849db6 Mon Sep 17 00:00:00 2001 From: Mikhail Koviazin Date: Tue, 12 Mar 2024 16:48:43 +0200 Subject: [PATCH 207/374] Fix `has()` function with `Nullable` column Previous implementation didn't check for `null_map` when the given argument was not `Null`. This commit adds the missing check. 
Fixes #60214 --- src/Functions/array/arrayIndex.h | 9 +++++++-- tests/queries/0_stateless/00662_has_nullable.reference | 2 ++ tests/queries/0_stateless/00662_has_nullable.sql | 7 +++++++ 3 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/00662_has_nullable.reference create mode 100644 tests/queries/0_stateless/00662_has_nullable.sql diff --git a/src/Functions/array/arrayIndex.h b/src/Functions/array/arrayIndex.h index 3b19f0b486a..cd537763b4a 100644 --- a/src/Functions/array/arrayIndex.h +++ b/src/Functions/array/arrayIndex.h @@ -1007,8 +1007,13 @@ private: if (!(*null_map)[row]) continue; } - else if (!applyVisitor(FieldVisitorAccurateEquals(), arr[i], value)) - continue; + else + { + if (null_map && (*null_map)[row]) + continue; + if (!applyVisitor(FieldVisitorAccurateEquals(), arr[i], value)) + continue; + } ConcreteAction::apply(data[row], i); diff --git a/tests/queries/0_stateless/00662_has_nullable.reference b/tests/queries/0_stateless/00662_has_nullable.reference new file mode 100644 index 00000000000..97da99d655e --- /dev/null +++ b/tests/queries/0_stateless/00662_has_nullable.reference @@ -0,0 +1,2 @@ +1 1 +\N 0 diff --git a/tests/queries/0_stateless/00662_has_nullable.sql b/tests/queries/0_stateless/00662_has_nullable.sql new file mode 100644 index 00000000000..0d395871d9f --- /dev/null +++ b/tests/queries/0_stateless/00662_has_nullable.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS 00662_has_nullable; +CREATE TABLE 00662_has_nullable(a Nullable(UInt64)) ENGINE = Memory; + +INSERT INTO 00662_has_nullable VALUES (1), (Null); +SELECT a, has([0, 1], a) FROM 00662_has_nullable; + +DROP TABLE 00662_has_nullable; From 01d0fba0853930de42d4c122c9f067bd91f9af4b Mon Sep 17 00:00:00 2001 From: yariks5s Date: Tue, 12 Mar 2024 14:48:59 +0000 Subject: [PATCH 208/374] init --- src/Interpreters/InterpreterSelectQuery.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index bcedba7346d..3c84d086d85 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2552,7 +2552,12 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc /// If necessary, we request more sources than the number of threads - to distribute the work evenly over the threads. if (max_streams > 1 && !is_sync_remote) - max_streams = static_cast(max_streams * settings.max_streams_to_max_threads_ratio); + { + if (auto streams_with_ratio = max_streams * settings.max_streams_to_max_threads_ratio; streams_with_ratio > sizeof(size_t)) + max_streams = static_cast(streams_with_ratio); + else + throw Exception(ErrorCodes::INCORRECT_DATA, "Exceeded limit for `max_streams_to_max_threads_ratio`. 
Make sure that `max_streams * max_streams_to_max_threads_ratio` not exceeds {}, current value: {}", sizeof(size_t), streams_with_ratio); + } auto & prewhere_info = analysis_result.prewhere_info; From 5576c170517c10757611868444b4f2aa7d9cf41c Mon Sep 17 00:00:00 2001 From: Peter Date: Tue, 12 Mar 2024 22:57:06 +0800 Subject: [PATCH 209/374] Using straightforward approach to import GPG key --- docs/en/getting-started/install.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 234420de374..f3bacf7d092 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -78,8 +78,8 @@ It is recommended to use official pre-compiled `deb` packages for Debian or Ubun #### Setup the Debian repository ``` bash -sudo apt-get install -y apt-transport-https ca-certificates dirmngr -sudo gpg --no-default-keyring --keyring /usr/share/keyrings/clickhouse-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754 +sudo apt-get install -y apt-transport-https ca-certificates dirmngr curl gpg +curl -fsSL 'https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key' | sudo gpg --dearmor -o /usr/share/keyrings/clickhouse-keyring.gpg echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main" | sudo tee \ /etc/apt/sources.list.d/clickhouse.list From 24f4cda4845698bc2e014087a46222d135541f36 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 12 Mar 2024 15:34:07 +0000 Subject: [PATCH 210/374] impl --- .../02908_many_requests_to_system_replicas.sh | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh index d3eed891ab9..2c57545e603 100755 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh @@ -12,22 +12,34 @@ CONCURRENCY=200 echo "Creating $NUM_TABLES tables" +function get_done_or_die_trying() +{ + # Sometimes curl produces errors like 'Recv failure: Connection reset by peer' and fails test, let's add a little bit of retries + for _ in $(seq 1 10) + do + curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "$1" &>/dev/null && return + done + + echo "Cannot successfully make request" + exit 1 +} + function init_table() { set -e i=$1 - curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r1_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r1') ORDER BY tuple()" 2>&1 - curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r2_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r2') ORDER BY tuple()" 2>&1 - curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r3_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r3') ORDER BY tuple()" 2>&1 + get_done_or_die_trying "CREATE TABLE test_02908_r1_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r1') ORDER BY tuple()" + get_done_or_die_trying "CREATE TABLE test_02908_r2_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r2') ORDER BY tuple()" + get_done_or_die_trying "CREATE TABLE test_02908_r3_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r3') ORDER BY tuple()" - curl $CLICKHOUSE_URL 
--silent --fail --show-error --data "INSERT INTO test_02908_r1_$i SELECT rand64() FROM numbers(5);" 2>&1 + get_done_or_die_trying "INSERT INTO test_02908_r1_$i SELECT rand64() FROM numbers(5);" } export init_table; -for i in `seq 1 $NUM_TABLES`; +for i in $(seq 1 $NUM_TABLES) do - init_table $i & + init_table "$i" & done wait; @@ -35,15 +47,15 @@ wait; echo "Making $CONCURRENCY requests to system.replicas" -for i in `seq 1 $CONCURRENCY`; +for i in $(seq 1 $CONCURRENCY) do - curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT * FROM system.replicas WHERE database=currentDatabase() FORMAT Null;" 2>&1 || echo "query $i failed" & + curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT * FROM system.replicas WHERE database=currentDatabase() FORMAT Null;" 2>&1 || echo "query $i failed" & done echo "Query system.replicas while waiting for other concurrent requests to finish" # lost_part_count column is read from ZooKeeper -curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT sum(lost_part_count) FROM system.replicas WHERE database=currentDatabase();" 2>&1; +curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT sum(lost_part_count) FROM system.replicas WHERE database=currentDatabase();" 2>&1; # is_leader column is filled without ZooKeeper -curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT sum(is_leader) FROM system.replicas WHERE database=currentDatabase();" 2>&1; +curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT sum(is_leader) FROM system.replicas WHERE database=currentDatabase();" 2>&1; wait; From 07ee777c8d354526cf89c3647863916a0855de49 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 12 Mar 2024 15:39:04 +0000 Subject: [PATCH 211/374] Restart CI From 858ad2d68860d7993280b90c0f17ae3a0e84d712 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 12 Mar 2024 16:39:10 +0100 Subject: [PATCH 212/374] beautify and change max_value --- src/Interpreters/InterpreterSelectQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 3c84d086d85..a314492c5b0 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2553,10 +2553,10 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc /// If necessary, we request more sources than the number of threads - to distribute the work evenly over the threads. if (max_streams > 1 && !is_sync_remote) { - if (auto streams_with_ratio = max_streams * settings.max_streams_to_max_threads_ratio; streams_with_ratio > sizeof(size_t)) + if (auto streams_with_ratio = max_streams * settings.max_streams_to_max_threads_ratio; streams_with_ratio < SIZE_MAX) max_streams = static_cast(streams_with_ratio); else - throw Exception(ErrorCodes::INCORRECT_DATA, "Exceeded limit for `max_streams_to_max_threads_ratio`. Make sure that `max_streams * max_streams_to_max_threads_ratio` not exceeds {}, current value: {}", sizeof(size_t), streams_with_ratio); + throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Exceeded limit for `max_streams` with `max_streams_to_max_threads_ratio`. Make sure that `max_streams * max_streams_to_max_threads_ratio` not exceeds {}, current value: {}", SIZE_MAX, streams_with_ratio); } auto & prewhere_info = analysis_result.prewhere_info; From 2fbc11302148f3374e5ce69ed50adebbce9cbae4 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Tue, 12 Mar 2024 17:00:44 +0100 Subject: [PATCH 213/374] Adjust the packages in installation docs --- docs/en/getting-started/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index f3bacf7d092..ca689ef7995 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -78,7 +78,7 @@ It is recommended to use official pre-compiled `deb` packages for Debian or Ubun #### Setup the Debian repository ``` bash -sudo apt-get install -y apt-transport-https ca-certificates dirmngr curl gpg +sudo apt-get install -y apt-transport-https ca-certificates curl gnupg curl -fsSL 'https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key' | sudo gpg --dearmor -o /usr/share/keyrings/clickhouse-keyring.gpg echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main" | sudo tee \ From e1ea0671b132097182279831560f1bafc07c95ce Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 12 Mar 2024 17:27:25 +0100 Subject: [PATCH 214/374] Disable test 02998_primary_key_skip_columns.sql in sanitizer builds as it can be slow --- tests/queries/0_stateless/02998_primary_key_skip_columns.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql index b2dadcc5e7c..b567155ab1f 100644 --- a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql @@ -1,3 +1,5 @@ +-- Tags: no-asan, no-tsan, no-msan, no-ubsan + DROP TABLE IF EXISTS test; CREATE TABLE test (a UInt64, b UInt64, c UInt64) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 1, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 1; From ccc6df0e432783e3d128fe1c7b6a22b399f2b510 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 12 Mar 2024 16:43:55 +0000 Subject: [PATCH 215/374] fix aggregate function uniqExact --- .../AggregateFunctionUniq.h | 2 ++ .../Combinators/AggregateFunctionArray.h | 1 + .../Combinators/AggregateFunctionIf.h | 1 + .../Combinators/AggregateFunctionMerge.h | 1 + .../Combinators/AggregateFunctionNull.h | 1 + .../Combinators/AggregateFunctionState.h | 1 + src/AggregateFunctions/IAggregateFunction.h | 4 +++ src/Common/ColumnsHashingImpl.h | 3 +- src/Common/ProfileEvents.cpp | 2 ++ src/Interpreters/Aggregator.cpp | 18 ++++++++-- src/Interpreters/Aggregator.h | 1 + .../03008_optimize_equal_ranges.reference | 8 +++++ .../03008_optimize_equal_ranges.sql | 25 +++++++++++++ .../03008_uniq_exact_equal_ranges.reference | 0 .../03008_uniq_exact_equal_ranges.sql | 36 +++++++++++++++++++ 15 files changed, 100 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03008_optimize_equal_ranges.reference create mode 100644 tests/queries/0_stateless/03008_optimize_equal_ranges.sql create mode 100644 tests/queries/0_stateless/03008_uniq_exact_equal_ranges.reference create mode 100644 tests/queries/0_stateless/03008_uniq_exact_equal_ranges.sql diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index 8ac75e4451c..891f2ac4284 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -483,6 +483,7 @@ public: } bool isAbleToParallelizeMerge() const override { return 
is_able_to_parallelize_merge; } + bool canOptimizeEqualKeysRanges() const override { return !is_able_to_parallelize_merge; } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, ThreadPool & thread_pool, Arena *) const override { @@ -576,6 +577,7 @@ public: } bool isAbleToParallelizeMerge() const override { return is_able_to_parallelize_merge; } + bool canOptimizeEqualKeysRanges() const override { return !is_able_to_parallelize_merge; } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, ThreadPool & thread_pool, Arena *) const override { diff --git a/src/AggregateFunctions/Combinators/AggregateFunctionArray.h b/src/AggregateFunctions/Combinators/AggregateFunctionArray.h index 7f38453f86b..6b918926d0d 100644 --- a/src/AggregateFunctions/Combinators/AggregateFunctionArray.h +++ b/src/AggregateFunctions/Combinators/AggregateFunctionArray.h @@ -142,6 +142,7 @@ public: } bool isAbleToParallelizeMerge() const override { return nested_func->isAbleToParallelizeMerge(); } + bool canOptimizeEqualKeysRanges() const override { return nested_func->canOptimizeEqualKeysRanges(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, ThreadPool & thread_pool, Arena * arena) const override { diff --git a/src/AggregateFunctions/Combinators/AggregateFunctionIf.h b/src/AggregateFunctions/Combinators/AggregateFunctionIf.h index e81f2203e7b..df23398a10d 100644 --- a/src/AggregateFunctions/Combinators/AggregateFunctionIf.h +++ b/src/AggregateFunctions/Combinators/AggregateFunctionIf.h @@ -165,6 +165,7 @@ public: } bool isAbleToParallelizeMerge() const override { return nested_func->isAbleToParallelizeMerge(); } + bool canOptimizeEqualKeysRanges() const override { return nested_func->canOptimizeEqualKeysRanges(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, ThreadPool & thread_pool, Arena * arena) const override { diff --git a/src/AggregateFunctions/Combinators/AggregateFunctionMerge.h b/src/AggregateFunctions/Combinators/AggregateFunctionMerge.h index 5b9e8e606af..53c24bd60c1 100644 --- a/src/AggregateFunctions/Combinators/AggregateFunctionMerge.h +++ b/src/AggregateFunctions/Combinators/AggregateFunctionMerge.h @@ -111,6 +111,7 @@ public: } bool isAbleToParallelizeMerge() const override { return nested_func->isAbleToParallelizeMerge(); } + bool canOptimizeEqualKeysRanges() const override { return nested_func->canOptimizeEqualKeysRanges(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, ThreadPool & thread_pool, Arena * arena) const override { diff --git a/src/AggregateFunctions/Combinators/AggregateFunctionNull.h b/src/AggregateFunctions/Combinators/AggregateFunctionNull.h index 8b614f68540..ba72f960852 100644 --- a/src/AggregateFunctions/Combinators/AggregateFunctionNull.h +++ b/src/AggregateFunctions/Combinators/AggregateFunctionNull.h @@ -152,6 +152,7 @@ public: } bool isAbleToParallelizeMerge() const override { return nested_function->isAbleToParallelizeMerge(); } + bool canOptimizeEqualKeysRanges() const override { return nested_function->canOptimizeEqualKeysRanges(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, ThreadPool & thread_pool, Arena * arena) const override { diff --git a/src/AggregateFunctions/Combinators/AggregateFunctionState.h b/src/AggregateFunctions/Combinators/AggregateFunctionState.h index 8335d21cb1e..b0ab6d49604 100644 --- a/src/AggregateFunctions/Combinators/AggregateFunctionState.h +++ 
b/src/AggregateFunctions/Combinators/AggregateFunctionState.h @@ -92,6 +92,7 @@ public: } bool isAbleToParallelizeMerge() const override { return nested_func->isAbleToParallelizeMerge(); } + bool canOptimizeEqualKeysRanges() const override { return nested_func->canOptimizeEqualKeysRanges(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, ThreadPool & thread_pool, Arena * arena) const override { diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 94bb121893d..499185320e6 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -162,6 +162,10 @@ public: /// Tells if merge() with thread pool parameter could be used. virtual bool isAbleToParallelizeMerge() const { return false; } + /// Return true if it is allowed to replace call of `addBatch` + /// to `addBatchSinglePlace` for ranges of consecutive equal keys. + virtual bool canOptimizeEqualKeysRanges() const { return true; } + /// Should be used only if isAbleToParallelizeMerge() returned true. virtual void merge(AggregateDataPtr __restrict /*place*/, ConstAggregateDataPtr /*rhs*/, ThreadPool & /*thread_pool*/, Arena * /*arena*/) const diff --git a/src/Common/ColumnsHashingImpl.h b/src/Common/ColumnsHashingImpl.h index 7116160e94c..d68171a6566 100644 --- a/src/Common/ColumnsHashingImpl.h +++ b/src/Common/ColumnsHashingImpl.h @@ -62,7 +62,6 @@ struct LastElementCache bool check(const Key & key) const { return value.first == key; } bool hasOnlyOneValue() const { return found && misses == 1; } - UInt64 getMisses() const { return misses; } }; template @@ -232,7 +231,7 @@ public: ALWAYS_INLINE UInt64 getCacheMissesSinceLastReset() const { if constexpr (consecutive_keys_optimization) - return cache.getMisses(); + return cache.misses; return 0; } diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index c1ac3d08245..8fd1e189977 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -533,6 +533,8 @@ The server successfully detected this situation and will download merged part fr \ M(AggregationPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for aggregation.") \ M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were inited as two-level for aggregation.") \ + M(AggregationProcessedBlocks, "How many blocks were processed by Aggregator") \ + M(AggregationOptimizedEqualRangesOfKeys, "For how many blocks optimization of equal ranges of keys was applied") \ \ M(MetadataFromKeeperCacheHit, "Number of times an object storage metadata request was answered from cache without making request to Keeper") \ M(MetadataFromKeeperCacheMiss, "Number of times an object storage metadata request had to be answered from Keeper") \ diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 80a98683867..7c9dac82eff 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -53,6 +53,8 @@ namespace ProfileEvents extern const Event OverflowThrow; extern const Event OverflowBreak; extern const Event OverflowAny; + extern const Event AggregationProcessedBlocks; + extern const Event AggregationOptimizedEqualRangesOfKeys; } namespace CurrentMetrics @@ -985,6 +987,7 @@ void Aggregator::executeOnBlockSmall( { /// `result` will destroy the states of aggregate functions in the destructor result.aggregator = this; + ProfileEvents::increment(ProfileEvents::AggregationProcessedBlocks); /// How to 
perform the aggregation? if (result.empty()) @@ -1342,6 +1345,7 @@ void NO_INLINE Aggregator::executeImplBatch( if constexpr (use_compiled_functions) { std::vector columns_data; + bool can_optimize_equal_keys_ranges = true; for (size_t i = 0; i < aggregate_functions.size(); ++i) { @@ -1350,13 +1354,15 @@ void NO_INLINE Aggregator::executeImplBatch( AggregateFunctionInstruction * inst = aggregate_instructions + i; size_t arguments_size = inst->that->getArgumentTypes().size(); // NOLINT + can_optimize_equal_keys_ranges &= inst->can_optimize_equal_keys_ranges; for (size_t argument_index = 0; argument_index < arguments_size; ++argument_index) columns_data.emplace_back(getColumnData(inst->batch_arguments[argument_index])); } - if (all_keys_are_const || (!no_more_keys && state.hasOnlyOneValueSinceLastReset())) + if (all_keys_are_const || (can_optimize_equal_keys_ranges && state.hasOnlyOneValueSinceLastReset())) { + ProfileEvents::increment(ProfileEvents::AggregationOptimizedEqualRangesOfKeys); auto add_into_aggregate_states_function_single_place = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function_single_place; add_into_aggregate_states_function_single_place(row_begin, row_end, columns_data.data(), places[key_start]); } @@ -1379,10 +1385,15 @@ void NO_INLINE Aggregator::executeImplBatch( AggregateFunctionInstruction * inst = aggregate_instructions + i; - if (all_keys_are_const || (!no_more_keys && state.hasOnlyOneValueSinceLastReset())) + if (all_keys_are_const || (inst->can_optimize_equal_keys_ranges && state.hasOnlyOneValueSinceLastReset())) + { + ProfileEvents::increment(ProfileEvents::AggregationOptimizedEqualRangesOfKeys); addBatchSinglePlace(row_begin, row_end, inst, places[key_start] + inst->state_offset, aggregates_pool); + } else + { addBatch(row_begin, row_end, inst, places.get(), aggregates_pool); + } } } @@ -1510,6 +1521,7 @@ void NO_INLINE Aggregator::executeOnIntervalWithoutKey( /// `data_variants` will destroy the states of aggregate functions in the destructor data_variants.aggregator = this; data_variants.init(AggregatedDataVariants::Type::without_key); + ProfileEvents::increment(ProfileEvents::AggregationProcessedBlocks); AggregatedDataWithoutKey & res = data_variants.without_key; @@ -1588,6 +1600,7 @@ void Aggregator::prepareAggregateInstructions( } aggregate_functions_instructions[i].has_sparse_arguments = has_sparse_arguments; + aggregate_functions_instructions[i].can_optimize_equal_keys_ranges = aggregate_functions[i]->canOptimizeEqualKeysRanges(); aggregate_functions_instructions[i].arguments = aggregate_columns[i].data(); aggregate_functions_instructions[i].state_offset = offsets_of_aggregate_states[i]; @@ -1640,6 +1653,7 @@ bool Aggregator::executeOnBlock(Columns columns, { /// `result` will destroy the states of aggregate functions in the destructor result.aggregator = this; + ProfileEvents::increment(ProfileEvents::AggregationProcessedBlocks); /// How to perform the aggregation? 
if (result.empty()) diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 375b8986101..2d3f497fec0 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1221,6 +1221,7 @@ public: const IColumn ** batch_arguments{}; const UInt64 * offsets{}; bool has_sparse_arguments = false; + bool can_optimize_equal_keys_ranges = true; }; /// Used for optimize_aggregation_in_order: diff --git a/tests/queries/0_stateless/03008_optimize_equal_ranges.reference b/tests/queries/0_stateless/03008_optimize_equal_ranges.reference new file mode 100644 index 00000000000..08f8008fca6 --- /dev/null +++ b/tests/queries/0_stateless/03008_optimize_equal_ranges.reference @@ -0,0 +1,8 @@ +0 30000 +1 30000 +2 30000 +0 449985000 +1 449985000 +2 449985000 +sum 1 +uniqExact 0 diff --git a/tests/queries/0_stateless/03008_optimize_equal_ranges.sql b/tests/queries/0_stateless/03008_optimize_equal_ranges.sql new file mode 100644 index 00000000000..c6143fb7f51 --- /dev/null +++ b/tests/queries/0_stateless/03008_optimize_equal_ranges.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t_optimize_equal_ranges; + +CREATE TABLE t_optimize_equal_ranges (a UInt64, b String, c UInt64) ENGINE = MergeTree ORDER BY a; + +SET max_block_size = 1024; +SET max_bytes_before_external_group_by = 0; +SET optimize_aggregation_in_order = 0; + +INSERT INTO t_optimize_equal_ranges SELECT 0, toString(number), number FROM numbers(30000); +INSERT INTO t_optimize_equal_ranges SELECT 1, toString(number), number FROM numbers(30000); +INSERT INTO t_optimize_equal_ranges SELECT 2, toString(number), number FROM numbers(30000); + +SELECT a, uniqExact(b) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a; +SELECT a, sum(c) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a; + +SYSTEM FLUSH LOGS; + +SELECT + used_aggregate_functions[1] AS func, + ProfileEvents['AggregationOptimizedEqualRangesOfKeys'] > 0 +FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query LIKE '%SELECT%FROM%t_optimize_equal_ranges%' +ORDER BY func; + +DROP TABLE t_optimize_equal_ranges; diff --git a/tests/queries/0_stateless/03008_uniq_exact_equal_ranges.reference b/tests/queries/0_stateless/03008_uniq_exact_equal_ranges.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03008_uniq_exact_equal_ranges.sql b/tests/queries/0_stateless/03008_uniq_exact_equal_ranges.sql new file mode 100644 index 00000000000..2e708f28cac --- /dev/null +++ b/tests/queries/0_stateless/03008_uniq_exact_equal_ranges.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS t_uniq_exact; + +CREATE TABLE t_uniq_exact (a UInt64, b String, c UInt64) ENGINE = MergeTree ORDER BY a; + +SET group_by_two_level_threshold_bytes = 1; +SET group_by_two_level_threshold = 1; +SET max_threads = 4; +SET max_bytes_before_external_group_by = 0; +SET optimize_aggregation_in_order = 0; + +INSERT INTO t_uniq_exact SELECT 0, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 1, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 2, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 3, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 4, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 5, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 6, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT 
INTO t_uniq_exact SELECT 7, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 8, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 9, randomPrintableASCII(5), rand() FROM numbers(300000); + +OPTIMIZE TABLE t_uniq_exact FINAL; + +SELECT a, uniqExact(b) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 1.0 +EXCEPT +SELECT a, uniqExact(b) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 0.5; + +SELECT a, sum(c) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 1.0 +EXCEPT +SELECT a, sum(c) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 0.5; + +DROP TABLE t_uniq_exact; From 53442f49140c9c8b391f5b3a76a9bf4ab45dbed0 Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Tue, 12 Mar 2024 13:48:47 -0300 Subject: [PATCH 216/374] Cleans up markdown. --- docs/en/sql-reference/aggregate-functions/reference/varpop.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpop.md b/docs/en/sql-reference/aggregate-functions/reference/varpop.md index 5f18bdc30f6..76472f62789 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varpop.md @@ -27,7 +27,7 @@ Returns an integer of type `Float64`. **Implementation details** -This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varPopStable` function](#varPopStable). +This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varPopStable` function](#varPopStable). **Example** From b3cfb8a2cba2f779e63cb35aec49fd263a24eb19 Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Tue, 12 Mar 2024 13:57:03 -0300 Subject: [PATCH 217/374] Adds sleep function docs. --- .../functions/other-functions.md | 92 ++++++++++++++++++- 1 file changed, 89 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 739b688a0d2..de8ac67a936 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -300,11 +300,97 @@ The argument is internally still evaluated. Useful e.g. for benchmarks. ## sleep(seconds) -Sleeps ‘seconds’ seconds for each data block. The sleep time can be specified as integer or as floating-point number. +Used to introduce a delay or pause in the execution of a query. It is primarily used for testing and debugging purposes. -## sleepEachRow(seconds) +**Syntax** -Sleeps ‘seconds’ seconds for each row. The sleep time can be specified as integer or as floating-point number. +```sql +sleep(seconds) +``` + +**Arguments** + +- `seconds`: [Int](../../sql-reference/data-types/int-uint.md) The number of seconds to pause the query execution to a maximum of 3 seconds. It can be a floating-point value to specify fractional seconds. + +**Returned value** + +This function does not return any value. + +**Example** + +```sql +SELECT sleep(2); +``` + +This function does not return any value. 
However, if you run the function with `clickhouse client` you will see something similar to: + +```response +SELECT sleep(2) + +Query id: 8aa9943e-a686-45e1-8317-6e8e3a5596ac + +┌─sleep(2)─┐ +│ 0 │ +└──────────┘ + +1 row in set. Elapsed: 2.012 sec. +``` + +This query will pause for 2 seconds before completing. During this time, no results will be returned, and the query will appear to be hanging or unresponsive. + +**Implementation details** + +The `sleep()` function is generally not used in production environments, as it can negatively impact query performance and system responsiveness. However, it can be useful in the following scenarios: + +1. **Testing**: When testing or benchmarking ClickHouse, you may want to simulate delays or introduce pauses to observe how the system behaves under certain conditions. +2. **Debugging**: If you need to examine the state of the system or the execution of a query at a specific point in time, you can use `sleep()` to introduce a pause, allowing you to inspect or collect relevant information. +3. **Simulation**: In some cases, you may want to simulate real-world scenarios where delays or pauses occur, such as network latency or external system dependencies. + +It's important to use the `sleep()` function judiciously and only when necessary, as it can potentially impact the overall performance and responsiveness of your ClickHouse system. + +## sleepEachRow + +Pauses the execution of a query for a specified number of seconds for each row in the result set. + +**Syntax** + +```sql +sleepEachRow(seconds) +``` + +**Arguments** + +- `seconds`: [Int](../../sql-reference/data-types/int-uint.md) The number of seconds to pause the query execution for each row in the result set. It can be a floating-point value to specify fractional seconds. + +**Returned value** + +This function returns the same input values as it receives, without modifying them. + +**Example** + +```sql +SELECT number, sleepEachRow(0.5) FROM system.numbers LIMIT 5; +``` + +```response +┌─number─┬─sleepEachRow(0.5)─┐ +│ 0 │ 0 │ +│ 1 │ 0 │ +│ 2 │ 0 │ +│ 3 │ 0 │ +│ 4 │ 0 │ +└────────┴───────────────────┘ +``` + +But the output will be delayed, with a 0.5-second pause between each row. + +The `sleepEachRow()` function is primarily used for testing and debugging purposes, similar to the `sleep()` function. It allows you to simulate delays or introduce pauses in the processing of each row, which can be useful in scenarios such as: + +1. **Testing**: When testing or benchmarking ClickHouse's performance under specific conditions, you can use `sleepEachRow()` to simulate delays or introduce pauses for each row processed. +2. **Debugging**: If you need to examine the state of the system or the execution of a query for each row processed, you can use `sleepEachRow()` to introduce pauses, allowing you to inspect or collect relevant information. +3. **Simulation**: In some cases, you may want to simulate real-world scenarios where delays or pauses occur for each row processed, such as when dealing with external systems or network latencies. + +Like the [`sleep()` function](#sleep), it's important to use `sleepEachRow()` judiciously and only when necessary, as it can significantly impact the overall performance and responsiveness of your ClickHouse system, especially when dealing with large result sets. 
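As a rough rule of thumb for the examples above, the total delay introduced by `sleepEachRow()` is approximately the number of processed rows multiplied by the per-row pause, plus normal query overhead. A minimal sketch with illustrative values (4 rows at 0.5 seconds each, so roughly 2 seconds of added delay):

```sql
-- Minimal timing sketch: 4 rows x 0.5 s per row is roughly 2 s of added delay.
-- The row count and pause duration are illustrative values only.
SELECT number, sleepEachRow(0.5)
FROM system.numbers
LIMIT 4;
```
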
## currentDatabase() From 2847def5176509bdbefe0809c2abbe31baf540af Mon Sep 17 00:00:00 2001 From: johnnymatthews <9611008+johnnymatthews@users.noreply.github.com> Date: Tue, 12 Mar 2024 13:57:34 -0300 Subject: [PATCH 218/374] Formats markdown with prettier. --- .../functions/other-functions.md | 408 +++++++++--------- 1 file changed, 204 insertions(+), 204 deletions(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index de8ac67a936..df443eec0de 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -17,7 +17,7 @@ Returns a named value from the [macros](../../operations/server-configuration-pa **Syntax** -``` sql +```sql getMacro(name); ``` @@ -35,7 +35,7 @@ Type: [String](../../sql-reference/data-types/string.md). Example `` section in the server configuration file: -``` xml +```xml Value @@ -43,13 +43,13 @@ Example `` section in the server configuration file: Query: -``` sql +```sql SELECT getMacro('test'); ``` Result: -``` text +```text ┌─getMacro('test')─┐ │ Value │ └──────────────────┘ @@ -57,12 +57,12 @@ Result: The same value can be retrieved as follows: -``` sql +```sql SELECT * FROM system.macros WHERE macro = 'test'; ``` -``` text +```text ┌─macro─┬─substitution─┐ │ test │ Value │ └───────┴──────────────┘ @@ -74,7 +74,7 @@ Returns the fully qualified domain name of the ClickHouse server. **Syntax** -``` sql +```sql fqdn(); ``` @@ -88,13 +88,13 @@ Type: `String`. **Example** -``` sql +```sql SELECT FQDN(); ``` Result: -``` text +```text ┌─FQDN()──────────────────────────┐ │ clickhouse.ru-central1.internal │ └─────────────────────────────────┘ @@ -104,7 +104,7 @@ Result: Extracts the tail of a string following its last slash or backslash. This function if often used to extract the filename from a path. -``` sql +```sql basename(expr) ``` @@ -123,13 +123,13 @@ A string that contains: Query: -``` sql +```sql SELECT 'some/long/path/to/file' AS a, basename(a) ``` Result: -``` text +```text ┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ │ some\long\path\to\file │ file │ └────────────────────────┴────────────────────────────────────────┘ @@ -137,13 +137,13 @@ Result: Query: -``` sql +```sql SELECT 'some\\long\\path\\to\\file' AS a, basename(a) ``` Result: -``` text +```text ┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ │ some\long\path\to\file │ file │ └────────────────────────┴────────────────────────────────────────┘ @@ -151,13 +151,13 @@ Result: Query: -``` sql +```sql SELECT 'some-file-name' AS a, basename(a) ``` Result: -``` text +```text ┌─a──────────────┬─basename('some-file-name')─┐ │ some-file-name │ some-file-name │ └────────────────┴────────────────────────────┘ @@ -170,11 +170,11 @@ This function is used by the system to implement Pretty formats. `NULL` is represented as a string corresponding to `NULL` in `Pretty` formats. -``` sql +```sql SELECT visibleWidth(NULL) ``` -``` text +```text ┌─visibleWidth(NULL)─┐ │ 4 │ └────────────────────┘ @@ -256,7 +256,7 @@ SELECT key, byteSize(u8) AS `byteSize(UInt8)`, byteSize(u16) AS `byteSize(UInt16 Result: -``` text +```text Row 1: ────── key: 1 @@ -401,7 +401,7 @@ Useful in table engine parameters of `CREATE TABLE` queries where you need to sp Returns the name of the current user. In case of a distributed query, the name of the user who initiated the query is returned. -``` sql +```sql SELECT currentUser(); ``` @@ -416,13 +416,13 @@ Type: `String`. 
**Example** -``` sql +```sql SELECT currentUser(); ``` Result: -``` text +```text ┌─currentUser()─┐ │ default │ └───────────────┘ @@ -438,7 +438,7 @@ This function is mostly intended for development, debugging and demonstration. **Syntax** -``` sql +```sql isConstant(x) ``` @@ -457,13 +457,13 @@ Type: [UInt8](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT isConstant(x + 1) FROM (SELECT 43 AS x) ``` Result: -``` text +```text ┌─isConstant(plus(x, 1))─┐ │ 1 │ └────────────────────────┘ @@ -471,13 +471,13 @@ Result: Query: -``` sql +```sql WITH 3.14 AS pi SELECT isConstant(cos(pi)) ``` Result: -``` text +```text ┌─isConstant(cos(pi))─┐ │ 1 │ └─────────────────────┘ @@ -485,13 +485,13 @@ Result: Query: -``` sql +```sql SELECT isConstant(number) FROM numbers(1) ``` Result: -``` text +```text ┌─isConstant(number)─┐ │ 0 │ └────────────────────┘ @@ -511,7 +511,7 @@ Checks whether a floating point value is finite. **Syntax** -``` sql +```sql ifNotFinite(x,y) ``` @@ -565,7 +565,7 @@ The band is drawn with accuracy to one eighth of a symbol. Example: -``` sql +```sql SELECT toHour(EventTime) AS h, count() AS c, @@ -575,7 +575,7 @@ GROUP BY h ORDER BY h ASC ``` -``` text +```text ┌──h─┬──────c─┬─bar────────────────┐ │ 0 │ 292907 │ █████████▋ │ │ 1 │ 180563 │ ██████ │ @@ -633,7 +633,7 @@ For example, the first argument could have type `Int64`, while the second argume Example: -``` sql +```sql SELECT transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title, count() AS c @@ -643,7 +643,7 @@ GROUP BY title ORDER BY c DESC ``` -``` text +```text ┌─title─────┬──────c─┐ │ Yandex │ 498635 │ │ Google │ 229872 │ @@ -657,7 +657,7 @@ Similar to the other variation but has no ‘default’ argument. In case no mat Example: -``` sql +```sql SELECT transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s, count() AS c @@ -667,7 +667,7 @@ ORDER BY count() DESC LIMIT 10 ``` -``` text +```text ┌─s──────────────┬───────c─┐ │ │ 2906259 │ │ www.yandex │ 867767 │ @@ -687,13 +687,13 @@ Given a size (number of bytes), this function returns a readable, rounded size w Example: -``` sql +```sql SELECT arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes, formatReadableDecimalSize(filesize_bytes) AS filesize ``` -``` text +```text ┌─filesize_bytes─┬─filesize───┐ │ 1 │ 1.00 B │ │ 1024 │ 1.02 KB │ @@ -708,7 +708,7 @@ Given a size (number of bytes), this function returns a readable, rounded size w Example: -``` sql +```sql SELECT arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes, formatReadableSize(filesize_bytes) AS filesize @@ -716,7 +716,7 @@ SELECT Alias: `FORMAT_BYTES`. -``` text +```text ┌─filesize_bytes─┬─filesize───┐ │ 1 │ 1.00 B │ │ 1024 │ 1.00 KiB │ @@ -731,13 +731,13 @@ Given a number, this function returns a rounded number with suffix (thousand, mi Example: -``` sql +```sql SELECT arrayJoin([1024, 1234 * 1000, (4567 * 1000) * 1000, 98765432101234]) AS number, formatReadableQuantity(number) AS number_for_humans ``` -``` text +```text ┌─────────number─┬─number_for_humans─┐ │ 1024 │ 1.02 thousand │ │ 1234000 │ 1.23 million │ @@ -752,7 +752,7 @@ Given a time interval (delta) in seconds, this function returns a time delta wit **Syntax** -``` sql +```sql formatReadableTimeDelta(column[, maximum_unit, minimum_unit]) ``` @@ -760,21 +760,22 @@ formatReadableTimeDelta(column[, maximum_unit, minimum_unit]) - `column` — A column with a numeric time delta. - `maximum_unit` — Optional. Maximum unit to show. 
- * Acceptable values: `nanoseconds`, `microseconds`, `milliseconds`, `seconds`, `minutes`, `hours`, `days`, `months`, `years`. - * Default value: `years`. + - Acceptable values: `nanoseconds`, `microseconds`, `milliseconds`, `seconds`, `minutes`, `hours`, `days`, `months`, `years`. + - Default value: `years`. - `minimum_unit` — Optional. Minimum unit to show. All smaller units are truncated. - * Acceptable values: `nanoseconds`, `microseconds`, `milliseconds`, `seconds`, `minutes`, `hours`, `days`, `months`, `years`. - * If explicitly specified value is bigger than `maximum_unit`, an exception will be thrown. - * Default value: `seconds` if `maximum_unit` is `seconds` or bigger, `nanoseconds` otherwise. + - Acceptable values: `nanoseconds`, `microseconds`, `milliseconds`, `seconds`, `minutes`, `hours`, `days`, `months`, `years`. + - If explicitly specified value is bigger than `maximum_unit`, an exception will be thrown. + - Default value: `seconds` if `maximum_unit` is `seconds` or bigger, `nanoseconds` otherwise. **Example** -``` sql + +```sql SELECT arrayJoin([100, 12345, 432546534]) AS elapsed, formatReadableTimeDelta(elapsed) AS time_delta ``` -``` text +```text ┌────elapsed─┬─time_delta ─────────────────────────────────────────────────────┐ │ 100 │ 1 minute and 40 seconds │ │ 12345 │ 3 hours, 25 minutes and 45 seconds │ @@ -782,13 +783,13 @@ SELECT └────────────┴─────────────────────────────────────────────────────────────────┘ ``` -``` sql +```sql SELECT arrayJoin([100, 12345, 432546534]) AS elapsed, formatReadableTimeDelta(elapsed, 'minutes') AS time_delta ``` -``` text +```text ┌────elapsed─┬─time_delta ─────────────────────────────────────────────────────┐ │ 100 │ 1 minute and 40 seconds │ │ 12345 │ 205 minutes and 45 seconds │ @@ -824,7 +825,6 @@ parseTimeDelta(timestr) - `timestr` — A sequence of numbers followed by something resembling a time unit. - **Returned value** - A floating-point number with the number of seconds. @@ -936,7 +936,7 @@ The window function that provides access to a row at a specified offset before o **Syntax** -``` sql +```sql neighbor(column, offset[, default_value]) ``` @@ -966,13 +966,13 @@ Type: type of data blocks affected or default value type. Query: -``` sql +```sql SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; ``` Result: -``` text +```text ┌─number─┬─neighbor(number, 2)─┐ │ 0 │ 2 │ │ 1 │ 3 │ @@ -989,13 +989,13 @@ Result: Query: -``` sql +```sql SELECT number, neighbor(number, 2, 999) FROM system.numbers LIMIT 10; ``` Result: -``` text +```text ┌─number─┬─neighbor(number, 2, 999)─┐ │ 0 │ 2 │ │ 1 │ 3 │ @@ -1014,7 +1014,7 @@ This function can be used to compute year-over-year metric value: Query: -``` sql +```sql WITH toDate('2018-01-01') AS start_date SELECT toStartOfMonth(start_date + (number * 32)) AS month, @@ -1026,7 +1026,7 @@ FROM numbers(16) Result: -``` text +```text ┌──────month─┬─money─┬─prev_year─┬─year_over_year─┐ │ 2018-01-01 │ 32 │ 0 │ 0 │ │ 2018-02-01 │ 63 │ 0 │ 0 │ @@ -1063,7 +1063,7 @@ To prevent that you can create a subquery with [ORDER BY](../../sql-reference/st Example: -``` sql +```sql SELECT EventID, EventTime, @@ -1080,7 +1080,7 @@ FROM ) ``` -``` text +```text ┌─EventID─┬───────────EventTime─┬─delta─┐ │ 1106 │ 2016-11-24 00:00:04 │ 0 │ │ 1107 │ 2016-11-24 00:00:05 │ 1 │ @@ -1092,7 +1092,7 @@ FROM Please note that the block size affects the result. The internal state of `runningDifference` state is reset for each new block. 
-``` sql +```sql SELECT number, runningDifference(number + 1) AS diff @@ -1100,7 +1100,7 @@ FROM numbers(100000) WHERE diff != 1 ``` -``` text +```text ┌─number─┬─diff─┐ │ 0 │ 0 │ └────────┴──────┘ @@ -1109,7 +1109,7 @@ WHERE diff != 1 └────────┴──────┘ ``` -``` sql +```sql set max_block_size=100000 -- default value is 65536! SELECT @@ -1119,7 +1119,7 @@ FROM numbers(100000) WHERE diff != 1 ``` -``` text +```text ┌─number─┬─diff─┐ │ 0 │ 0 │ └────────┴──────┘ @@ -1135,21 +1135,20 @@ Calculates the number of concurrent events. Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type. The function calculates the total number of active (concurrent) events for each event start time. - :::tip Events must be ordered by the start time in ascending order. If this requirement is violated the function raises an exception. Every data block is processed separately. If events from different data blocks overlap then they can not be processed correctly. ::: **Syntax** -``` sql +```sql runningConcurrency(start, end) ``` **Arguments** - `start` — A column with the start time of events. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), or [DateTime64](../../sql-reference/data-types/datetime64.md). -- `end` — A column with the end time of events. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), or [DateTime64](../../sql-reference/data-types/datetime64.md). +- `end` — A column with the end time of events. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), or [DateTime64](../../sql-reference/data-types/datetime64.md). **Returned values** @@ -1161,7 +1160,7 @@ Type: [UInt32](../../sql-reference/data-types/int-uint.md) Consider the table: -``` text +```text ┌──────start─┬────────end─┐ │ 2021-03-03 │ 2021-03-11 │ │ 2021-03-06 │ 2021-03-12 │ @@ -1172,13 +1171,13 @@ Consider the table: Query: -``` sql +```sql SELECT start, runningConcurrency(start, end) FROM example_table; ``` Result: -``` text +```text ┌──────start─┬─runningConcurrency(start, end)─┐ │ 2021-03-03 │ 1 │ │ 2021-03-06 │ 2 │ @@ -1204,7 +1203,7 @@ Given a MAC address in format AA:BB:CC:DD:EE:FF (colon-separated numbers in hexa Returns the number of fields in [Enum](../../sql-reference/data-types/enum.md). An exception is thrown if the type is not `Enum`. -``` sql +```sql getSizeOfEnumType(value) ``` @@ -1218,11 +1217,11 @@ getSizeOfEnumType(value) **Example** -``` sql +```sql SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x ``` -``` text +```text ┌─x─┐ │ 2 │ └───┘ @@ -1232,7 +1231,7 @@ SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x Returns the size on disk without considering compression. -``` sql +```sql blockSerializedSize(value[, value[, ...]]) ``` @@ -1248,13 +1247,13 @@ blockSerializedSize(value[, value[, ...]]) Query: -``` sql +```sql SELECT blockSerializedSize(maxState(1)) as x ``` Result: -``` text +```text ┌─x─┐ │ 2 │ └───┘ @@ -1264,7 +1263,7 @@ Result: Returns the internal name of the data type that represents the value. 
-``` sql +```sql toColumnTypeName(value) ``` @@ -1280,13 +1279,13 @@ toColumnTypeName(value) Difference between `toTypeName ' and ' toColumnTypeName`: -``` sql +```sql SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) ``` Result: -``` text +```text ┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ │ DateTime │ └─────────────────────────────────────────────────────┘ @@ -1294,13 +1293,13 @@ Result: Query: -``` sql +```sql SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) ``` Result: -``` text +```text ┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ │ Const(UInt32) │ └───────────────────────────────────────────────────────────┘ @@ -1312,7 +1311,7 @@ The example shows that the `DateTime` data type is internally stored as `Const(U Outputs a detailed description of data structures in RAM -``` sql +```sql dumpColumnStructure(value) ``` @@ -1326,11 +1325,11 @@ dumpColumnStructure(value) **Example** -``` sql +```sql SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) ``` -``` text +```text ┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ │ DateTime, Const(size = 1, UInt32(size = 1)) │ └──────────────────────────────────────────────────────────────┘ @@ -1342,7 +1341,7 @@ Returns the default value for the given data type. Does not include default values for custom columns set by the user. -``` sql +```sql defaultValueOfArgumentType(expression) ``` @@ -1360,13 +1359,13 @@ defaultValueOfArgumentType(expression) Query: -``` sql +```sql SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) ``` Result: -``` text +```text ┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ │ 0 │ └─────────────────────────────────────────────┘ @@ -1374,13 +1373,13 @@ Result: Query: -``` sql +```sql SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) ``` Result: -``` text +```text ┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ │ ᴺᵁᴸᴸ │ └───────────────────────────────────────────────────────┘ @@ -1392,7 +1391,7 @@ Returns the default value for the given type name. Does not include default values for custom columns set by the user. -``` sql +```sql defaultValueOfTypeName(type) ``` @@ -1410,13 +1409,13 @@ defaultValueOfTypeName(type) Query: -``` sql +```sql SELECT defaultValueOfTypeName('Int8') ``` Result: -``` text +```text ┌─defaultValueOfTypeName('Int8')─┐ │ 0 │ └────────────────────────────────┘ @@ -1424,13 +1423,13 @@ Result: Query: -``` sql +```sql SELECT defaultValueOfTypeName('Nullable(Int8)') ``` Result: -``` text +```text ┌─defaultValueOfTypeName('Nullable(Int8)')─┐ │ ᴺᵁᴸᴸ │ └──────────────────────────────────────────┘ @@ -1542,7 +1541,7 @@ Creates an array with a single value. Used for the internal implementation of [arrayJoin](../../sql-reference/functions/array-join.md#functions_arrayjoin). -``` sql +```sql SELECT replicate(x, arr); ``` @@ -1561,13 +1560,13 @@ Type: `Array`. Query: -``` sql +```sql SELECT replicate(1, ['a', 'b', 'c']) ``` Result: -``` text +```text ┌─replicate(1, ['a', 'b', 'c'])─┐ │ [1,1,1] │ └───────────────────────────────┘ @@ -1579,7 +1578,7 @@ Returns the amount of free space in the filesystem hosting the database persiste **Syntax** -``` sql +```sql filesystemAvailable() ``` @@ -1593,13 +1592,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). 
Query: -``` sql +```sql SELECT formatReadableSize(filesystemAvailable()) AS "Available space"; ``` Result: -``` text +```text ┌─Available space─┐ │ 30.75 GiB │ └─────────────────┘ @@ -1611,7 +1610,7 @@ Returns the total amount of the free space on the filesystem hosting the databas **Syntax** -``` sql +```sql filesystemFree() ``` @@ -1625,13 +1624,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT formatReadableSize(filesystemFree()) AS "Free space"; ``` Result: -``` text +```text ┌─Free space─┐ │ 32.39 GiB │ └────────────┘ @@ -1643,7 +1642,7 @@ Returns the capacity of the filesystem in bytes. Needs the [path](../../operatio **Syntax** -``` sql +```sql filesystemCapacity() ``` @@ -1657,13 +1656,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT formatReadableSize(filesystemCapacity()) AS "Capacity"; ``` Result: -``` text +```text ┌─Capacity──┐ │ 39.32 GiB │ └───────────┘ @@ -1675,7 +1674,7 @@ Calculates the result of an aggregate function based on a single value. This fun **Syntax** -``` sql +```sql initializeAggregation (aggregate_function, arg1, arg2, ..., argN) ``` @@ -1697,6 +1696,7 @@ Query: ```sql SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM numbers(10000)); ``` + Result: ```text @@ -1749,7 +1749,7 @@ Given a state of aggregate function, this function returns the result of aggrega **Syntax** -``` sql +```sql finalizeAggregation(state) ``` @@ -1854,7 +1854,7 @@ The state is reset for each new block of data. **Syntax** -``` sql +```sql runningAccumulate(agg_state[, grouping]); ``` @@ -1875,13 +1875,13 @@ Consider how you can use `runningAccumulate` to find the cumulative sum of numbe Query: -``` sql +```sql SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k) AS sum_k FROM numbers(10) GROUP BY k ORDER BY k); ``` Result: -``` text +```text ┌─k─┬─res─┐ │ 0 │ 0 │ │ 1 │ 1 │ @@ -1909,7 +1909,7 @@ The following example shows the `groupping` parameter usage: Query: -``` sql +```sql SELECT grouping, item, @@ -1928,7 +1928,7 @@ FROM Result: -``` text +```text ┌─grouping─┬─item─┬─res─┐ │ 0 │ 0 │ 0 │ │ 0 │ 1 │ 1 │ @@ -1960,7 +1960,7 @@ Only supports tables created with the `ENGINE = Join(ANY, LEFT, )` st **Syntax** -``` sql +```sql joinGet(join_storage_table_name, `value_column`, join_keys) ``` @@ -1982,13 +1982,13 @@ More info about `join_use_nulls` in [Join operation](../../engines/table-engines Input table: -``` sql +```sql CREATE DATABASE db_test CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) ``` -``` text +```text ┌─id─┬─val─┐ │ 4 │ 13 │ │ 2 │ 12 │ @@ -1998,13 +1998,13 @@ INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) Query: -``` sql +```sql SELECT joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 ``` Result: -``` text +```text ┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐ │ 0 │ │ 11 │ @@ -2022,7 +2022,7 @@ This function is not available in ClickHouse Cloud. Evaluate an external catboost model. [CatBoost](https://catboost.ai) is an open-source gradient boosting library developed by Yandex for machine learning. Accepts a path to a catboost model and model arguments (features). Returns Float64. 
-``` sql +```sql SELECT feat1, ..., feat_n, catboostEvaluate('/path/to/model.bin', feat_1, ..., feat_n) AS prediction FROM data_table ``` @@ -2035,7 +2035,7 @@ Before evaluating catboost models, the `libcatboostmodel.` library mus Next, specify the path to `libcatboostmodel.` in the clickhouse configuration: -``` xml +```xml ... /path/to/libcatboostmodel.so @@ -2048,7 +2048,7 @@ At the first execution of `catboostEvaluate()`, the server starts the library br communicate using a HTTP interface. By default, port `9012` is used. A different port can be specified as follows - this is useful if port `9012` is already assigned to a different service. -``` xml +```xml 9019 @@ -2072,13 +2072,13 @@ To use the `error_code` argument, configuration parameter `allow_custom_error_co **Example** -``` sql +```sql SELECT throwIf(number = 3, 'Too many') FROM numbers(10); ``` Result: -``` text +```text ↙ Progress: 0.00 rows, 0.00 B (0.00 rows/s., 0.00 B/s.) Received exception from server (version 19.14.1): Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many. ``` @@ -2089,7 +2089,7 @@ Returns its argument. Intended for debugging and testing. Allows to cancel using **Syntax** -``` sql +```sql identity(x) ``` @@ -2097,13 +2097,13 @@ identity(x) Query: -``` sql +```sql SELECT identity(42); ``` Result: -``` text +```text ┌─identity(42)─┐ │ 42 │ └──────────────┘ @@ -2150,7 +2150,7 @@ Checks whether the [Decimal](../../sql-reference/data-types/decimal.md) value is **Syntax** -``` sql +```sql isDecimalOverflow(d, [p]) ``` @@ -2168,7 +2168,7 @@ isDecimalOverflow(d, [p]) Query: -``` sql +```sql SELECT isDecimalOverflow(toDecimal32(1000000000, 0), 9), isDecimalOverflow(toDecimal32(1000000000, 0)), isDecimalOverflow(toDecimal32(-1000000000, 0), 9), @@ -2177,7 +2177,7 @@ SELECT isDecimalOverflow(toDecimal32(1000000000, 0), 9), Result: -``` text +```text 1 1 1 1 ``` @@ -2187,7 +2187,7 @@ Returns number of decimal digits need to represent a value. **Syntax** -``` sql +```sql countDigits(x) ``` @@ -2209,7 +2209,7 @@ For `Decimal` values takes into account their scales: calculates result over und Query: -``` sql +```sql SELECT countDigits(toDecimal32(1, 9)), countDigits(toDecimal32(-1, 9)), countDigits(toDecimal64(1, 18)), countDigits(toDecimal64(-1, 18)), countDigits(toDecimal128(1, 38)), countDigits(toDecimal128(-1, 38)); @@ -2217,7 +2217,7 @@ SELECT countDigits(toDecimal32(1, 9)), countDigits(toDecimal32(-1, 9)), Result: -``` text +```text 10 10 19 19 39 39 ``` @@ -2229,13 +2229,13 @@ Type: [LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md) **Syntax** -``` sql +```sql errorCodeToName(1) ``` Result: -``` text +```text UNSUPPORTED_METHOD ``` @@ -2246,7 +2246,7 @@ If executed in the context of a distributed table, this function generates a nor **Syntax** -``` sql +```sql tcpPort() ``` @@ -2264,13 +2264,13 @@ Type: [UInt16](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT tcpPort(); ``` Result: -``` text +```text ┌─tcpPort()─┐ │ 9000 │ └───────────┘ @@ -2288,7 +2288,7 @@ The command [SET PROFILE](../../sql-reference/statements/set.md#query-set) could **Syntax** -``` sql +```sql currentProfiles() ``` @@ -2300,11 +2300,11 @@ Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-refere ## enabledProfiles - Returns settings profiles, assigned to the current user both explicitly and implicitly. Explicitly assigned profiles are the same as returned by the [currentProfiles](#current-profiles) function. 
Implicitly assigned profiles include parent profiles of other assigned profiles, profiles assigned via granted roles, profiles assigned via their own settings, and the main default profile (see the `default_profile` section in the main server configuration file). +Returns settings profiles, assigned to the current user both explicitly and implicitly. Explicitly assigned profiles are the same as returned by the [currentProfiles](#current-profiles) function. Implicitly assigned profiles include parent profiles of other assigned profiles, profiles assigned via granted roles, profiles assigned via their own settings, and the main default profile (see the `default_profile` section in the main server configuration file). **Syntax** -``` sql +```sql enabledProfiles() ``` @@ -2320,7 +2320,7 @@ Returns all the profiles specified at the current user's definition (see [CREATE **Syntax** -``` sql +```sql defaultProfiles() ``` @@ -2336,7 +2336,7 @@ Returns the roles assigned to the current user. The roles can be changed by the **Syntax** -``` sql +```sql currentRoles() ``` @@ -2352,7 +2352,7 @@ Returns the names of the current roles and the roles, granted to some of the cur **Syntax** -``` sql +```sql enabledRoles() ``` @@ -2368,7 +2368,7 @@ Returns the roles which are enabled by default for the current user when he logs **Syntax** -``` sql +```sql defaultRoles() ``` @@ -2384,7 +2384,7 @@ Returns the server port number. When the port is not used by the server, throws **Syntax** -``` sql +```sql getServerPort(port_name) ``` @@ -2392,16 +2392,16 @@ getServerPort(port_name) - `port_name` — The name of the server port. [String](../../sql-reference/data-types/string.md#string). Possible values: - - 'tcp_port' - - 'tcp_port_secure' - - 'http_port' - - 'https_port' - - 'interserver_http_port' - - 'interserver_https_port' - - 'mysql_port' - - 'postgresql_port' - - 'grpc_port' - - 'prometheus.port' + - 'tcp_port' + - 'tcp_port_secure' + - 'http_port' + - 'https_port' + - 'interserver_http_port' + - 'interserver_https_port' + - 'mysql_port' + - 'postgresql_port' + - 'grpc_port' + - 'prometheus.port' **Returned value** @@ -2413,13 +2413,13 @@ Type: [UInt16](../../sql-reference/data-types/int-uint.md). 
Query: -``` sql +```sql SELECT getServerPort('tcp_port'); ``` Result: -``` text +```text ┌─getServerPort('tcp_port')─┐ │ 9000 │ └───────────────────────────┘ @@ -2433,7 +2433,7 @@ In contrast to [initialQueryID](#initial-query-id) function, `queryID` can retur **Syntax** -``` sql +```sql queryID() ``` @@ -2447,7 +2447,7 @@ Type: [String](../../sql-reference/data-types/string.md) Query: -``` sql +```sql CREATE TABLE tmp (str String) ENGINE = Log; INSERT INTO tmp (*) VALUES ('a'); SELECT count(DISTINCT t) FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); @@ -2455,7 +2455,7 @@ SELECT count(DISTINCT t) FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3} Result: -``` text +```text ┌─count()─┐ │ 3 │ └─────────┘ @@ -2469,7 +2469,7 @@ In contrast to [queryID](#query-id) function, `initialQueryID` returns the same **Syntax** -``` sql +```sql initialQueryID() ``` @@ -2483,7 +2483,7 @@ Type: [String](../../sql-reference/data-types/string.md) Query: -``` sql +```sql CREATE TABLE tmp (str String) ENGINE = Log; INSERT INTO tmp (*) VALUES ('a'); SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); @@ -2491,7 +2491,7 @@ SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0 Result: -``` text +```text ┌─count()─┐ │ 1 │ └─────────┘ @@ -2504,7 +2504,7 @@ If a query is not distributed then constant value `0` is returned. **Syntax** -``` sql +```sql shardNum() ``` @@ -2520,7 +2520,7 @@ In the following example a configuration with two shards is used. The query is e Query: -``` sql +```sql CREATE TABLE shard_num_example (dummy UInt8) ENGINE=Distributed(test_cluster_two_shards_localhost, system, one, dummy); SELECT dummy, shardNum(), shardCount() FROM shard_num_example; @@ -2528,7 +2528,7 @@ SELECT dummy, shardNum(), shardCount() FROM shard_num_example; Result: -``` text +```text ┌─dummy─┬─shardNum()─┬─shardCount()─┐ │ 0 │ 2 │ 2 │ │ 0 │ 1 │ 2 │ @@ -2546,7 +2546,7 @@ If a query is not distributed then constant value `0` is returned. **Syntax** -``` sql +```sql shardCount() ``` @@ -2566,7 +2566,7 @@ Returns a string with the current OS kernel version. **Syntax** -``` sql +```sql getOSKernelVersion() ``` @@ -2584,13 +2584,13 @@ Type: [String](../../sql-reference/data-types/string.md). Query: -``` sql +```sql SELECT getOSKernelVersion(); ``` Result: -``` text +```text ┌─getOSKernelVersion()────┐ │ Linux 4.15.0-55-generic │ └─────────────────────────┘ @@ -2602,7 +2602,7 @@ Returns the uptime of the current ZooKeeper session in seconds. **Syntax** -``` sql +```sql zookeeperSessionUptime() ``` @@ -2620,13 +2620,13 @@ Type: [UInt32](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT zookeeperSessionUptime(); ``` Result: -``` text +```text ┌─zookeeperSessionUptime()─┐ │ 286 │ └──────────────────────────┘ @@ -2638,7 +2638,7 @@ Generates random table structure in a format `column1_name column1_type, column2 **Syntax** -``` sql +```sql generateRandomStructure([number_of_columns, seed]) ``` @@ -2659,13 +2659,13 @@ Type: [String](../../sql-reference/data-types/string.md). 
Query: -``` sql +```sql SELECT generateRandomStructure() ``` Result: -``` text +```text ┌─generateRandomStructure()─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ c1 Decimal32(5), c2 Date, c3 Tuple(LowCardinality(String), Int128, UInt64, UInt16, UInt8, IPv6), c4 Array(UInt128), c5 UInt32, c6 IPv4, c7 Decimal256(64), c8 Decimal128(3), c9 UInt256, c10 UInt64, c11 DateTime │ └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ @@ -2673,13 +2673,13 @@ Result: Query: -``` sql +```sql SELECT generateRandomStructure(1) ``` Result: -``` text +```text ┌─generateRandomStructure(1)─┐ │ c1 Map(UInt256, UInt16) │ └────────────────────────────┘ @@ -2687,13 +2687,13 @@ Result: Query: -``` sql +```sql SELECT generateRandomStructure(NULL, 33) ``` Result: -``` text +```text ┌─generateRandomStructure(NULL, 33)─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ c1 DateTime, c2 Enum8('c2V0' = 0, 'c2V1' = 1, 'c2V2' = 2, 'c2V3' = 3), c3 LowCardinality(Nullable(FixedString(30))), c4 Int16, c5 Enum8('c5V0' = 0, 'c5V1' = 1, 'c5V2' = 2, 'c5V3' = 3), c6 Nullable(UInt8), c7 String, c8 Nested(e1 IPv4, e2 UInt8, e3 UInt16, e4 UInt16, e5 Int32, e6 Map(Date, Decimal256(70))) │ └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ @@ -2709,7 +2709,7 @@ Converts ClickHouse table structure to CapnProto schema. **Syntax** -``` sql +```sql structureToCapnProtoSchema(structure) ``` @@ -2720,7 +2720,7 @@ structureToCapnProtoSchema(structure) **Returned value** -- CapnProto schema +- CapnProto schema Type: [String](../../sql-reference/data-types/string.md). @@ -2728,13 +2728,13 @@ Type: [String](../../sql-reference/data-types/string.md). Query: -``` sql +```sql SELECT structureToCapnProtoSchema('column1 String, column2 UInt32, column3 Array(String)') FORMAT RawBLOB ``` Result: -``` text +```text @0xf96402dd754d0eb7; struct Message @@ -2747,13 +2747,13 @@ struct Message Query: -``` sql +```sql SELECT structureToCapnProtoSchema('column1 Nullable(String), column2 Tuple(element1 UInt32, element2 Array(String)), column3 Map(String, String)') FORMAT RawBLOB ``` Result: -``` text +```text @0xd1c8320fecad2b7f; struct Message @@ -2788,13 +2788,13 @@ struct Message Query: -``` sql +```sql SELECT structureToCapnProtoSchema('column1 String, column2 UInt32', 'Root') FORMAT RawBLOB ``` Result: -``` text +```text @0x96ab2d4ab133c6e1; struct Root @@ -2810,7 +2810,7 @@ Converts ClickHouse table structure to Protobuf schema. **Syntax** -``` sql +```sql structureToProtobufSchema(structure) ``` @@ -2829,13 +2829,13 @@ Type: [String](../../sql-reference/data-types/string.md). 
Query: -``` sql +```sql SELECT structureToProtobufSchema('column1 String, column2 UInt32, column3 Array(String)') FORMAT RawBLOB ``` Result: -``` text +```text syntax = "proto3"; message Message @@ -2848,13 +2848,13 @@ message Message Query: -``` sql +```sql SELECT structureToProtobufSchema('column1 Nullable(String), column2 Tuple(element1 UInt32, element2 Array(String)), column3 Map(String, String)') FORMAT RawBLOB ``` Result: -``` text +```text syntax = "proto3"; message Message @@ -2872,13 +2872,13 @@ message Message Query: -``` sql +```sql SELECT structureToProtobufSchema('column1 String, column2 UInt32', 'Root') FORMAT RawBLOB ``` Result: -``` text +```text syntax = "proto3"; message Root @@ -3050,7 +3050,7 @@ Calculates minimum required sample size for an A/B test comparing conversions (p **Syntax** -``` sql +```sql minSampleSizeConversion(baseline, mde, power, alpha) ``` @@ -3075,13 +3075,13 @@ A named [Tuple](../data-types/tuple.md) with 3 elements: The following query calculates the required sample size for an A/B test with baseline conversion of 25%, MDE of 3%, significance level of 5%, and the desired statistical power of 80%: -``` sql +```sql SELECT minSampleSizeConversion(0.25, 0.03, 0.80, 0.05) AS sample_size; ``` Result: -``` text +```text ┌─sample_size───────────────────┐ │ (3396.077603219163,0.22,0.28) │ └───────────────────────────────┘ @@ -3093,7 +3093,7 @@ Calculates minimum required sample size for an A/B test comparing means of a con **Syntax** -``` sql +```sql minSampleSizeContinous(baseline, sigma, mde, power, alpha) ``` @@ -3105,7 +3105,7 @@ Uses the formula described in [this article](https://towardsdatascience.com/requ - `baseline` — Baseline value of a metric. [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). - `sigma` — Baseline standard deviation of a metric. [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). -- `mde` — Minimum detectable effect (MDE) as percentage of the baseline value (e.g. for a baseline value 112.25 the MDE 0.03 means an expected change to 112.25 ± 112.25*0.03). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). +- `mde` — Minimum detectable effect (MDE) as percentage of the baseline value (e.g. for a baseline value 112.25 the MDE 0.03 means an expected change to 112.25 ± 112.25\*0.03). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). - `power` — Required statistical power of a test (1 - probability of Type II error). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). - `alpha` — Required significance level of a test (probability of Type I error). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). 
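
A minimal sketch of the underlying calculation (editorial; it assumes the usual normal-approximation formula for comparing two means from the article linked above, with `z_{q}` denoting standard-normal quantiles):

```text
sample_size ≈ 2 · (z_{1 - alpha/2} + z_{power})² · sigma² / (baseline · mde)²
```

For the example below (baseline = 112.25, sigma = 21.1, mde = 0.03, power = 0.8, alpha = 0.05) this gives ≈ 616.3, matching the query result; the other two tuple elements are simply `baseline * (1 ± mde)`, i.e. 108.8825 and 115.6175.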
@@ -3121,13 +3121,13 @@ A named [Tuple](../data-types/tuple.md) with 3 elements: The following query calculates the required sample size for an A/B test on a metric with baseline value of 112.25, standard deviation of 21.1, MDE of 3%, significance level of 5%, and the desired statistical power of 80%: -``` sql +```sql SELECT minSampleSizeContinous(112.25, 21.1, 0.03, 0.80, 0.05) AS sample_size; ``` Result: -``` text +```text ┌─sample_size───────────────────────────┐ │ (616.2931945826209,108.8825,115.6175) │ └───────────────────────────────────────┘ From 8abb85948c4b401a61cdf6dc6cf33b7ac9df2279 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 12 Mar 2024 17:03:11 +0000 Subject: [PATCH 219/374] Fix flaky test and add separate to show unexpected behaviour --- .../01603_insert_select_too_many_parts.sql | 3 ++- ...t_select_too_many_parts_multithread.reference | 1 + ..._insert_select_too_many_parts_multithread.sql | 16 ++++++++++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.reference create mode 100644 tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql diff --git a/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql b/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql index a56b680e212..eea52282cf4 100644 --- a/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql +++ b/tests/queries/0_stateless/01603_insert_select_too_many_parts.sql @@ -5,7 +5,8 @@ SYSTEM STOP MERGES too_many_parts; SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; -- exception is not thrown if threshold is exceeded when multi-block INSERT is already started. -INSERT INTO too_many_parts SELECT * FROM numbers(10); +-- Single thread is used as different threads check it separately https://github.com/ClickHouse/ClickHouse/issues/61158 +INSERT INTO too_many_parts SELECT * FROM numbers(10) SETTINGS max_insert_threads=1; SELECT count() FROM too_many_parts; -- exception is thrown if threshold is exceeded on new INSERT. diff --git a/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.reference b/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.reference new file mode 100644 index 00000000000..29d6383b52c --- /dev/null +++ b/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.reference @@ -0,0 +1 @@ +100 diff --git a/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql b/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql new file mode 100644 index 00000000000..00cf262add5 --- /dev/null +++ b/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql @@ -0,0 +1,16 @@ +# Tags: disabled +# TODO: Fix parts_to_throw_insert logic for parallel MergeTreeSink onStart calls +DROP TABLE IF EXISTS too_many_parts; + +CREATE TABLE too_many_parts (x UInt64) ENGINE = MergeTree ORDER BY tuple() SETTINGS parts_to_delay_insert = 5, parts_to_throw_insert = 5; + +SYSTEM STOP MERGES too_many_parts; +SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_threads=100, max_insert_threads=100; + +-- exception is not thrown if threshold is exceeded when multi-block INSERT is already started. 
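-- Editorial note on the expected behaviour (the test itself is tagged disabled above): with one-row blocks
-- and max_insert_threads = 100, each parallel MergeTreeSink checks parts_to_throw_insert independently
-- (see https://github.com/ClickHouse/ClickHouse/issues/61158), so the 100-row INSERT below creates 100
-- single-row parts without throwing and the first SELECT returns 100; only the next INSERT hits the limit
-- and fails with TOO_MANY_PARTS (error 252).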
+INSERT INTO too_many_parts SELECT * FROM numbers_mt(100); +SELECT count() FROM too_many_parts; + +INSERT INTO too_many_parts SELECT * FROM numbers_mt(10); -- { serverError 252 } + +DROP TABLE too_many_parts; From f3f1f611a45500f82c581e5ec34810e77a02cfdc Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 12 Mar 2024 17:26:42 +0000 Subject: [PATCH 220/374] Better --- base/base/defines.h | 10 ++++++++-- src/Parsers/ASTQueryWithTableAndOutput.h | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index 1f02748633d..3308315b27f 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -108,16 +108,22 @@ { [[noreturn]] void abortOnFailedAssertion(const String & description); } - #define chassert(x) do { static_cast(x) ? void(0) : ::DB::abortOnFailedAssertion(#x); } while (0) + #define chassert_1(x, ...) do { static_cast(x) ? void(0) : ::DB::abortOnFailedAssertion(#x); } while (0) + #define chassert_2(x, comment, ...) do { static_cast(x) ? void(0) : ::DB::abortOnFailedAssertion(comment); } while (0) #define UNREACHABLE() abort() // clang-format off #else /// Here sizeof() trick is used to suppress unused warning for result, /// since simple "(void)x" will evaluate the expression, while /// "sizeof(!(x))" will not. - #define chassert(x) (void)sizeof(!(x)) + #define chassert_1(x, ...) (void)sizeof(!(x)) + #define chassert_2(x, comment, ...) (void)sizeof(!(x)) #define UNREACHABLE() __builtin_unreachable() #endif + #define CHASSERT_IMPL(_1,_2, N,...) N(_1, _2) + #define CHASSERT_IMPL_(tuple) CHASSERT_IMPL tuple + #define chassert(...) CHASSERT_IMPL_((__VA_ARGS__, chassert_2, chassert_1)) + #endif /// Macros for Clang Thread Safety Analysis (TSA). They can be safely ignored by other compilers. diff --git a/src/Parsers/ASTQueryWithTableAndOutput.h b/src/Parsers/ASTQueryWithTableAndOutput.h index 1b8621fb63b..358291d9aa8 100644 --- a/src/Parsers/ASTQueryWithTableAndOutput.h +++ b/src/Parsers/ASTQueryWithTableAndOutput.h @@ -61,6 +61,7 @@ protected: settings.ostr << '.'; } + chassert(table != nullptr, "Table is empty for the ASTQueryWithTableAndOutputImpl."); table->formatImpl(settings, state, frame); } }; From ca282ab4510c3f06f6433de8349b28ce3527f436 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 12 Mar 2024 17:33:09 +0000 Subject: [PATCH 221/374] Better --- base/base/defines.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index 3308315b27f..627c50c27d2 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -120,9 +120,9 @@ #define chassert_2(x, comment, ...) (void)sizeof(!(x)) #define UNREACHABLE() __builtin_unreachable() #endif - #define CHASSERT_IMPL(_1,_2, N,...) N(_1, _2) - #define CHASSERT_IMPL_(tuple) CHASSERT_IMPL tuple - #define chassert(...) CHASSERT_IMPL_((__VA_ARGS__, chassert_2, chassert_1)) + #define CHASSERT_DISPATCH(_1,_2, N,...) N(_1, _2) + #define CHASSERT_INVOKE(tuple) CHASSERT_DISPATCH tuple + #define chassert(...) 
CHASSERT_INVOKE((__VA_ARGS__, chassert_2, chassert_1)) #endif From ddd2d92d0fbd31dad7e091ce1d828eb65fc34338 Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Tue, 12 Mar 2024 18:40:20 +0100 Subject: [PATCH 222/374] Update curl to curl with data race fix --- contrib/curl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/curl b/contrib/curl index 5ce164e0e92..1a05e833f8f 160000 --- a/contrib/curl +++ b/contrib/curl @@ -1 +1 @@ -Subproject commit 5ce164e0e9290c96eb7d502173426c0a135ec008 +Subproject commit 1a05e833f8f7140628b27882b10525fd9ec4b873 From 296f7a1da2e5f5297fd26eb878071e4e60ac21fa Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 12 Mar 2024 19:02:58 +0100 Subject: [PATCH 223/374] Fix --- src/Disks/ObjectStorages/Web/WebObjectStorage.cpp | 4 ++++ src/IO/ReadWriteBufferFromHTTP.cpp | 2 +- src/IO/ReadWriteBufferFromHTTP.h | 3 +++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 4adb92cf5c8..7e942a6cf6f 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -86,6 +86,10 @@ WebObjectStorage::loadFiles(const String & path, const std::unique_lockhasNotFoundURL()) + return {}; + auto [it, inserted] = files.add(path, FileData::createDirectoryInfo(true)); if (!inserted) { diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index bcbec97537a..fdc8ef04d2e 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -449,6 +449,7 @@ bool ReadWriteBufferFromHTTP::nextImpl() if (http_skip_not_found_url && e.getHTTPStatus() == Poco::Net::HTTPResponse::HTTPStatus::HTTP_NOT_FOUND) { next_result = false; + has_not_found_url = true; return; } @@ -740,4 +741,3 @@ ReadWriteBufferFromHTTP::HTTPFileInfo ReadWriteBufferFromHTTP::parseFileInfo(con } } - diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index d0d792430c0..1b7437ea0c6 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -79,6 +79,7 @@ private: const bool use_external_buffer; const bool http_skip_not_found_url; + bool has_not_found_url = false; std::function out_stream_callback; @@ -183,6 +184,8 @@ public: std::optional tryGetLastModificationTime(); + bool hasNotFoundURL() const { return has_not_found_url; } + HTTPFileInfo getFileInfo(); static HTTPFileInfo parseFileInfo(const Poco::Net::HTTPResponse & response, size_t requested_range_begin); }; From c947484fe0d788dd46384b90987b59694f0a0b77 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 12 Mar 2024 19:36:10 +0100 Subject: [PATCH 224/374] Fxi again --- tests/integration/test_disk_types/test.py | 15 ++++++++++++--- .../test_endpoint_macro_substitution/test.py | 17 ++++++++++++----- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py index a53d073d30b..b9b8ef2010d 100644 --- a/tests/integration/test_disk_types/test.py +++ b/tests/integration/test_disk_types/test.py @@ -87,19 +87,28 @@ def test_different_types(cluster): def test_select_by_type(cluster): node = cluster.instances["node"] for name, disk_type in list(disk_types.items()): - if disk_type != "S3": + if disk_type == "Local": assert ( node.query( "SELECT name FROM system.disks WHERE type='" + disk_type + "'" ) == name + "\n" ) - else: + elif disk_type == "S3": assert ( node.query( - "SELECT name FROM 
system.disks WHERE type='" + "SELECT name FROM system.disks WHERE object_storage_type='" + disk_type + "' ORDER BY name" ) == "disk_encrypted\ndisk_s3\n" ) + else: + assert ( + node.query( + "SELECT name FROM system.disks WHERE object_storage_type='" + + disk_type + + "'" + ) + == name + "\n" + ) diff --git a/tests/integration/test_endpoint_macro_substitution/test.py b/tests/integration/test_endpoint_macro_substitution/test.py index e161d8e82ff..ee72fb9b492 100644 --- a/tests/integration/test_endpoint_macro_substitution/test.py +++ b/tests/integration/test_endpoint_macro_substitution/test.py @@ -67,22 +67,29 @@ def test_different_types(cluster): def test_select_by_type(cluster): node = cluster.instances["node"] - fs = HdfsClient(hosts=cluster.hdfs_ip) - for name, disk_type in list(disk_types.items()): - if disk_type != "S3": + if disk_type == "Local": assert ( node.query( "SELECT name FROM system.disks WHERE type='" + disk_type + "'" ) == name + "\n" ) - else: + elif disk_type == "S3": assert ( node.query( - "SELECT name FROM system.disks WHERE type='" + "SELECT name FROM system.disks WHERE object_storage_type='" + disk_type + "' ORDER BY name" ) == "disk_encrypted\ndisk_s3\n" ) + else: + assert ( + node.query( + "SELECT name FROM system.disks WHERE object_storage_type='" + + disk_type + + "'" + ) + == name + "\n" + ) From ec80cc475ed8c0c3126aceb5db35ff20b9295451 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 12 Mar 2024 19:58:23 +0100 Subject: [PATCH 225/374] Fix formatting of overlay database --- src/Databases/DatabasesOverlay.cpp | 4 +++- .../0_stateless/03009_format_show_database.reference | 2 ++ tests/queries/0_stateless/03009_format_show_database.sh | 8 ++++++++ 3 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03009_format_show_database.reference create mode 100755 tests/queries/0_stateless/03009_format_show_database.sh diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 8cea3441698..c8705254e73 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -149,7 +149,9 @@ ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr */ ASTPtr DatabasesOverlay::getCreateDatabaseQuery() const { - return std::make_shared(); + auto query = std::make_shared(); + query->setDatabase(getDatabaseName()); + return query; } String DatabasesOverlay::getTableDataPath(const String & table_name) const diff --git a/tests/queries/0_stateless/03009_format_show_database.reference b/tests/queries/0_stateless/03009_format_show_database.reference new file mode 100644 index 00000000000..83cfd4c1a68 --- /dev/null +++ b/tests/queries/0_stateless/03009_format_show_database.reference @@ -0,0 +1,2 @@ +CREATE DATABASE default +UNKNOWN_DATABASE diff --git a/tests/queries/0_stateless/03009_format_show_database.sh b/tests/queries/0_stateless/03009_format_show_database.sh new file mode 100755 index 00000000000..7f33ad7b1e1 --- /dev/null +++ b/tests/queries/0_stateless/03009_format_show_database.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "show database default" +$CLICKHOUSE_LOCAL -q "show database default2" 2>&1 | grep -o 'UNKNOWN_DATABASE' From 15484bedadf62d05fdcaea842ce9d86bdafe57f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 12 Mar 2024 21:03:26 +0100 Subject: [PATCH 226/374] Fix clone for ASTQueryWithTableAndOutputImpl derived classes --- src/Parsers/TablePropertiesQueriesASTs.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/Parsers/TablePropertiesQueriesASTs.h b/src/Parsers/TablePropertiesQueriesASTs.h index e8e4bd8adb3..81ad975aa37 100644 --- a/src/Parsers/TablePropertiesQueriesASTs.h +++ b/src/Parsers/TablePropertiesQueriesASTs.h @@ -85,6 +85,15 @@ using ASTShowCreateDictionaryQuery = ASTQueryWithTableAndOutputImpl { +public: + ASTPtr clone() const override + { + auto res = std::make_shared(*this); + res->children.clear(); + cloneTableOptions(*res); + return res; + } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override { @@ -98,6 +107,15 @@ protected: class ASTShowCreateDatabaseQuery : public ASTQueryWithTableAndOutputImpl { +public: + ASTPtr clone() const override + { + auto res = std::make_shared(*this); + res->children.clear(); + cloneTableOptions(*res); + return res; + } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override { From ebd934e28696c1d01f6ee225c62b4e76a026c337 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 05:05:23 +0100 Subject: [PATCH 227/374] Continue --- src/Functions/FunctionsConversion.cpp | 390 +++++++++++++------------- 1 file changed, 192 insertions(+), 198 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index e2d9c4a173e..846b6c31062 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -179,6 +179,131 @@ struct ToDateTimeImpl }; +/// Implementation of toDate function. + +template +struct ToDateTransform32Or64 +{ + static constexpr auto name = "toDate"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from > MAX_DATETIME_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); + } + /// if value is smaller (or equal) than maximum day value for Date, than treat it as day num, + /// otherwise treat it as unix timestamp. This is a bit weird, but we leave this behavior. + if (from <= DATE_LUT_MAX_DAY_NUM) + return from; + else + return time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); + } +}; + + +template +struct ToDateTransform32Or64Signed +{ + static constexpr auto name = "toDate"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + { + // TODO: decide narrow or extended range based on FromType + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from < 0 || from > MAX_DATE_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); + } + else + { + if (from < 0) + return 0; + } + return (from <= DATE_LUT_MAX_DAY_NUM) + ? 
static_cast(from) + : time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATE_TIMESTAMP))); + } +}; + +template +struct ToDateTransform8Or16Signed +{ + static constexpr auto name = "toDate"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + { + if (from < 0) + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); + else + return 0; + } + return from; + } +}; + +/// Implementation of toDate32 function. + +template +struct ToDate32Transform32Or64 +{ + static constexpr auto name = "toDate32"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + { + if (from < DATE_LUT_MAX_EXTEND_DAY_NUM) + return static_cast(from); + else + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type Date32", from); + } + return time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATETIME64_TIMESTAMP))); + } + } +}; + +template +struct ToDate32Transform32Or64Signed +{ + static constexpr auto name = "toDate32"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + { + static const Int32 daynum_min_offset = -static_cast(time_zone.getDayNumOffsetEpoch()); + + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from < daynum_min_offset || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type Date32", from); + } + + if (from < daynum_min_offset) + return daynum_min_offset; + + return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) + ? static_cast(from) + : time_zone.toDayNum(std::min(time_t(Int64(from)), time_t(MAX_DATETIME64_TIMESTAMP))); + } +}; + +template +struct ToDate32Transform8Or16Signed +{ + static constexpr auto name = "toDate32"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + { + return from; + } +}; + + /// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; }; @@ -241,6 +366,73 @@ struct ConvertImpl return DateTimeTransformImpl, false>::execute( arguments, result_type, input_rows_count); } + /** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, + * Float32, Float64) to Date. If the + * number is less than 65536, then it is treated as DayNum, and if it's greater or equals to 65536, + * then treated as unix timestamp. If the number exceeds UInt32, saturate to MAX_UINT32 then as DayNum. + * It's a bit illogical, as we actually have two functions in one. + * But allows to support frequent case, + * when user write toDate(UInt32), expecting conversion of unix timestamp to Date. + * (otherwise such usage would be frequent mistake). 
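  * Illustrative examples (added editorially, not part of the committed comment), assuming the semantics described above:
  *   SELECT toDate(17000)      -- below 65536, treated as a day number  -> '2016-07-18'
  *   SELECT toDate(1500000000) -- treated as a unix timestamp (UTC)     -> '2017-07-14'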
+ */ + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } else { using ColVecFrom = typename FromDataType::ColumnType; @@ -490,208 +682,10 @@ struct ConvertImpl }; -/// Implementation of toDate function. - -template -struct ToDateTransform32Or64 -{ - static constexpr auto name = "toDate"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); - } - /// if value is smaller (or equal) than maximum day value for Date, than treat it as day num, - /// otherwise treat it as unix timestamp. This is a bit weird, but we leave this behavior. - if (from <= DATE_LUT_MAX_DAY_NUM) - return from; - else - return time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); - } -}; - - -template -struct ToDateTransform32Or64Signed -{ - static constexpr auto name = "toDate"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - // TODO: decide narrow or extended range based on FromType - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < 0 || from > MAX_DATE_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); - } - else - { - if (from < 0) - return 0; - } - return (from <= DATE_LUT_MAX_DAY_NUM) - ? static_cast(from) - : time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATE_TIMESTAMP))); - } -}; - -template -struct ToDateTransform8Or16Signed -{ - static constexpr auto name = "toDate"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if (from < 0) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); - else - return 0; - } - return from; - } -}; - template struct ConvertImpl : DateTimeTransformImpl, false> {}; -/// Implementation of toDate32 function. 
- -template -struct ToDate32Transform32Or64 -{ - static constexpr auto name = "toDate32"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - if (from < DATE_LUT_MAX_EXTEND_DAY_NUM) - return static_cast(from); - else - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type Date32", from); - } - return time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATETIME64_TIMESTAMP))); - } - } -}; - -template -struct ToDate32Transform32Or64Signed -{ - static constexpr auto name = "toDate32"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - static const Int32 daynum_min_offset = -static_cast(time_zone.getDayNumOffsetEpoch()); - - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < daynum_min_offset || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type Date32", from); - } - - if (from < daynum_min_offset) - return daynum_min_offset; - - return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) - ? static_cast(from) - : time_zone.toDayNum(std::min(time_t(Int64(from)), time_t(MAX_DATETIME64_TIMESTAMP))); - } -}; - -template -struct ToDate32Transform8Or16Signed -{ - static constexpr auto name = "toDate32"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - return from; - } -}; - -/** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, - * Float32, Float64) to Date. If the - * number is less than 65536, then it is treated as DayNum, and if it's greater or equals to 65536, - * then treated as unix timestamp. If the number exceeds UInt32, saturate to MAX_UINT32 then as DayNum. - * It's a bit illogical, as we actually have two functions in one. - * But allows to support frequent case, - * when user write toDate(UInt32), expecting conversion of unix timestamp to Date. - * (otherwise such usage would be frequent mistake). 
- */ -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - template struct ToDateTimeTransform64 From dbfda047014ab1b3e4cd81e46c5ce623387d61fe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 05:18:51 +0100 Subject: [PATCH 228/374] Continue --- src/Functions/FunctionsConversion.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 846b6c31062..33ef12b9d23 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -366,6 +366,12 @@ struct ConvertImpl return DateTimeTransformImpl, false>::execute( arguments, result_type, input_rows_count); } + else if constexpr (std::is_same_v && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count, TransformDateTime64(assert_cast(*named_from.type).getScale())); + } /** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, * Float32, Float64) to Date. 
If the * number is less than 65536, then it is treated as DayNum, and if it's greater or equals to 65536, @@ -682,11 +688,6 @@ struct ConvertImpl }; -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - - template struct ToDateTimeTransform64 { From a5383a4619a18f0979594e25701f8f7893d62d56 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 05:27:05 +0100 Subject: [PATCH 229/374] Continue --- src/Functions/FunctionsConversion.cpp | 410 +++++++++++++------------- 1 file changed, 205 insertions(+), 205 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 33ef12b9d23..ffcb0086e27 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -303,6 +303,183 @@ struct ToDate32Transform8Or16Signed } }; +template +struct ToDateTimeTransform64 +{ + static constexpr auto name = "toDateTime"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from > MAX_DATETIME_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); + } + return static_cast(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); + } +}; + +template +struct ToDateTimeTransformSigned +{ + static constexpr auto name = "toDateTime"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + { + if (from < 0) + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); + else + return 0; + } + return from; + } +}; + +template +struct ToDateTimeTransform64Signed +{ + static constexpr auto name = "toDateTime"; + + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from < 0 || from > MAX_DATETIME_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); + } + + if (from < 0) + return 0; + return static_cast(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); + } +}; + +/** Conversion of numeric to DateTime64 + */ + +template +struct ToDateTime64TransformUnsigned +{ + static constexpr auto name = "toDateTime64"; + + const DateTime64::NativeType scale_multiplier = 1; + + ToDateTime64TransformUnsigned(UInt32 scale = 0) /// NOLINT + : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) + {} + + NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); + else + return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); + } + else + return DecimalUtils::decimalFromComponentsWithMultiplier(std::min(from, MAX_DATETIME64_TIMESTAMP), 0, scale_multiplier); + } +}; +template +struct ToDateTime64TransformSigned +{ + static constexpr auto name = "toDateTime64"; + + const DateTime64::NativeType 
scale_multiplier = 1; + + ToDateTime64TransformSigned(UInt32 scale = 0) /// NOLINT + : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) + {} + + NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from < MIN_DATETIME64_TIMESTAMP || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); + } + from = static_cast(std::max(from, MIN_DATETIME64_TIMESTAMP)); + from = static_cast(std::min(from, MAX_DATETIME64_TIMESTAMP)); + + return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); + } +}; +template +struct ToDateTime64TransformFloat +{ + static constexpr auto name = "toDateTime64"; + + const UInt32 scale = 1; + + ToDateTime64TransformFloat(UInt32 scale_ = 0) /// NOLINT + : scale(scale_) + {} + + NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const + { + if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) + { + if (from < MIN_DATETIME64_TIMESTAMP || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] + throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); + } + + from = std::max(from, static_cast(MIN_DATETIME64_TIMESTAMP)); + from = std::min(from, static_cast(MAX_DATETIME64_TIMESTAMP)); + return convertToDecimal(from, scale); + } +}; + +/** Conversion of DateTime64 to Date or DateTime: discards fractional part. + */ +template +struct FromDateTime64Transform +{ + static constexpr auto name = Transform::name; + + const DateTime64::NativeType scale_multiplier = 1; + + FromDateTime64Transform(UInt32 scale) /// NOLINT + : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) + {} + + auto execute(DateTime64::NativeType dt, const DateLUTImpl & time_zone) const + { + const auto c = DecimalUtils::splitWithScaleMultiplier(DateTime64(dt), scale_multiplier); + return Transform::execute(static_cast(c.whole), time_zone); + } +}; + +struct ToDateTime64Transform +{ + static constexpr auto name = "toDateTime64"; + + const DateTime64::NativeType scale_multiplier = 1; + + ToDateTime64Transform(UInt32 scale = 0) /// NOLINT + : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) + {} + + DateTime64::NativeType execute(UInt16 d, const DateLUTImpl & time_zone) const + { + const auto dt = ToDateTimeImpl<>::execute(d, time_zone); + return execute(dt, time_zone); + } + + DateTime64::NativeType execute(Int32 d, const DateLUTImpl & time_zone) const + { + Int64 dt = static_cast(time_zone.fromDayNum(ExtendedDayNum(d))); + return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); + } + + DateTime64::NativeType execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) const + { + return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); + } +}; + /// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; }; @@ -439,6 +616,34 @@ struct ConvertImpl return DateTimeTransformImpl, false>::execute( arguments, result_type, input_rows_count); } + /// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. 
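    /// For illustration (editor's addition), assuming the transforms defined above: e.g.
    /// toDateTime(1500000000) gives '2017-07-14 02:40:00' (UTC), while a negative input either
    /// saturates to 1970-01-01 00:00:00 or throws, depending on date_time_overflow_behavior.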
+ else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } else { using ColVecFrom = typename FromDataType::ColumnType; @@ -688,164 +893,6 @@ struct ConvertImpl }; -template -struct ToDateTimeTransform64 -{ - static constexpr auto name = "toDateTime"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); - } - return static_cast(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); - } -}; - -template -struct ToDateTimeTransformSigned -{ - static constexpr auto name = "toDateTime"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if (from < 0) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); - else - return 0; - } - return from; - } -}; - -template -struct ToDateTimeTransform64Signed -{ - static constexpr auto name = "toDateTime"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < 0 || from > MAX_DATETIME_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); - } - - if (from < 0) - return 0; - return static_cast(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); - } -}; - -/// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. 
-template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -/** Conversion of numeric to DateTime64 - */ - -template -struct ToDateTime64TransformUnsigned -{ - static constexpr auto name = "toDateTime64"; - - const DateTime64::NativeType scale_multiplier = 1; - - ToDateTime64TransformUnsigned(UInt32 scale = 0) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); - else - return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); - } - else - return DecimalUtils::decimalFromComponentsWithMultiplier(std::min(from, MAX_DATETIME64_TIMESTAMP), 0, scale_multiplier); - } -}; -template -struct ToDateTime64TransformSigned -{ - static constexpr auto name = "toDateTime64"; - - const DateTime64::NativeType scale_multiplier = 1; - - ToDateTime64TransformSigned(UInt32 scale = 0) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < MIN_DATETIME64_TIMESTAMP || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); - } - from = static_cast(std::max(from, MIN_DATETIME64_TIMESTAMP)); - from = static_cast(std::min(from, MAX_DATETIME64_TIMESTAMP)); - - return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); - } -}; -template -struct ToDateTime64TransformFloat -{ - static constexpr auto name = "toDateTime64"; - - const UInt32 scale = 1; - - ToDateTime64TransformFloat(UInt32 scale_ = 0) /// NOLINT - : scale(scale_) - {} - - NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < MIN_DATETIME64_TIMESTAMP || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); - } - - from = std::max(from, static_cast(MIN_DATETIME64_TIMESTAMP)); - from = std::min(from, static_cast(MAX_DATETIME64_TIMESTAMP)); - return convertToDecimal(from, scale); - } -}; - template struct ConvertImpl : DateTimeTransformImpl, false> {}; @@ -875,26 +922,6 @@ struct ConvertImpl, false> {}; -/** Conversion of DateTime64 to Date or DateTime: discards fractional part. 
- */ -template -struct FromDateTime64Transform -{ - static constexpr auto name = Transform::name; - - const DateTime64::NativeType scale_multiplier = 1; - - FromDateTime64Transform(UInt32 scale) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - auto execute(DateTime64::NativeType dt, const DateLUTImpl & time_zone) const - { - const auto c = DecimalUtils::splitWithScaleMultiplier(DateTime64(dt), scale_multiplier); - return Transform::execute(static_cast(c.whole), time_zone); - } -}; - /** Conversion of DateTime64 to Date or DateTime: discards fractional part. */ template @@ -905,33 +932,6 @@ template : DateTimeTransformImpl>, false> {}; -struct ToDateTime64Transform -{ - static constexpr auto name = "toDateTime64"; - - const DateTime64::NativeType scale_multiplier = 1; - - ToDateTime64Transform(UInt32 scale = 0) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - DateTime64::NativeType execute(UInt16 d, const DateLUTImpl & time_zone) const - { - const auto dt = ToDateTimeImpl<>::execute(d, time_zone); - return execute(dt, time_zone); - } - - DateTime64::NativeType execute(Int32 d, const DateLUTImpl & time_zone) const - { - Int64 dt = static_cast(time_zone.fromDayNum(ExtendedDayNum(d))); - return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); - } - - DateTime64::NativeType execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) const - { - return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); - } -}; /** Conversion of Date or DateTime to DateTime64: add zero sub-second part. */ From 5336a8911794662d6a207c5e4adda14c93871562 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 05:36:09 +0100 Subject: [PATCH 230/374] Continue --- src/Functions/FunctionsConversion.cpp | 56 +++++++++++++-------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index ffcb0086e27..af240f88560 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -644,6 +644,33 @@ struct ConvertImpl return DateTimeTransformImpl, false>::execute( arguments, result_type, input_rows_count); } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } else { using ColVecFrom = typename FromDataType::ColumnType; @@ -893,35 +920,6 @@ struct ConvertImpl }; -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - - /** Conversion of DateTime64 to Date or DateTime: discards fractional part. 
*/ template From 460692ac74d68a38bad1b4786b3bbabd377189e6 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Wed, 13 Mar 2024 00:36:23 -0400 Subject: [PATCH 231/374] Update docs/en/sql-reference/functions/other-functions.md --- docs/en/sql-reference/functions/other-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index df443eec0de..288905c83da 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -360,7 +360,7 @@ sleepEachRow(seconds) **Arguments** -- `seconds`: [Int](../../sql-reference/data-types/int-uint.md) The number of seconds to pause the query execution for each row in the result set. It can be a floating-point value to specify fractional seconds. +- `seconds`: [Int](../../sql-reference/data-types/int-uint.md) The number of seconds to pause the query execution for each row in the result set to a maximum of 3 seconds. It can be a floating-point value to specify fractional seconds. **Returned value** From b870d9d1cd767eddcbe98cb31abe0167bbdab488 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 05:45:33 +0100 Subject: [PATCH 232/374] Continue --- src/Functions/FunctionsConversion.cpp | 239 +++++++++++++------------- 1 file changed, 119 insertions(+), 120 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index af240f88560..574e99bd6a6 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -433,7 +433,7 @@ struct ToDateTime64TransformFloat }; /** Conversion of DateTime64 to Date or DateTime: discards fractional part. - */ + */ template struct FromDateTime64Transform { @@ -480,6 +480,98 @@ struct ToDateTime64Transform } }; +/** Transformation of numbers, dates, datetimes to strings: through formatting. 
+ */ +template +struct FormatImpl +{ + template + static ReturnType execute(const typename DataType::FieldType x, WriteBuffer & wb, const DataType *, const DateLUTImpl *) + { + writeText(x, wb); + return ReturnType(true); + } +}; + +template <> +struct FormatImpl +{ + template + static ReturnType execute(const DataTypeDate::FieldType x, WriteBuffer & wb, const DataTypeDate *, const DateLUTImpl * time_zone) + { + writeDateText(DayNum(x), wb, *time_zone); + return ReturnType(true); + } +}; + +template <> +struct FormatImpl +{ + template + static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl * time_zone) + { + writeDateText(ExtendedDayNum(x), wb, *time_zone); + return ReturnType(true); + } +}; + +template <> +struct FormatImpl +{ + template + static ReturnType execute(const DataTypeDateTime::FieldType x, WriteBuffer & wb, const DataTypeDateTime *, const DateLUTImpl * time_zone) + { + writeDateTimeText(x, wb, *time_zone); + return ReturnType(true); + } +}; + +template <> +struct FormatImpl +{ + template + static ReturnType execute(const DataTypeDateTime64::FieldType x, WriteBuffer & wb, const DataTypeDateTime64 * type, const DateLUTImpl * time_zone) + { + writeDateTimeText(DateTime64(x), type->getScale(), wb, *time_zone); + return ReturnType(true); + } +}; + + +template +struct FormatImpl> +{ + template + static ReturnType execute(const FieldType x, WriteBuffer & wb, const DataTypeEnum * type, const DateLUTImpl *) + { + static constexpr bool throw_exception = std::is_same_v; + + if constexpr (throw_exception) + { + writeString(type->getNameForValue(x), wb); + } + else + { + StringRef res; + bool is_ok = type->getNameForValue(x, res); + if (is_ok) + writeString(res, wb); + return ReturnType(is_ok); + } + } +}; + +template +struct FormatImpl> +{ + template + static ReturnType execute(const FieldType x, WriteBuffer & wb, const DataTypeDecimal * type, const DateLUTImpl *) + { + writeText(x, type->getScale(), wb, false); + return ReturnType(true); + } +}; + /// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; }; @@ -671,6 +763,32 @@ struct ConvertImpl return DateTimeTransformImpl, false>::execute( arguments, result_type, input_rows_count); } + /// Conversion of DateTime64 to Date or DateTime: discards fractional part. + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl>, false>::execute( + arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); + } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl>, false>::execute( + arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); + } + /// Conversion of Date or DateTime to DateTime64: add zero sub-second part. + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl::execute( + arguments, result_type, input_rows_count); + } else { using ColVecFrom = typename FromDataType::ColumnType; @@ -920,125 +1038,6 @@ struct ConvertImpl }; -/** Conversion of DateTime64 to Date or DateTime: discards fractional part. 
- */ -template -struct ConvertImpl - : DateTimeTransformImpl>, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl>, false> {}; - - -/** Conversion of Date or DateTime to DateTime64: add zero sub-second part. - */ -template -struct ConvertImpl - : DateTimeTransformImpl {}; - -template -struct ConvertImpl - : DateTimeTransformImpl {}; - -template -struct ConvertImpl - : DateTimeTransformImpl {}; - - -/** Transformation of numbers, dates, datetimes to strings: through formatting. - */ -template -struct FormatImpl -{ - template - static ReturnType execute(const typename DataType::FieldType x, WriteBuffer & wb, const DataType *, const DateLUTImpl *) - { - writeText(x, wb); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDate::FieldType x, WriteBuffer & wb, const DataTypeDate *, const DateLUTImpl * time_zone) - { - writeDateText(DayNum(x), wb, *time_zone); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl * time_zone) - { - writeDateText(ExtendedDayNum(x), wb, *time_zone); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDateTime::FieldType x, WriteBuffer & wb, const DataTypeDateTime *, const DateLUTImpl * time_zone) - { - writeDateTimeText(x, wb, *time_zone); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDateTime64::FieldType x, WriteBuffer & wb, const DataTypeDateTime64 * type, const DateLUTImpl * time_zone) - { - writeDateTimeText(DateTime64(x), type->getScale(), wb, *time_zone); - return ReturnType(true); - } -}; - - -template -struct FormatImpl> -{ - template - static ReturnType execute(const FieldType x, WriteBuffer & wb, const DataTypeEnum * type, const DateLUTImpl *) - { - static constexpr bool throw_exception = std::is_same_v; - - if constexpr (throw_exception) - { - writeString(type->getNameForValue(x), wb); - } - else - { - StringRef res; - bool is_ok = type->getNameForValue(x, res); - if (is_ok) - writeString(res, wb); - return ReturnType(is_ok); - } - } -}; - -template -struct FormatImpl> -{ - template - static ReturnType execute(const FieldType x, WriteBuffer & wb, const DataTypeDecimal * type, const DateLUTImpl *) - { - writeText(x, type->getScale(), wb, false); - return ReturnType(true); - } -}; - - /// DataTypeEnum to DataType free conversion template struct ConvertImpl, DataTypeNumber, Name, ConvertDefaultBehaviorTag> From 8d9a8fdfc1d91723513bd66184662a810e01b2fe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 05:52:01 +0100 Subject: [PATCH 233/374] Continue --- src/Functions/FunctionsConversion.cpp | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 574e99bd6a6..2ce508c4924 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -609,9 +609,12 @@ struct ConvertImpl { const ColumnWithTypeAndName & named_from = arguments[0]; - if constexpr (std::is_same_v && !FromDataType::is_parametric) + if constexpr ((std::is_same_v && !FromDataType::is_parametric) + || (std::is_same_v && std::is_same_v) + || (std::is_same_v && std::is_same_v)) { /// If types are the same, reuse the columns. 
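             /// (For illustration: CAST of a UInt64 column to UInt64 can return the input column as is.)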
+ /// Conversions between Enum and the underlying type are also free. return named_from.column; } else if constexpr ((std::is_same_v || std::is_same_v) @@ -1038,25 +1041,11 @@ struct ConvertImpl }; -/// DataTypeEnum to DataType free conversion -template -struct ConvertImpl, DataTypeNumber, Name, ConvertDefaultBehaviorTag> -{ - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) - { - return arguments[0].column; - } -}; - inline ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) { - ColumnUInt8::MutablePtr null_map = nullptr; - if (const auto * col_null = checkAndGetColumn(col.get())) - { - null_map = ColumnUInt8::create(); - null_map->insertRangeFrom(col_null->getNullMapColumn(), 0, col_null->size()); - } - return null_map; + if (const auto * col_nullable = checkAndGetColumn(col.get())) + return col_nullable->getNullMapColumn().mutate(); + return nullptr; } template From 17a7696a378c5cfaad18214284e340f1d2306170 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 06:06:07 +0100 Subject: [PATCH 234/374] Continue --- src/Functions/FunctionsConversion.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 2ce508c4924..56a0d74c60c 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1041,11 +1041,15 @@ struct ConvertImpl }; -inline ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) +ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) { + ColumnUInt8::MutablePtr null_map = nullptr; if (const auto * col_nullable = checkAndGetColumn(col.get())) - return col_nullable->getNullMapColumn().mutate(); - return nullptr; + { + null_map = ColumnUInt8::create(); + null_map->insertRangeFrom(col_nullable->getNullMapColumn(), 0, col_nullable->size()); + } + return null_map; } template From ecd6b88831a4251acf3f62d52a43302fa1ef534c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 06:15:17 +0100 Subject: [PATCH 235/374] Continue --- src/Functions/FunctionsConversion.cpp | 1272 ++++++++++++------------- 1 file changed, 635 insertions(+), 637 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 56a0d74c60c..16f547939fa 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -572,475 +572,6 @@ struct FormatImpl> } }; - -/// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. -struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; }; - -struct AccurateConvertStrategyAdditions -{ - UInt32 scale { 0 }; -}; - -struct AccurateOrNullConvertStrategyAdditions -{ - UInt32 scale { 0 }; -}; - - -struct ConvertDefaultBehaviorTag {}; -struct ConvertReturnNullOnErrorTag {}; -struct ConvertReturnZeroOnErrorTag {}; - -/** Conversion of number types to each other, enums to numbers, dates and datetimes to numbers and back: done by straight assignment. 
- * (Date is represented internally as number of days from some day; DateTime - as unix timestamp) - */ -template -struct ConvertImpl -{ - using FromFieldType = typename FromDataType::FieldType; - using ToFieldType = typename ToDataType::FieldType; - - template - static ColumnPtr NO_SANITIZE_UNDEFINED execute( - const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type [[maybe_unused]], size_t input_rows_count, - Additions additions [[maybe_unused]] = Additions()) - { - const ColumnWithTypeAndName & named_from = arguments[0]; - - if constexpr ((std::is_same_v && !FromDataType::is_parametric) - || (std::is_same_v && std::is_same_v) - || (std::is_same_v && std::is_same_v)) - { - /// If types are the same, reuse the columns. - /// Conversions between Enum and the underlying type are also free. - return named_from.column; - } - else if constexpr ((std::is_same_v || std::is_same_v) - && std::is_same_v) - { - /// Conversion of DateTime to Date: throw off time component. - /// Conversion of Date32 to Date. - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (std::is_same_v && std::is_same_v) - { - /// Conversion of DateTime to Date: throw off time component. - return DateTimeTransformImpl::execute( - arguments, result_type, input_rows_count); - } - else if constexpr ((std::is_same_v || std::is_same_v) - && std::is_same_v) - { - /// Conversion from Date/Date32 to DateTime. - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (std::is_same_v && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count, TransformDateTime64(assert_cast(*named_from.type).getScale())); - } - /** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, - * Float32, Float64) to Date. If the - * number is less than 65536, then it is treated as DayNum, and if it's greater or equals to 65536, - * then treated as unix timestamp. If the number exceeds UInt32, saturate to MAX_UINT32 then as DayNum. - * It's a bit illogical, as we actually have two functions in one. - * But allows to support frequent case, - * when user write toDate(UInt32), expecting conversion of unix timestamp to Date. - * (otherwise such usage would be frequent mistake). 
- */ - else if constexpr (( - std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v - || std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v - || std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - /// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. - else if constexpr (( - std::is_same_v - || std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (std::is_same_v - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v - || std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (std::is_same_v - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - else if constexpr (( - std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); - } - /// Conversion of DateTime64 to Date or DateTime: discards fractional part. - else if constexpr (std::is_same_v - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl>, false>::execute( - arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); - } - else if constexpr (std::is_same_v - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl>, false>::execute( - arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); - } - /// Conversion of Date or DateTime to DateTime64: add zero sub-second part. 
- else if constexpr (( - std::is_same_v - || std::is_same_v - || std::is_same_v) - && std::is_same_v - && std::is_same_v) - { - return DateTimeTransformImpl::execute( - arguments, result_type, input_rows_count); - } - else - { - using ColVecFrom = typename FromDataType::ColumnType; - using ColVecTo = typename ToDataType::ColumnType; - - if constexpr ((IsDataTypeDecimal || IsDataTypeDecimal) - && !(std::is_same_v || std::is_same_v) - && (!IsDataTypeDecimalOrNumber || !IsDataTypeDecimalOrNumber)) - { - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - named_from.column->getName(), Name::name); - } - - const ColVecFrom * col_from = checkAndGetColumn(named_from.column.get()); - if (!col_from) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - named_from.column->getName(), Name::name); - - typename ColVecTo::MutablePtr col_to = nullptr; - - if constexpr (IsDataTypeDecimal) - { - UInt32 scale; - - if constexpr (std::is_same_v - || std::is_same_v) - { - scale = additions.scale; - } - else - { - scale = additions; - } - - col_to = ColVecTo::create(0, scale); - } - else - col_to = ColVecTo::create(); - - const auto & vec_from = col_from->getData(); - auto & vec_to = col_to->getData(); - vec_to.resize(input_rows_count); - - ColumnUInt8::MutablePtr col_null_map_to; - ColumnUInt8::Container * vec_null_map_to [[maybe_unused]] = nullptr; - if constexpr (std::is_same_v) - { - col_null_map_to = ColumnUInt8::create(input_rows_count, false); - vec_null_map_to = &col_null_map_to->getData(); - } - - bool result_is_bool = isBool(result_type); - for (size_t i = 0; i < input_rows_count; ++i) - { - if constexpr (std::is_same_v) - { - if (result_is_bool) - { - vec_to[i] = vec_from[i] != FromFieldType(0); - continue; - } - } - - if constexpr (std::is_same_v && std::is_same_v) - { - static_assert( - std::is_same_v, - "UInt128 and UUID types must be same"); - - vec_to[i].items[1] = vec_from[i].toUnderType().items[0]; - vec_to[i].items[0] = vec_from[i].toUnderType().items[1]; - } - else if constexpr (std::is_same_v && std::is_same_v) - { - static_assert( - std::is_same_v, - "UInt128 and IPv6 types must be same"); - - vec_to[i].items[1] = std::byteswap(vec_from[i].toUnderType().items[0]); - vec_to[i].items[0] = std::byteswap(vec_from[i].toUnderType().items[1]); - } - else if constexpr (std::is_same_v != std::is_same_v) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Conversion between numeric types and UUID is not supported. " - "Probably the passed UUID is unquoted"); - } - else if constexpr ( - (std::is_same_v != std::is_same_v) - && !(is_any_of - || is_any_of)) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", - TypeName, TypeName); - } - else if constexpr (std::is_same_v != std::is_same_v - && !(std::is_same_v || std::is_same_v)) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Conversion between numeric types and IPv6 is not supported. 
" - "Probably the passed IPv6 is unquoted"); - } - else if constexpr (IsDataTypeDecimal || IsDataTypeDecimal) - { - if constexpr (std::is_same_v) - { - ToFieldType result; - bool convert_result = false; - - if constexpr (IsDataTypeDecimal && IsDataTypeDecimal) - convert_result = tryConvertDecimals(vec_from[i], col_from->getScale(), col_to->getScale(), result); - else if constexpr (IsDataTypeDecimal && IsDataTypeNumber) - convert_result = tryConvertFromDecimal(vec_from[i], col_from->getScale(), result); - else if constexpr (IsDataTypeNumber && IsDataTypeDecimal) - convert_result = tryConvertToDecimal(vec_from[i], col_to->getScale(), result); - - if (convert_result) - vec_to[i] = result; - else - { - vec_to[i] = static_cast(0); - (*vec_null_map_to)[i] = true; - } - } - else - { - if constexpr (IsDataTypeDecimal && IsDataTypeDecimal) - vec_to[i] = convertDecimals(vec_from[i], col_from->getScale(), col_to->getScale()); - else if constexpr (IsDataTypeDecimal && IsDataTypeNumber) - vec_to[i] = convertFromDecimal(vec_from[i], col_from->getScale()); - else if constexpr (IsDataTypeNumber && IsDataTypeDecimal) - vec_to[i] = convertToDecimal(vec_from[i], col_to->getScale()); - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Unsupported data type in conversion function"); - } - } - else if constexpr (std::is_same_v && std::is_same_v) - { - const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; - const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); - if (!matchIPv6Subnet(src, ip4_cidr, 96)) - { - char addr[IPV6_MAX_TEXT_LENGTH + 1] {}; - char * paddr = addr; - formatIPv6(src, paddr); - - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 {} in column {} is not in IPv4 mapping block", addr, named_from.column->getName()); - } - - uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); - if constexpr (std::endian::native == std::endian::little) - { - dst[0] = src[15]; - dst[1] = src[14]; - dst[2] = src[13]; - dst[3] = src[12]; - } - else - { - dst[0] = src[12]; - dst[1] = src[13]; - dst[2] = src[14]; - dst[3] = src[15]; - } - } - else if constexpr (std::is_same_v && std::is_same_v) - { - const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); - uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); - std::memset(dst, '\0', IPV6_BINARY_LENGTH); - dst[10] = dst[11] = 0xff; - - if constexpr (std::endian::native == std::endian::little) - { - dst[12] = src[3]; - dst[13] = src[2]; - dst[14] = src[1]; - dst[15] = src[0]; - } - else - { - dst[12] = src[0]; - dst[13] = src[1]; - dst[14] = src[2]; - dst[15] = src[3]; - } - } - else if constexpr (std::is_same_v && std::is_same_v) - { - vec_to[i] = static_cast(static_cast(vec_from[i])); - } - else if constexpr (std::is_same_v - && (std::is_same_v || std::is_same_v)) - { - vec_to[i] = static_cast(vec_from[i] * DATE_SECONDS_PER_DAY); - } - else - { - /// If From Data is Nan or Inf and we convert to integer type, throw exception - if constexpr (std::is_floating_point_v && !std::is_floating_point_v) - { - if (!isFinite(vec_from[i])) - { - if constexpr (std::is_same_v) - { - vec_to[i] = 0; - (*vec_null_map_to)[i] = true; - continue; - } - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Unexpected inf or nan to integer conversion"); - } - } - - if constexpr (std::is_same_v - || std::is_same_v) - { - bool convert_result = accurate::convertNumeric(vec_from[i], vec_to[i]); - - if (!convert_result) - { - if (std::is_same_v) - { - vec_to[i] = 0; - 
(*vec_null_map_to)[i] = true; - } - else - { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", - named_from.column->getName(), result_type->getName()); - } - } - } - else - { - vec_to[i] = static_cast(vec_from[i]); - } - } - } - - if constexpr (std::is_same_v) - return ColumnNullable::create(std::move(col_to), std::move(col_null_map_to)); - else - return col_to; - } - } -}; - - ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) { ColumnUInt8::MutablePtr null_map = nullptr; @@ -1052,174 +583,6 @@ ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) return null_map; } -template -requires (!std::is_same_v) -struct ConvertImpl -{ - using FromFieldType = typename FromDataType::FieldType; - using ColVecType = ColumnVectorOrDecimal; - - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) - { - if constexpr (IsDataTypeDateOrDateTime) - { - auto datetime_arg = arguments[0]; - - const DateLUTImpl * time_zone = nullptr; - const ColumnConst * time_zone_column = nullptr; - - if (arguments.size() == 1) - { - auto non_null_args = createBlockWithNestedColumns(arguments); - time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); - } - else /// When we have a column for timezone - { - datetime_arg.column = datetime_arg.column->convertToFullColumnIfConst(); - - if constexpr (std::is_same_v || std::is_same_v) - time_zone = &DateLUT::instance(); - /// For argument of Date or DateTime type, second argument with time zone could be specified. - if constexpr (std::is_same_v || std::is_same_v) - { - if ((time_zone_column = checkAndGetColumnConst(arguments[1].column.get()))) - { - auto non_null_args = createBlockWithNestedColumns(arguments); - time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); - } - } - } - const auto & col_with_type_and_name = columnGetNested(datetime_arg); - - if (const auto col_from = checkAndGetColumn(col_with_type_and_name.column.get())) - { - auto col_to = ColumnString::create(); - - const typename ColVecType::Container & vec_from = col_from->getData(); - ColumnString::Chars & data_to = col_to->getChars(); - ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = vec_from.size(); - - if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD") + 1)); - else if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD") + 1)); - else if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss") + 1)); - else if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss.") + col_from->getScale() + 1)); - else - data_to.resize(size * 3); /// Arbitrary - - offsets_to.resize(size); - - WriteBufferFromVector write_buffer(data_to); - const auto & type = static_cast(*col_with_type_and_name.type); - - ColumnUInt8::MutablePtr null_map = copyNullMap(datetime_arg.column); - - if (!null_map && arguments.size() > 1) - null_map = copyNullMap(arguments[1].column->convertToFullColumnIfConst()); - - if (null_map) - { - for (size_t i = 0; i < size; ++i) - { - if (!time_zone_column && arguments.size() > 1) - { - if (!arguments[1].column.get()->getDataAt(i).toString().empty()) - time_zone = &DateLUT::instance(arguments[1].column.get()->getDataAt(i).toString()); - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Provided time zone must be non-empty"); - } - bool is_ok = FormatImpl::template execute(vec_from[i], write_buffer, &type, time_zone); - 
null_map->getData()[i] |= !is_ok; - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - else - { - for (size_t i = 0; i < size; ++i) - { - if (!time_zone_column && arguments.size() > 1) - { - if (!arguments[1].column.get()->getDataAt(i).toString().empty()) - time_zone = &DateLUT::instance(arguments[1].column.get()->getDataAt(i).toString()); - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Provided time zone must be non-empty"); - } - FormatImpl::template execute(vec_from[i], write_buffer, &type, time_zone); - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - - write_buffer.finalize(); - - if (null_map) - return ColumnNullable::create(std::move(col_to), std::move(null_map)); - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), Name::name); - } - else - { - ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); - - const auto & col_with_type_and_name = columnGetNested(arguments[0]); - const auto & type = static_cast(*col_with_type_and_name.type); - - if (const auto col_from = checkAndGetColumn(col_with_type_and_name.column.get())) - { - auto col_to = ColumnString::create(); - - const typename ColVecType::Container & vec_from = col_from->getData(); - ColumnString::Chars & data_to = col_to->getChars(); - ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = vec_from.size(); - - data_to.resize(size * 3); - offsets_to.resize(size); - - WriteBufferFromVector write_buffer(data_to); - - if (null_map) - { - for (size_t i = 0; i < size; ++i) - { - bool is_ok = FormatImpl::template execute(vec_from[i], write_buffer, &type, nullptr); - /// We don't use timezones in this branch - null_map->getData()[i] |= !is_ok; - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - else - { - for (size_t i = 0; i < size; ++i) - { - FormatImpl::template execute(vec_from[i], write_buffer, &type, nullptr); - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - - write_buffer.finalize(); - - if (null_map) - return ColumnNullable::create(std::move(col_to), std::move(null_map)); - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), Name::name); - } - } -}; - /// Generic conversion of any type to String or FixedString via serialization to text. template @@ -1774,6 +1137,641 @@ struct ConvertThroughParsing }; +/// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. +struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; }; + +struct AccurateConvertStrategyAdditions +{ + UInt32 scale { 0 }; +}; + +struct AccurateOrNullConvertStrategyAdditions +{ + UInt32 scale { 0 }; +}; + + +struct ConvertDefaultBehaviorTag {}; +struct ConvertReturnNullOnErrorTag {}; +struct ConvertReturnZeroOnErrorTag {}; + +/** Conversion of number types to each other, enums to numbers, dates and datetimes to numbers and back: done by straight assignment. 
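+  * (Illustrative example: toUInt16(toDate('1970-01-02')) returns 1, because the conversion simply copies
+  * the stored value.)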
+ * (Date is represented internally as number of days from some day; DateTime - as unix timestamp) + */ +template +struct ConvertImpl +{ + using FromFieldType = typename FromDataType::FieldType; + using ToFieldType = typename ToDataType::FieldType; + + template + static ColumnPtr NO_SANITIZE_UNDEFINED execute( + const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type [[maybe_unused]], size_t input_rows_count, + Additions additions [[maybe_unused]] = Additions()) + { + const ColumnWithTypeAndName & named_from = arguments[0]; + + if constexpr ((std::is_same_v && !FromDataType::is_parametric) + || (std::is_same_v && std::is_same_v) + || (std::is_same_v && std::is_same_v)) + { + /// If types are the same, reuse the columns. + /// Conversions between Enum and the underlying type are also free. + return named_from.column; + } + else if constexpr ((std::is_same_v || std::is_same_v) + && std::is_same_v) + { + /// Conversion of DateTime to Date: throw off time component. + /// Conversion of Date32 to Date. + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (std::is_same_v && std::is_same_v) + { + /// Conversion of DateTime to Date: throw off time component. + return DateTimeTransformImpl::execute( + arguments, result_type, input_rows_count); + } + else if constexpr ((std::is_same_v || std::is_same_v) + && std::is_same_v) + { + /// Conversion from Date/Date32 to DateTime. + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (std::is_same_v && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count, TransformDateTime64(assert_cast(*named_from.type).getScale())); + } + /** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, + * Float32, Float64) to Date. If the + * number is less than 65536, then it is treated as DayNum, and if it's greater or equals to 65536, + * then treated as unix timestamp. If the number exceeds UInt32, saturate to MAX_UINT32 then as DayNum. + * It's a bit illogical, as we actually have two functions in one. + * But allows to support frequent case, + * when user write toDate(UInt32), expecting conversion of unix timestamp to Date. + * (otherwise such usage would be frequent mistake). 
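+     * Illustrative examples of this rule: toDate(1) is treated as DayNum 1, i.e. '1970-01-02', while
+     * toDate(1000000000) is treated as a unix timestamp and gives '2001-09-09' in the UTC time zone.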
+ */ + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + /// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (( + std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl, false>::execute( + arguments, result_type, input_rows_count); + } + /// Conversion of DateTime64 to Date or DateTime: discards fractional part. + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl>, false>::execute( + arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); + } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl>, false>::execute( + arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); + } + /// Conversion of Date or DateTime to DateTime64: add zero sub-second part. 
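+    /// (For illustration: toDateTime64(toDateTime('2024-03-13 10:20:30'), 3) becomes '2024-03-13 10:20:30.000'.)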
+ else if constexpr (( + std::is_same_v + || std::is_same_v + || std::is_same_v) + && std::is_same_v + && std::is_same_v) + { + return DateTimeTransformImpl::execute( + arguments, result_type, input_rows_count); + } + else if constexpr (IsDataTypeDateOrDateTime + && std::is_same_v + && std::is_same_v) + { + /// Date or DateTime to String + + using ColVecType = ColumnVectorOrDecimal; + + auto datetime_arg = arguments[0]; + + const DateLUTImpl * time_zone = nullptr; + const ColumnConst * time_zone_column = nullptr; + + if (arguments.size() == 1) + { + auto non_null_args = createBlockWithNestedColumns(arguments); + time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); + } + else /// When we have a column for timezone + { + datetime_arg.column = datetime_arg.column->convertToFullColumnIfConst(); + + if constexpr (std::is_same_v || std::is_same_v) + time_zone = &DateLUT::instance(); + /// For argument of Date or DateTime type, second argument with time zone could be specified. + if constexpr (std::is_same_v || std::is_same_v) + { + if ((time_zone_column = checkAndGetColumnConst(arguments[1].column.get()))) + { + auto non_null_args = createBlockWithNestedColumns(arguments); + time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); + } + } + } + const auto & col_with_type_and_name = columnGetNested(datetime_arg); + + if (const auto col_from = checkAndGetColumn(col_with_type_and_name.column.get())) + { + auto col_to = ColumnString::create(); + + const typename ColVecType::Container & vec_from = col_from->getData(); + ColumnString::Chars & data_to = col_to->getChars(); + ColumnString::Offsets & offsets_to = col_to->getOffsets(); + size_t size = vec_from.size(); + + if constexpr (std::is_same_v) + data_to.resize(size * (strlen("YYYY-MM-DD") + 1)); + else if constexpr (std::is_same_v) + data_to.resize(size * (strlen("YYYY-MM-DD") + 1)); + else if constexpr (std::is_same_v) + data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss") + 1)); + else if constexpr (std::is_same_v) + data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss.") + col_from->getScale() + 1)); + else + data_to.resize(size * 3); /// Arbitrary + + offsets_to.resize(size); + + WriteBufferFromVector write_buffer(data_to); + const auto & type = static_cast(*col_with_type_and_name.type); + + ColumnUInt8::MutablePtr null_map = copyNullMap(datetime_arg.column); + + if (!null_map && arguments.size() > 1) + null_map = copyNullMap(arguments[1].column->convertToFullColumnIfConst()); + + if (null_map) + { + for (size_t i = 0; i < size; ++i) + { + if (!time_zone_column && arguments.size() > 1) + { + if (!arguments[1].column.get()->getDataAt(i).toString().empty()) + time_zone = &DateLUT::instance(arguments[1].column.get()->getDataAt(i).toString()); + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Provided time zone must be non-empty"); + } + bool is_ok = FormatImpl::template execute(vec_from[i], write_buffer, &type, time_zone); + null_map->getData()[i] |= !is_ok; + writeChar(0, write_buffer); + offsets_to[i] = write_buffer.count(); + } + } + else + { + for (size_t i = 0; i < size; ++i) + { + if (!time_zone_column && arguments.size() > 1) + { + if (!arguments[1].column.get()->getDataAt(i).toString().empty()) + time_zone = &DateLUT::instance(arguments[1].column.get()->getDataAt(i).toString()); + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Provided time zone must be non-empty"); + } + FormatImpl::template execute(vec_from[i], write_buffer, &type, time_zone); + writeChar(0, write_buffer); + 
offsets_to[i] = write_buffer.count(); + } + } + + write_buffer.finalize(); + + if (null_map) + return ColumnNullable::create(std::move(col_to), std::move(null_map)); + return col_to; + } + else + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", + arguments[0].column->getName(), Name::name); + } + else if constexpr (std::is_same_v + && std::is_same_v) + { + /// Anything else to String. + + using ColVecType = ColumnVectorOrDecimal; + + ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); + + const auto & col_with_type_and_name = columnGetNested(arguments[0]); + const auto & type = static_cast(*col_with_type_and_name.type); + + if (const auto col_from = checkAndGetColumn(col_with_type_and_name.column.get())) + { + auto col_to = ColumnString::create(); + + const typename ColVecType::Container & vec_from = col_from->getData(); + ColumnString::Chars & data_to = col_to->getChars(); + ColumnString::Offsets & offsets_to = col_to->getOffsets(); + size_t size = vec_from.size(); + + data_to.resize(size * 3); + offsets_to.resize(size); + + WriteBufferFromVector write_buffer(data_to); + + if (null_map) + { + for (size_t i = 0; i < size; ++i) + { + bool is_ok = FormatImpl::template execute(vec_from[i], write_buffer, &type, nullptr); + /// We don't use timezones in this branch + null_map->getData()[i] |= !is_ok; + writeChar(0, write_buffer); + offsets_to[i] = write_buffer.count(); + } + } + else + { + for (size_t i = 0; i < size; ++i) + { + FormatImpl::template execute(vec_from[i], write_buffer, &type, nullptr); + writeChar(0, write_buffer); + offsets_to[i] = write_buffer.count(); + } + } + + write_buffer.finalize(); + + if (null_map) + return ColumnNullable::create(std::move(col_to), std::move(null_map)); + return col_to; + } + else + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", + arguments[0].column->getName(), Name::name); + } + else + { + using ColVecFrom = typename FromDataType::ColumnType; + using ColVecTo = typename ToDataType::ColumnType; + + if constexpr ((IsDataTypeDecimal || IsDataTypeDecimal) + && !(std::is_same_v || std::is_same_v) + && (!IsDataTypeDecimalOrNumber || !IsDataTypeDecimalOrNumber)) + { + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", + named_from.column->getName(), Name::name); + } + + const ColVecFrom * col_from = checkAndGetColumn(named_from.column.get()); + if (!col_from) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", + named_from.column->getName(), Name::name); + + typename ColVecTo::MutablePtr col_to = nullptr; + + if constexpr (IsDataTypeDecimal) + { + UInt32 scale; + + if constexpr (std::is_same_v + || std::is_same_v) + { + scale = additions.scale; + } + else + { + scale = additions; + } + + col_to = ColVecTo::create(0, scale); + } + else + col_to = ColVecTo::create(); + + const auto & vec_from = col_from->getData(); + auto & vec_to = col_to->getData(); + vec_to.resize(input_rows_count); + + ColumnUInt8::MutablePtr col_null_map_to; + ColumnUInt8::Container * vec_null_map_to [[maybe_unused]] = nullptr; + if constexpr (std::is_same_v) + { + col_null_map_to = ColumnUInt8::create(input_rows_count, false); + vec_null_map_to = &col_null_map_to->getData(); + } + + bool result_is_bool = isBool(result_type); + for (size_t i = 0; i < input_rows_count; ++i) + { + if constexpr (std::is_same_v) + { + if (result_is_bool) + { + vec_to[i] = vec_from[i] != 
FromFieldType(0); + continue; + } + } + + if constexpr (std::is_same_v && std::is_same_v) + { + static_assert( + std::is_same_v, + "UInt128 and UUID types must be same"); + + vec_to[i].items[1] = vec_from[i].toUnderType().items[0]; + vec_to[i].items[0] = vec_from[i].toUnderType().items[1]; + } + else if constexpr (std::is_same_v && std::is_same_v) + { + static_assert( + std::is_same_v, + "UInt128 and IPv6 types must be same"); + + vec_to[i].items[1] = std::byteswap(vec_from[i].toUnderType().items[0]); + vec_to[i].items[0] = std::byteswap(vec_from[i].toUnderType().items[1]); + } + else if constexpr (std::is_same_v != std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Conversion between numeric types and UUID is not supported. " + "Probably the passed UUID is unquoted"); + } + else if constexpr ( + (std::is_same_v != std::is_same_v) + && !(is_any_of + || is_any_of)) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", + TypeName, TypeName); + } + else if constexpr (std::is_same_v != std::is_same_v + && !(std::is_same_v || std::is_same_v)) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Conversion between numeric types and IPv6 is not supported. " + "Probably the passed IPv6 is unquoted"); + } + else if constexpr (IsDataTypeDecimal || IsDataTypeDecimal) + { + if constexpr (std::is_same_v) + { + ToFieldType result; + bool convert_result = false; + + if constexpr (IsDataTypeDecimal && IsDataTypeDecimal) + convert_result = tryConvertDecimals(vec_from[i], col_from->getScale(), col_to->getScale(), result); + else if constexpr (IsDataTypeDecimal && IsDataTypeNumber) + convert_result = tryConvertFromDecimal(vec_from[i], col_from->getScale(), result); + else if constexpr (IsDataTypeNumber && IsDataTypeDecimal) + convert_result = tryConvertToDecimal(vec_from[i], col_to->getScale(), result); + + if (convert_result) + vec_to[i] = result; + else + { + vec_to[i] = static_cast(0); + (*vec_null_map_to)[i] = true; + } + } + else + { + if constexpr (IsDataTypeDecimal && IsDataTypeDecimal) + vec_to[i] = convertDecimals(vec_from[i], col_from->getScale(), col_to->getScale()); + else if constexpr (IsDataTypeDecimal && IsDataTypeNumber) + vec_to[i] = convertFromDecimal(vec_from[i], col_from->getScale()); + else if constexpr (IsDataTypeNumber && IsDataTypeDecimal) + vec_to[i] = convertToDecimal(vec_from[i], col_to->getScale()); + else + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Unsupported data type in conversion function"); + } + } + else if constexpr (std::is_same_v && std::is_same_v) + { + const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; + const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); + if (!matchIPv6Subnet(src, ip4_cidr, 96)) + { + char addr[IPV6_MAX_TEXT_LENGTH + 1] {}; + char * paddr = addr; + formatIPv6(src, paddr); + + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 {} in column {} is not in IPv4 mapping block", addr, named_from.column->getName()); + } + + uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); + if constexpr (std::endian::native == std::endian::little) + { + dst[0] = src[15]; + dst[1] = src[14]; + dst[2] = src[13]; + dst[3] = src[12]; + } + else + { + dst[0] = src[12]; + dst[1] = src[13]; + dst[2] = src[14]; + dst[3] = src[15]; + } + } + else if constexpr (std::is_same_v && std::is_same_v) + { + const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); + uint8_t * dst = 
reinterpret_cast(&vec_to[i].toUnderType()); + std::memset(dst, '\0', IPV6_BINARY_LENGTH); + dst[10] = dst[11] = 0xff; + + if constexpr (std::endian::native == std::endian::little) + { + dst[12] = src[3]; + dst[13] = src[2]; + dst[14] = src[1]; + dst[15] = src[0]; + } + else + { + dst[12] = src[0]; + dst[13] = src[1]; + dst[14] = src[2]; + dst[15] = src[3]; + } + } + else if constexpr (std::is_same_v && std::is_same_v) + { + vec_to[i] = static_cast(static_cast(vec_from[i])); + } + else if constexpr (std::is_same_v + && (std::is_same_v || std::is_same_v)) + { + vec_to[i] = static_cast(vec_from[i] * DATE_SECONDS_PER_DAY); + } + else + { + /// If From Data is Nan or Inf and we convert to integer type, throw exception + if constexpr (std::is_floating_point_v && !std::is_floating_point_v) + { + if (!isFinite(vec_from[i])) + { + if constexpr (std::is_same_v) + { + vec_to[i] = 0; + (*vec_null_map_to)[i] = true; + continue; + } + else + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Unexpected inf or nan to integer conversion"); + } + } + + if constexpr (std::is_same_v + || std::is_same_v) + { + bool convert_result = accurate::convertNumeric(vec_from[i], vec_to[i]); + + if (!convert_result) + { + if (std::is_same_v) + { + vec_to[i] = 0; + (*vec_null_map_to)[i] = true; + } + else + { + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", + named_from.column->getName(), result_type->getName()); + } + } + } + else + { + vec_to[i] = static_cast(vec_from[i]); + } + } + } + + if constexpr (std::is_same_v) + return ColumnNullable::create(std::move(col_to), std::move(col_null_map_to)); + else + return col_to; + } + } +}; + + template requires (!std::is_same_v) struct ConvertImpl From f339c88079e2b0173b29e343872a576767b56e7b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 06:32:31 +0100 Subject: [PATCH 236/374] Continue --- src/Functions/FunctionsConversion.cpp | 61 +++++++++++++-------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 16f547939fa..5001abcb5d2 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1163,13 +1163,10 @@ template struct ConvertImpl { - using FromFieldType = typename FromDataType::FieldType; - using ToFieldType = typename ToDataType::FieldType; - template static ColumnPtr NO_SANITIZE_UNDEFINED execute( const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type [[maybe_unused]], size_t input_rows_count, - Additions additions [[maybe_unused]] = Additions()) + Additions additions = Additions()) { const ColumnWithTypeAndName & named_from = arguments[0]; @@ -1206,7 +1203,7 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count, TransformDateTime64(assert_cast(*named_from.type).getScale())); + arguments, result_type, input_rows_count, additions); } /** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, * Float32, Float64) to Date. 
If the @@ -1336,14 +1333,14 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl>, false>::execute( - arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); + arguments, result_type, input_rows_count, additions); } else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { return DateTimeTransformImpl>, false>::execute( - arguments, result_type, input_rows_count, TransformDateTime64>(assert_cast(*named_from.type).getScale())); + arguments, result_type, input_rows_count, additions); } /// Conversion of Date or DateTime to DateTime64: add zero sub-second part. else if constexpr (( @@ -1362,6 +1359,7 @@ struct ConvertImpl { /// Date or DateTime to String + using FromFieldType = typename FromDataType::FieldType; using ColVecType = ColumnVectorOrDecimal; auto datetime_arg = arguments[0]; @@ -1471,6 +1469,7 @@ struct ConvertImpl { /// Anything else to String. + using FromFieldType = typename FromDataType::FieldType; using ColVecType = ColumnVectorOrDecimal; ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); @@ -1523,8 +1522,31 @@ struct ConvertImpl throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", arguments[0].column->getName(), Name::name); } + else if constexpr ((std::is_same_v || std::is_same_v) + && !std::is_same_v + && std::is_same_v) + { + return ConvertThroughParsing::execute( + arguments, result_type, input_rows_count, additions); + } + else if constexpr ((std::is_same_v || std::is_same_v) + && !std::is_same_v + && std::is_same_v) + { + return ConvertThroughParsing::execute( + arguments, result_type, input_rows_count, additions); + } + else if constexpr ((std::is_same_v || std::is_same_v) + && is_any_of + && std::is_same_v) + { + return ConvertThroughParsing::execute( + arguments, result_type, input_rows_count, additions); + } else { + using FromFieldType = typename FromDataType::FieldType; + using ToFieldType = typename ToDataType::FieldType; using ColVecFrom = typename FromDataType::ColumnType; using ColVecTo = typename ToDataType::ColumnType; @@ -1772,31 +1794,6 @@ struct ConvertImpl }; -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (is_any_of && is_any_of) -struct ConvertImpl - : ConvertThroughParsing {}; - /// Generic conversion of any type from String. Used for complex types: Array and Tuple or types with custom serialization. 
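 /// (For illustration: CAST('[1,2,3]' AS Array(UInt32)) takes this path, deserializing the string with the
 /// text format of the target type.)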
struct ConvertImplGenericFromString { From 87db039d8952ca0e2ed32c7eda0f49600f9078c4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 07:06:06 +0100 Subject: [PATCH 237/374] Continue --- src/Functions/FunctionsConversion.cpp | 119 ++++++++++++-------------- 1 file changed, 55 insertions(+), 64 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 5001abcb5d2..fd328e8af42 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1354,8 +1354,7 @@ struct ConvertImpl arguments, result_type, input_rows_count); } else if constexpr (IsDataTypeDateOrDateTime - && std::is_same_v - && std::is_same_v) + && std::is_same_v) { /// Date or DateTime to String @@ -1464,6 +1463,54 @@ struct ConvertImpl throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", arguments[0].column->getName(), Name::name); } + /// Conversion from FixedString to String. + /// Cutting sequences of zero bytes from end of strings. + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); + const auto & nested = columnGetNested(arguments[0]); + if (const ColumnFixedString * col_from = checkAndGetColumn(nested.column.get())) + { + auto col_to = ColumnString::create(); + + const ColumnFixedString::Chars & data_from = col_from->getChars(); + ColumnString::Chars & data_to = col_to->getChars(); + ColumnString::Offsets & offsets_to = col_to->getOffsets(); + size_t size = col_from->size(); + size_t n = col_from->getN(); + data_to.resize(size * (n + 1)); /// + 1 - zero terminator + offsets_to.resize(size); + + size_t offset_from = 0; + size_t offset_to = 0; + for (size_t i = 0; i < size; ++i) + { + if (!null_map || !null_map->getData()[i]) + { + size_t bytes_to_copy = n; + while (bytes_to_copy > 0 && data_from[offset_from + bytes_to_copy - 1] == 0) + --bytes_to_copy; + + memcpy(&data_to[offset_to], &data_from[offset_from], bytes_to_copy); + offset_to += bytes_to_copy; + } + data_to[offset_to] = 0; + ++offset_to; + offsets_to[i] = offset_to; + offset_from += n; + } + + data_to.resize(offset_to); + if (result_type->isNullable() && null_map) + return ColumnNullable::create(std::move(col_to), std::move(null_map)); + return col_to; + } + else + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", + arguments[0].column->getName(), Name::name); + } else if constexpr (std::is_same_v && std::is_same_v) { @@ -1543,6 +1590,12 @@ struct ConvertImpl return ConvertThroughParsing::execute( arguments, result_type, input_rows_count, additions); } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v + && std::is_same_v) + { + } else { using FromFieldType = typename FromDataType::FieldType; @@ -1863,68 +1916,6 @@ struct ConvertImplGenericFromString }; -template <> -struct ConvertImpl - : ConvertImpl {}; - -template <> -struct ConvertImpl - : ConvertImpl {}; - - -/** Conversion from FixedString to String. - * Cutting sequences of zero bytes from end of strings. 
- */ -template -struct ConvertImpl -{ - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type, size_t /*input_rows_count*/) - { - ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); - const auto & nested = columnGetNested(arguments[0]); - if (const ColumnFixedString * col_from = checkAndGetColumn(nested.column.get())) - { - auto col_to = ColumnString::create(); - - const ColumnFixedString::Chars & data_from = col_from->getChars(); - ColumnString::Chars & data_to = col_to->getChars(); - ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = col_from->size(); - size_t n = col_from->getN(); - data_to.resize(size * (n + 1)); /// + 1 - zero terminator - offsets_to.resize(size); - - size_t offset_from = 0; - size_t offset_to = 0; - for (size_t i = 0; i < size; ++i) - { - if (!null_map || !null_map->getData()[i]) - { - size_t bytes_to_copy = n; - while (bytes_to_copy > 0 && data_from[offset_from + bytes_to_copy - 1] == 0) - --bytes_to_copy; - - memcpy(&data_to[offset_to], &data_from[offset_from], bytes_to_copy); - offset_to += bytes_to_copy; - } - data_to[offset_to] = 0; - ++offset_to; - offsets_to[i] = offset_to; - offset_from += n; - } - - data_to.resize(offset_to); - if (return_type->isNullable() && null_map) - return ColumnNullable::create(std::move(col_to), std::move(null_map)); - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), Name::name); - } -}; - - /// Declared early because used below. struct NameToDate { static constexpr auto name = "toDate"; }; struct NameToDate32 { static constexpr auto name = "toDate32"; }; From f8dfd8c03a05f45729f0dce588da4c9b0200739e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 07:15:07 +0100 Subject: [PATCH 238/374] Less garbage --- src/Functions/FunctionsConversion.cpp | 30 +++++++++++++-------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index fd328e8af42..5461661386e 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -2389,7 +2389,6 @@ public: static constexpr bool to_datetime64 = std::is_same_v; static FunctionPtr create(ContextPtr) { return std::make_shared(); } - static FunctionPtr create() { return std::make_shared(); } String getName() const override { @@ -2516,28 +2515,29 @@ public: ColumnPtr result_column; if constexpr (to_decimal) + { result_column = executeInternal(arguments, result_type, input_rows_count, assert_cast(*removeNullable(result_type)).getScale()); - else + } + else if (isDateTime64(arguments)) { - if (isDateTime64(arguments)) - { - UInt64 scale = to_datetime64 ? DataTypeDateTime64::default_scale : 0; - if (arguments.size() > 1) - scale = extractToDecimalScale(arguments[1]); + UInt64 scale = to_datetime64 ? 
DataTypeDateTime64::default_scale : 0; + if (arguments.size() > 1) + scale = extractToDecimalScale(arguments[1]); - if (scale == 0) - result_column = executeInternal(arguments, result_type, input_rows_count); - else - { - result_column = executeInternal(arguments, result_type, input_rows_count, static_cast(scale)); - } + if (scale == 0) + { + result_column = executeInternal(arguments, result_type, input_rows_count); } else { - result_column = executeInternal(arguments, result_type, input_rows_count); + result_column = executeInternal(arguments, result_type, input_rows_count, static_cast(scale)); } } + else + { + result_column = executeInternal(arguments, result_type, input_rows_count); + } if (!result_column) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. " @@ -3204,7 +3204,7 @@ private: { /// In case when converting to Nullable type, we apply different parsing rule, /// that will not throw an exception but return NULL in case of malformed input. - FunctionPtr function = FunctionConvertFromString::create(); + FunctionPtr function = FunctionConvertFromString::create(context); return createFunctionAdaptor(function, from_type); } else if (!can_apply_accurate_cast) From 65a541fbdbac4ec3a00a1f0271867e68e6cdef87 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 07:48:14 +0100 Subject: [PATCH 239/374] Tighten --- utils/check-style/check-large-objects.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/utils/check-style/check-large-objects.sh b/utils/check-style/check-large-objects.sh index 3e2a385bdd0..9b78d6196e3 100755 --- a/utils/check-style/check-large-objects.sh +++ b/utils/check-style/check-large-objects.sh @@ -6,10 +6,7 @@ export LC_ALL=C # The "total" should be printed without localization TU_EXCLUDES=( AggregateFunctionUniq - FunctionsConversion - RangeHashedDictionary - Aggregator ) From 486e8537a80abe1b238839f207f35a9f9030011e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 08:02:27 +0100 Subject: [PATCH 240/374] Fix error --- src/Functions/FunctionsConversion.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 5461661386e..67d6e202255 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1309,14 +1309,14 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); + arguments, result_type, input_rows_count, additions); } else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); + arguments, result_type, input_rows_count, additions); } else if constexpr (( std::is_same_v @@ -1325,7 +1325,7 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count); + arguments, result_type, input_rows_count, additions); } /// Conversion of DateTime64 to Date or DateTime: discards fractional part. 
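         /// (For illustration: casting a DateTime64 value '2024-03-13 10:20:30.999' to DateTime gives
         /// '2024-03-13 10:20:30'; the fractional part is truncated, not rounded.)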
else if constexpr (std::is_same_v @@ -1351,7 +1351,7 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl::execute( - arguments, result_type, input_rows_count); + arguments, result_type, input_rows_count, additions); } else if constexpr (IsDataTypeDateOrDateTime && std::is_same_v) From 87846b2c98205f95db69d99ede102e600615c54a Mon Sep 17 00:00:00 2001 From: Mikhail Koviazin Date: Wed, 13 Mar 2024 09:14:55 +0200 Subject: [PATCH 241/374] Added more tests --- .../0_stateless/00662_has_nullable.reference | 12 +++++++ .../0_stateless/00662_has_nullable.sql | 32 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/tests/queries/0_stateless/00662_has_nullable.reference b/tests/queries/0_stateless/00662_has_nullable.reference index 97da99d655e..1ac93f25a39 100644 --- a/tests/queries/0_stateless/00662_has_nullable.reference +++ b/tests/queries/0_stateless/00662_has_nullable.reference @@ -1,2 +1,14 @@ +Nullable(UInt64), non-null array 1 1 \N 0 +Non-nullable UInt64, nullable array +0 0 +1 1 +2 1 +Nullable(UInt64), nullable array +0 0 +\N 1 +1 1 +All NULLs +0 0 +\N 1 diff --git a/tests/queries/0_stateless/00662_has_nullable.sql b/tests/queries/0_stateless/00662_has_nullable.sql index 0d395871d9f..3fd3bd46baa 100644 --- a/tests/queries/0_stateless/00662_has_nullable.sql +++ b/tests/queries/0_stateless/00662_has_nullable.sql @@ -1,7 +1,39 @@ DROP TABLE IF EXISTS 00662_has_nullable; + +SELECT 'Nullable(UInt64), non-null array'; CREATE TABLE 00662_has_nullable(a Nullable(UInt64)) ENGINE = Memory; INSERT INTO 00662_has_nullable VALUES (1), (Null); SELECT a, has([0, 1], a) FROM 00662_has_nullable; DROP TABLE 00662_has_nullable; + +-------------------------------------------------------------------------------- + +SELECT 'Non-nullable UInt64, nullable array'; +CREATE TABLE 00662_has_nullable(a UInt64) ENGINE = Memory; + +INSERT INTO 00662_has_nullable VALUES (0), (1), (2); +SELECT a, has([NULL, 1, 2], a) FROM 00662_has_nullable; + +DROP TABLE 00662_has_nullable; + +-------------------------------------------------------------------------------- + +SELECT 'Nullable(UInt64), nullable array'; +CREATE TABLE 00662_has_nullable(a Nullable(UInt64)) ENGINE = Memory; + +INSERT INTO 00662_has_nullable VALUES (0), (Null), (1); +SELECT a, has([NULL, 1, 2], a) FROM 00662_has_nullable; + +DROP TABLE 00662_has_nullable; + +-------------------------------------------------------------------------------- + +SELECT 'All NULLs'; +CREATE TABLE 00662_has_nullable(a Nullable(UInt64)) ENGINE = Memory; + +INSERT INTO 00662_has_nullable VALUES (0), (Null); +SELECT a, has([NULL, NULL], a) FROM 00662_has_nullable; + +DROP TABLE 00662_has_nullable; From 514f8392f93b9488ab4b52011102891a851b51db Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 13 Mar 2024 10:25:37 +0100 Subject: [PATCH 242/374] Fix error --- src/Functions/FunctionsConversion.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 67d6e202255..70cbf31bcb3 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1569,15 +1569,20 @@ struct ConvertImpl throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", arguments[0].column->getName(), Name::name); } + else if constexpr (std::is_same_v + && std::is_same_v + && std::is_same_v) + { + return ConvertImpl::execute( + arguments, result_type, input_rows_count, additions); + } else if constexpr 
((std::is_same_v || std::is_same_v) - && !std::is_same_v && std::is_same_v) { return ConvertThroughParsing::execute( arguments, result_type, input_rows_count, additions); } else if constexpr ((std::is_same_v || std::is_same_v) - && !std::is_same_v && std::is_same_v) { return ConvertThroughParsing::execute( @@ -1590,12 +1595,6 @@ struct ConvertImpl return ConvertThroughParsing::execute( arguments, result_type, input_rows_count, additions); } - else if constexpr (std::is_same_v - && std::is_same_v - && std::is_same_v - && std::is_same_v) - { - } else { using FromFieldType = typename FromDataType::FieldType; From 5ef241cc850fe95240145f86329e259f5609de31 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 13 Mar 2024 11:30:11 +0100 Subject: [PATCH 243/374] WIP on virtual columns in StorageMerge --- src/Storages/StorageMerge.cpp | 42 ++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 8410f0a8df8..ec03545d767 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -56,12 +56,17 @@ #include #include #include +#include "Common/logger_useful.h" #include #include #include #include +#include "Analyzer/QueryNode.h" +#include "Core/QueryProcessingStage.h" +#include "IO/WriteHelpers.h" #include #include +#include namespace DB { @@ -798,10 +803,13 @@ QueryTreeNodePtr replaceTableExpressionAndRemoveJoin( const ContextPtr & context, const Names & required_column_names) { + LOG_DEBUG(&Poco::Logger::get("replaceTableExpressionAndRemoveJoin"), "BEFORE:\n{}", query->dumpTree()); auto * query_node = query->as(); auto join_tree_type = query_node->getJoinTree()->getNodeType(); auto modified_query = query_node->cloneAndReplace(original_table_expression, replacement_table_expression); + LOG_DEBUG(&Poco::Logger::get("replaceTableExpressionAndRemoveJoin"), "AFTER:\n{}", modified_query->dumpTree()); + // For the case when join tree is just a table or a table function we don't need to do anything more. 
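    // E.g. for a hypothetical "SELECT * FROM some_merge_table" the join tree is a single TABLE
    // node, so the early return below already yields the fully rewritten query.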
if (join_tree_type == QueryTreeNodeType::TABLE || join_tree_type == QueryTreeNodeType::TABLE_FUNCTION) return modified_query; @@ -880,6 +888,7 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextPtr & modified_ if (modified_query_info.table_expression) { auto replacement_table_expression = std::make_shared(storage, storage_lock, storage_snapshot_); + replacement_table_expression->setAlias(modified_query_info.table_expression->getAlias()); if (query_info.table_expression_modifiers) replacement_table_expression->setTableExpressionModifiers(*query_info.table_expression_modifiers); @@ -960,6 +969,8 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextPtr & modified_ column_name_to_node); } + LOG_DEBUG(&Poco::Logger::get("getModifiedQueryInfo"), "{}", modified_query_info.query_tree->dumpTree()); + modified_query_info.query = queryNodeToSelectQuery(modified_query_info.query_tree); } else @@ -1020,7 +1031,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( const auto & [database_name, storage, _, table_name] = storage_with_lock; bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer; auto storage_stage - = storage->getQueryProcessingStage(context, QueryProcessingStage::Complete, storage_snapshot_, modified_query_info); + = storage->getQueryProcessingStage(context, processed_stage, storage_snapshot_, modified_query_info); builder = plan.buildQueryPipeline( QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); @@ -1047,10 +1058,23 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( Block pipe_header = builder->getHeader(); - if (has_database_virtual_column && common_header.has("_database") && !pipe_header.has("_database")) + auto get_column_options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects().withVirtuals(); + if (storage_snapshot_->storage.supportsSubcolumns()) + get_column_options.withSubcolumns(); + + LOG_DEBUG(&Poco::Logger::get("createSources"), "Processed:{}\nStorage:{}", toString(processed_stage), toString(storage_stage)); + + String table_alias; + if (allow_experimental_analyzer) + table_alias = modified_query_info.query_tree->as()->getJoinTree()->as()->getAlias(); + + String database_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? "_database" : table_alias + "._database"; + String table_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? 
"_table" : table_alias + "._table"; + + if (has_database_virtual_column && common_header.has(database_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) { ColumnWithTypeAndName column; - column.name = "_database"; + column.name = database_column; column.type = std::make_shared(std::make_shared()); column.column = column.type->createColumnConst(0, Field(database_name)); @@ -1062,10 +1086,10 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( { return std::make_shared(stream_header, adding_column_actions); }); } - if (has_table_virtual_column && common_header.has("_table") && !pipe_header.has("_table")) + if (has_table_virtual_column && common_header.has(table_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) { ColumnWithTypeAndName column; - column.name = "_table"; + column.name = table_column; column.type = std::make_shared(std::make_shared()); column.column = column.type->createColumnConst(0, Field(table_name)); @@ -1080,7 +1104,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( /// Subordinary tables could have different but convertible types, like numeric types of different width. /// We must return streams with structure equals to structure of Merge table. convertAndFilterSourceStream( - header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, processed_stage); + header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, storage_stage); } return builder; @@ -1433,7 +1457,7 @@ void ReadFromMerge::convertAndFilterSourceStream( const RowPolicyDataOpt & row_policy_data_opt, ContextPtr local_context, QueryPipelineBuilder & builder, - QueryProcessingStage::Enum processed_stage) + QueryProcessingStage::Enum processed_stage [[maybe_unused]]) { Block before_block_header = builder.getHeader(); @@ -1493,7 +1517,7 @@ void ReadFromMerge::convertAndFilterSourceStream( ActionsDAG::MatchColumnsMode convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Name; if (local_context->getSettingsRef().allow_experimental_analyzer - && (processed_stage != QueryProcessingStage::FetchColumns || dynamic_cast(&snapshot->storage) != nullptr)) + && (processed_stage != QueryProcessingStage::FetchColumns)) convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Position; if (row_policy_data_opt) @@ -1501,6 +1525,8 @@ void ReadFromMerge::convertAndFilterSourceStream( row_policy_data_opt->addFilterTransform(builder); } + LOG_DEBUG(&Poco::Logger::get("convertAndFilterSourceStream"), "SOURCE:\n{}\nRESULT:\n{}", builder.getHeader().dumpStructure(), header.dumpStructure()); + auto convert_actions_dag = ActionsDAG::makeConvertingActions(builder.getHeader().getColumnsWithTypeAndName(), header.getColumnsWithTypeAndName(), convert_actions_match_columns_mode); From 2e49140de7695095831f52b32fc965a6b07ffd3e Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 13 Mar 2024 11:37:42 +0100 Subject: [PATCH 244/374] Small progress --- src/Storages/StorageMerge.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index ec03545d767..d5fd0f51a62 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1517,7 +1517,7 @@ void ReadFromMerge::convertAndFilterSourceStream( ActionsDAG::MatchColumnsMode convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Name; if 
(local_context->getSettingsRef().allow_experimental_analyzer - && (processed_stage != QueryProcessingStage::FetchColumns)) + && (processed_stage == QueryProcessingStage::FetchColumns && dynamic_cast(&snapshot->storage) != nullptr)) convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Position; if (row_policy_data_opt) From c0bfafa203091abbe2f5af6d1253249fc432ad64 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Wed, 13 Mar 2024 10:48:02 +0000 Subject: [PATCH 245/374] fix tags --- .../02997_insert_select_too_many_parts_multithread.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql b/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql index 00cf262add5..2dfc8094115 100644 --- a/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql +++ b/tests/queries/0_stateless/02997_insert_select_too_many_parts_multithread.sql @@ -1,5 +1,5 @@ -# Tags: disabled -# TODO: Fix parts_to_throw_insert logic for parallel MergeTreeSink onStart calls +-- Tags: disabled +-- TODO: Fix parts_to_throw_insert logic for parallel MergeTreeSink onStart calls DROP TABLE IF EXISTS too_many_parts; CREATE TABLE too_many_parts (x UInt64) ENGINE = MergeTree ORDER BY tuple() SETTINGS parts_to_delay_insert = 5, parts_to_throw_insert = 5; From a704ea510a2c180d46ba89031915624619d3b74c Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Wed, 13 Mar 2024 12:03:42 +0100 Subject: [PATCH 246/374] Fix `test_placement_info` (#61057) * Fix test * Fix last minute changes * Automatic style fix --------- Co-authored-by: robot-clickhouse --- tests/integration/helpers/mock_servers.py | 2 +- .../configs/imds_bootstrap.xml | 9 +++++++++ tests/integration/test_placement_info/test.py | 18 ++++++++++-------- 3 files changed, 20 insertions(+), 9 deletions(-) create mode 100644 tests/integration/test_placement_info/configs/imds_bootstrap.xml diff --git a/tests/integration/helpers/mock_servers.py b/tests/integration/helpers/mock_servers.py index e4655ffeeaf..f2181d85e12 100644 --- a/tests/integration/helpers/mock_servers.py +++ b/tests/integration/helpers/mock_servers.py @@ -33,7 +33,7 @@ def start_mock_servers(cluster, script_dir, mocks, timeout=100): cluster.exec_in_container( container_id, - ["python", server_name, str(port)], + ["python3", server_name, str(port)], detach=True, ) diff --git a/tests/integration/test_placement_info/configs/imds_bootstrap.xml b/tests/integration/test_placement_info/configs/imds_bootstrap.xml new file mode 100644 index 00000000000..5b2a77e0663 --- /dev/null +++ b/tests/integration/test_placement_info/configs/imds_bootstrap.xml @@ -0,0 +1,9 @@ + + + 1 + + + 0 + ci-placeholder + + diff --git a/tests/integration/test_placement_info/test.py b/tests/integration/test_placement_info/test.py index 1b93a3eae0b..32fd2fa57d7 100644 --- a/tests/integration/test_placement_info/test.py +++ b/tests/integration/test_placement_info/test.py @@ -2,16 +2,14 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.mock_servers import start_mock_servers import os -import time -METADATA_SERVER_HOSTNAME = "resolver" +METADATA_SERVER_HOSTNAME = "node_imds" METADATA_SERVER_PORT = 8080 cluster = ClickHouseCluster(__file__) node_imds = cluster.add_instance( "node_imds", - with_minio=True, - main_configs=["configs/imds.xml"], + main_configs=["configs/imds_bootstrap.xml"], env_variables={ "AWS_EC2_METADATA_SERVICE_ENDPOINT": 
f"http://{METADATA_SERVER_HOSTNAME}:{METADATA_SERVER_PORT}", }, @@ -32,10 +30,10 @@ node_missing_value = cluster.add_instance( ) -def start_metadata_server(): +def start_metadata_server(started_cluster): script_dir = os.path.join(os.path.dirname(__file__), "metadata_servers") start_mock_servers( - cluster, + started_cluster, script_dir, [ ( @@ -51,13 +49,17 @@ def start_metadata_server(): def start_cluster(): try: cluster.start() - start_metadata_server() - yield + start_metadata_server(cluster) + yield cluster finally: cluster.shutdown() def test_placement_info_from_imds(): + with open(os.path.join(os.path.dirname(__file__), "configs/imds.xml"), "r") as f: + node_imds.replace_config( + "/etc/clickhouse-server/config.d/imds_bootstrap.xml", f.read() + ) node_imds.stop_clickhouse(kill=True) node_imds.start_clickhouse() From 0353121dccb87403ff08334137fd9cecbb8953f1 Mon Sep 17 00:00:00 2001 From: peter279k Date: Wed, 13 Mar 2024 19:17:27 +0800 Subject: [PATCH 247/374] Improve related NULL functions usage --- docs/en/sql-reference/functions/functions-for-nulls.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index 91c04cfded3..e73d6c899e7 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -14,7 +14,7 @@ Returns whether the argument is [NULL](../../sql-reference/syntax.md#null). isNull(x) ``` -Alias: `ISNULL`. +Alias: `IS NULL`. **Arguments** @@ -58,6 +58,8 @@ Returns whether the argument is not [NULL](../../sql-reference/syntax.md#null-li isNotNull(x) ``` +Alias: `IS NOT NULL`. + **Arguments:** - `x` — A value of non-compound data type. @@ -100,6 +102,8 @@ Returns whether the argument is 0 (zero) or [NULL](../../sql-reference/syntax.md isZeroOrNull(x) ``` +Alias: `x = 0 OR x IS NULL`. + **Arguments:** - `x` — A value of non-compound data type. From 3931351ec4806769048d2638f54323f1ae89e056 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 13 Mar 2024 12:36:47 +0100 Subject: [PATCH 248/374] Pass timeout through setting --- src/Core/Settings.h | 1 + src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 5 +++-- src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp | 10 +++++++--- src/Disks/IO/CachedOnDiskWriteBufferFromFile.h | 15 +++++++++------ src/IO/ReadSettings.h | 1 + src/IO/WriteSettings.h | 1 + src/Interpreters/Cache/FileCache.cpp | 5 +++-- src/Interpreters/Cache/FileCache.h | 3 ++- src/Interpreters/Cache/FileCache_fwd.h | 1 - src/Interpreters/Cache/FileSegment.cpp | 4 ++-- src/Interpreters/Cache/FileSegment.h | 2 +- src/Interpreters/Cache/Metadata.cpp | 6 +++++- .../Cache/WriteBufferToFileSegment.cpp | 8 +++++++- src/Interpreters/Cache/WriteBufferToFileSegment.h | 2 ++ src/Interpreters/Context.cpp | 2 ++ 15 files changed, 46 insertions(+), 20 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index d70a6cf51c5..7ba335099e6 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -778,6 +778,7 @@ class IColumn; M(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), "Max remote filesystem cache size that can be downloaded by a single query", 0) \ M(Bool, throw_on_error_from_cache_on_write_operations, false, "Ignore error from cache when caching on write operations (INSERT, merges)", 0) \ M(UInt64, filesystem_cache_segments_batch_size, 20, "Limit on size of a single batch of file segments that a read buffer can request from cache. 
Too low value will lead to excessive requests to cache, too large may slow down eviction from cache", 0) \ + M(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, "Wait time to lock cache for sapce reservation in filesystem cache", 0) \ \ M(Bool, use_page_cache_for_disks_without_file_cache, false, "Use userspace page cache for remote disks that don't have filesystem cache enabled.", 0) \ M(Bool, read_from_page_cache_if_exists_otherwise_bypass_cache, false, "Use userspace page cache in passive mode, similar to read_from_filesystem_cache_if_exists_otherwise_bypass_cache.", 0) \ diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 47ee5858562..1e108b481ee 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -637,7 +637,8 @@ void CachedOnDiskReadBufferFromFile::predownload(FileSegment & file_segment) ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, current_impl_buffer_size); - bool continue_predownload = file_segment.reserve(current_predownload_size); + bool continue_predownload = file_segment.reserve( + current_predownload_size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds); if (continue_predownload) { LOG_TEST(log, "Left to predownload: {}, buffer size: {}", bytes_to_predownload, current_impl_buffer_size); @@ -992,7 +993,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() { chassert(file_offset_of_buffer_end + size - 1 <= file_segment.range().right); - bool success = file_segment.reserve(size); + bool success = file_segment.reserve(size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds); if (success) { chassert(file_segment.getCurrentWriteOffset() == static_cast(implementation_buffer->getPosition())); diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp index faed55de713..f4e309f461e 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp @@ -26,16 +26,18 @@ FileSegmentRangeWriter::FileSegmentRangeWriter( FileCache * cache_, const FileSegment::Key & key_, const FileCacheUserInfo & user_, + size_t reserve_space_lock_wait_timeout_milliseconds_, std::shared_ptr cache_log_, const String & query_id_, const String & source_path_) : cache(cache_) , key(key_) + , user(user_) + , reserve_space_lock_wait_timeout_milliseconds(reserve_space_lock_wait_timeout_milliseconds_) , log(getLogger("FileSegmentRangeWriter")) , cache_log(cache_log_) , query_id(query_id_) , source_path(source_path_) - , user(user_) { } @@ -89,7 +91,7 @@ bool FileSegmentRangeWriter::write(const char * data, size_t size, size_t offset size_t size_to_write = std::min(available_size, size); - bool reserved = file_segment->reserve(size_to_write); + bool reserved = file_segment->reserve(size_to_write, reserve_space_lock_wait_timeout_milliseconds); if (!reserved) { appendFilesystemCacheLog(*file_segment); @@ -211,6 +213,7 @@ CachedOnDiskWriteBufferFromFile::CachedOnDiskWriteBufferFromFile( , key(key_) , query_id(query_id_) , user(user_) + , reserve_space_lock_wait_timeout_milliseconds(settings_.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds) , throw_on_error_from_cache(settings_.throw_on_error_from_cache) , cache_log(!query_id_.empty() && settings_.enable_filesystem_cache_log ? 
cache_log_ : nullptr) { @@ -251,7 +254,8 @@ void CachedOnDiskWriteBufferFromFile::cacheData(char * data, size_t size, bool t if (!cache_writer) { - cache_writer = std::make_unique(cache.get(), key, user, cache_log, query_id, source_path); + cache_writer = std::make_unique( + cache.get(), key, user, reserve_space_lock_wait_timeout_milliseconds, cache_log, query_id, source_path); } Stopwatch watch(CLOCK_MONOTONIC); diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h index 59e0c76ca3d..ad4f6b5916d 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h @@ -30,6 +30,7 @@ public: FileCache * cache_, const FileSegment::Key & key_, const FileCacheUserInfo & user_, + size_t reserve_space_lock_wait_timeout_milliseconds_, std::shared_ptr cache_log_, const String & query_id_, const String & source_path_); @@ -52,13 +53,14 @@ private: void completeFileSegment(); FileCache * cache; - FileSegment::Key key; + const FileSegment::Key key; + const FileCacheUserInfo user; + const size_t reserve_space_lock_wait_timeout_milliseconds; LoggerPtr log; std::shared_ptr cache_log; - String query_id; - String source_path; - FileCacheUserInfo user; + const String query_id; + const String source_path; FileSegmentsHolderPtr file_segments; @@ -99,11 +101,12 @@ private: String source_path; FileCacheKey key; - size_t current_download_offset = 0; const String query_id; const FileCacheUserInfo user; + const size_t reserve_space_lock_wait_timeout_milliseconds; + const bool throw_on_error_from_cache; - bool throw_on_error_from_cache; + size_t current_download_offset = 0; bool cache_in_error_state_or_disabled = false; std::unique_ptr cache_writer; diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index c0a63bf51b1..6a0cac35878 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -100,6 +100,7 @@ struct ReadSettings bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false; bool enable_filesystem_cache_log = false; size_t filesystem_cache_segments_batch_size = 20; + size_t filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = 1000; bool use_page_cache_for_disks_without_file_cache = false; bool read_from_page_cache_if_exists_otherwise_bypass_cache = false; diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h index fcadf34f021..7d36677b468 100644 --- a/src/IO/WriteSettings.h +++ b/src/IO/WriteSettings.h @@ -20,6 +20,7 @@ struct WriteSettings bool enable_filesystem_cache_on_write_operations = false; bool enable_filesystem_cache_log = false; bool throw_on_error_from_cache = false; + size_t filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = 1000; bool s3_allow_parallel_part_upload = true; diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 5650b9ce44e..ea40ffcfa3c 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -777,12 +777,13 @@ bool FileCache::tryReserve( FileSegment & file_segment, const size_t size, FileCacheReserveStat & reserve_stat, - const UserInfo & user) + const UserInfo & user, + size_t lock_wait_timeout_milliseconds) { ProfileEventTimeIncrement watch(ProfileEvents::FilesystemCacheReserveMicroseconds); assertInitialized(); - auto cache_lock = tryLockCache(std::chrono::milliseconds(FILECACHE_TRY_RESERVE_LOCK_TIMEOUT_MILLISECONDS)); + auto cache_lock = tryLockCache(std::chrono::milliseconds(lock_wait_timeout_milliseconds)); if (!cache_lock) { 
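        /// The cache lock could not be acquired within lock_wait_timeout_milliseconds;
        /// the failure is accounted for as lock contention below.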
ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention); diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 7434b2ac78a..007c4fd9483 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -161,7 +161,8 @@ public: FileSegment & file_segment, size_t size, FileCacheReserveStat & stat, - const UserInfo & user); + const UserInfo & user, + size_t lock_wait_timeout_milliseconds); std::vector getFileSegmentInfos(const UserID & user_id); diff --git a/src/Interpreters/Cache/FileCache_fwd.h b/src/Interpreters/Cache/FileCache_fwd.h index eaed279e7fd..06261b19db7 100644 --- a/src/Interpreters/Cache/FileCache_fwd.h +++ b/src/Interpreters/Cache/FileCache_fwd.h @@ -12,7 +12,6 @@ static constexpr int FILECACHE_DEFAULT_LOAD_METADATA_THREADS = 16; static constexpr int FILECACHE_DEFAULT_MAX_ELEMENTS = 10000000; static constexpr int FILECACHE_DEFAULT_HITS_THRESHOLD = 0; static constexpr size_t FILECACHE_BYPASS_THRESHOLD = 256 * 1024 * 1024; -static constexpr size_t FILECACHE_TRY_RESERVE_LOCK_TIMEOUT_MILLISECONDS = 1000; /// 1 sec. class FileCache; using FileCachePtr = std::shared_ptr; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 6b2d4a4bec8..9ec2b090dc7 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -497,7 +497,7 @@ LockedKeyPtr FileSegment::lockKeyMetadata(bool assert_exists) const return metadata->tryLock(); } -bool FileSegment::reserve(size_t size_to_reserve, FileCacheReserveStat * reserve_stat) +bool FileSegment::reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat) { if (!size_to_reserve) throw Exception(ErrorCodes::LOGICAL_ERROR, "Zero space reservation is not allowed"); @@ -549,7 +549,7 @@ bool FileSegment::reserve(size_t size_to_reserve, FileCacheReserveStat * reserve if (!reserve_stat) reserve_stat = &dummy_stat; - bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user); + bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user, lock_wait_timeout_milliseconds); if (!reserved) setDownloadFailedUnlocked(lockFileSegment()); diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index ea97a6b0157..c34ee064345 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -199,7 +199,7 @@ public: /// Try to reserve exactly `size` bytes (in addition to the getDownloadedSize() bytes already downloaded). /// Returns true if reservation was successful, false otherwise. - bool reserve(size_t size_to_reserve, FileCacheReserveStat * reserve_stat = nullptr); + bool reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat = nullptr); /// Write data into reserved space. 
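    /// (Callers are expected to have reserved the space with reserve() above before writing.)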
void write(const char * from, size_t size, size_t offset); diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 727f2762cca..b79605622b6 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -693,6 +694,9 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optionalset(memory->data(), memory->size()); } + const auto reserve_space_lock_wait_timeout_milliseconds = + Context::getGlobalContextInstance()->getReadSettings().filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; + size_t offset = file_segment.getCurrentWriteOffset(); if (offset != static_cast(reader->getPosition())) reader->seek(offset, SEEK_SET); @@ -701,7 +705,7 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optionalavailable(); - if (!file_segment.reserve(size)) + if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds)) { LOG_TEST( log, "Failed to reserve space during background download " diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp index 7cd4e2d6e8d..759135722dc 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,11 @@ WriteBufferToFileSegment::WriteBufferToFileSegment(FileSegmentsHolderPtr segment , file_segment(&segment_holder_->front()) , segment_holder(std::move(segment_holder_)) { + auto query_context = CurrentThread::getQueryContext(); + if (query_context) + reserve_space_lock_wait_timeout_milliseconds = query_context->getReadSettings().filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; + else + reserve_space_lock_wait_timeout_milliseconds = Context::getGlobalContextInstance()->getReadSettings().filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; } /// If it throws an exception, the file segment will be incomplete, so you should not use it in the future. @@ -49,7 +55,7 @@ void WriteBufferToFileSegment::nextImpl() FileCacheReserveStat reserve_stat; /// In case of an error, we don't need to finalize the file segment /// because it will be deleted soon and completed in the holder's destructor. 
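        /// The extra argument added to reserve() below is the lock wait timeout in milliseconds,
        /// taken from the query context (or from the global context when none is available) in the
        /// constructor change above.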
- bool ok = file_segment->reserve(bytes_to_write, &reserve_stat); + bool ok = file_segment->reserve(bytes_to_write, reserve_space_lock_wait_timeout_milliseconds, &reserve_stat); if (!ok) { diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.h b/src/Interpreters/Cache/WriteBufferToFileSegment.h index feb33472513..bff340d79b3 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.h +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.h @@ -28,6 +28,8 @@ private: /// Empty if file_segment is not owned by this WriteBufferToFileSegment FileSegmentsHolderPtr segment_holder; + + size_t reserve_space_lock_wait_timeout_milliseconds; }; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index d658fbe9920..6a0657a842c 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -5166,6 +5166,7 @@ ReadSettings Context::getReadSettings() const res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; res.filesystem_cache_segments_batch_size = settings.filesystem_cache_segments_batch_size; + res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; res.filesystem_cache_max_download_size = settings.filesystem_cache_max_download_size; res.skip_download_if_exceeds_query_cache = settings.skip_download_if_exceeds_query_cache; @@ -5214,6 +5215,7 @@ WriteSettings Context::getWriteSettings() const res.enable_filesystem_cache_on_write_operations = settings.enable_filesystem_cache_on_write_operations; res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; res.throw_on_error_from_cache = settings.throw_on_error_from_cache_on_write_operations; + res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; res.s3_allow_parallel_part_upload = settings.s3_allow_parallel_part_upload; From ef796c668d187c80f13223dbf679dcc442907008 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 13 Mar 2024 12:03:37 +0000 Subject: [PATCH 249/374] Fixing test_build_sets_from_multiple_threads/test.py::test_set --- src/Planner/Planner.cpp | 4 +++- tests/analyzer_integration_broken_tests.txt | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index bc1fb30781d..861b12e3da2 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1081,7 +1081,9 @@ void addBuildSubqueriesForSetsStepIfNeeded( for (auto & subquery : subqueries) { auto query_tree = subquery->detachQueryTree(); - auto subquery_options = select_query_options.subquery(); + /// I suppose it should be better to use all flags from select_query_options, + /// But for now it is done in the same way as in old analyzer. 
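        /// Concretely, only the Complete processing stage and the current subquery depth are
        /// carried over below; the remaining flags of select_query_options are deliberately
        /// not propagated.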
+ auto subquery_options = SelectQueryOptions(QueryProcessingStage::Complete, select_query_options.subquery_depth).subquery(); Planner subquery_planner( query_tree, subquery_options, diff --git a/tests/analyzer_integration_broken_tests.txt b/tests/analyzer_integration_broken_tests.txt index 31527dc3476..e71a047c215 100644 --- a/tests/analyzer_integration_broken_tests.txt +++ b/tests/analyzer_integration_broken_tests.txt @@ -1,4 +1,3 @@ -test_build_sets_from_multiple_threads/test.py::test_set test_concurrent_backups_s3/test.py::test_concurrent_backups test_distributed_type_object/test.py::test_distributed_type_object test_merge_table_over_distributed/test.py::test_global_in From 1a47682c12a45acb888c89974302ec35bee5aaed Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 13 Mar 2024 13:08:12 +0100 Subject: [PATCH 250/374] Analyzer: Fix virtual columns in StorageMerge #ci_set_analyzer --- src/Interpreters/InterpreterSelectQueryAnalyzer.cpp | 1 + src/Storages/StorageMerge.cpp | 8 +++++--- tests/analyzer_tech_debt.txt | 1 - 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp index 922f4a99b4a..539d7a59f6f 100644 --- a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp @@ -86,6 +86,7 @@ void replaceStorageInQueryTree(QueryTreeNodePtr & query_tree, const ContextPtr & continue; auto replacement_table_expression = std::make_shared(storage, context); + replacement_table_expression->setAlias(node->getAlias()); if (auto table_expression_modifiers = table_node.getTableExpressionModifiers()) replacement_table_expression->setTableExpressionModifiers(*table_expression_modifiers); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index d5fd0f51a62..fab4b2e5146 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1135,13 +1135,15 @@ QueryPlan ReadFromMerge::createPlanForTable( bool allow_experimental_analyzer = modified_context->getSettingsRef().allow_experimental_analyzer; auto storage_stage = storage->getQueryProcessingStage(modified_context, - QueryProcessingStage::Complete, + processed_stage, storage_snapshot_, modified_query_info); + LOG_DEBUG(&Poco::Logger::get("createPlanForTable"), "Storage: {}", toString(storage_stage)); + QueryPlan plan; - if (processed_stage <= storage_stage || (allow_experimental_analyzer && processed_stage == QueryProcessingStage::FetchColumns)) + if (processed_stage <= storage_stage) { /// If there are only virtual columns in query, you must request at least one other column. 
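        /// E.g. a hypothetical "SELECT _table FROM some_merge_table" requests no physical
        /// columns at all, which is exactly the situation the check below guards against.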
if (real_column_names.empty()) @@ -1186,7 +1188,7 @@ QueryPlan ReadFromMerge::createPlanForTable( row_policy_data_opt->addStorageFilter(source_step_with_filter); } } - else if (processed_stage > storage_stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns)) + else if (processed_stage > storage_stage || allow_experimental_analyzer) { /// Maximum permissible parallelism is streams_num modified_context->setSetting("max_threads", streams_num); diff --git a/tests/analyzer_tech_debt.txt b/tests/analyzer_tech_debt.txt index dc6284d20c5..cee3cff8cd5 100644 --- a/tests/analyzer_tech_debt.txt +++ b/tests/analyzer_tech_debt.txt @@ -1,5 +1,4 @@ 00223_shard_distributed_aggregation_memory_efficient -00717_merge_and_distributed 00725_memory_tracking 01062_pm_all_join_with_block_continuation 01083_expressions_in_engine_arguments From 9da03607282b8fc338be4356322c67229686fd39 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 13 Mar 2024 13:10:48 +0100 Subject: [PATCH 251/374] Reload CI From 457578627103b0eb0028f51b739c3911de278bf4 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 13 Mar 2024 12:38:17 +0000 Subject: [PATCH 252/374] Fix 01952_optimize_distributed_group_by_sharding_key with analyzer. --- ...istributed_group_by_sharding_key.reference | 78 +++++++++++++++++++ ...mize_distributed_group_by_sharding_key.sql | 14 ++++ 2 files changed, 92 insertions(+) diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference index ddfa6929d69..212dd348edb 100644 --- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference @@ -71,3 +71,81 @@ Expression (Projection) Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))) ReadFromSystemNumbers ReadFromRemote (Read from remote replica) +set allow_experimental_analyzer = 1; +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +Expression (Project names) + Distinct (DISTINCT) + Union + Distinct (Preliminary DISTINCT) + Expression ((Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers))))))) + ReadFromSystemNumbers + ReadFromRemote (Read from remote replica) +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +Union + Expression (Project names) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers))))))) + ReadFromSystemNumbers + ReadFromRemote (Read from remote replica) +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +Expression (Project names) + LimitBy + Union + Expression (Before LIMIT BY) + LimitBy + Expression ((Before LIMIT 
BY + (Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers)))))))) + ReadFromSystemNumbers + Expression + ReadFromRemote (Read from remote replica) +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +Union + Expression (Project names) + LimitBy + Expression ((Before LIMIT BY + (Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers)))))))) + ReadFromSystemNumbers + ReadFromRemote (Read from remote replica) +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +Expression (Project names) + Distinct (DISTINCT) + Sorting (Merge sorted streams for ORDER BY, without aggregation) + Union + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Distinct (Preliminary DISTINCT) + Expression ((Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers))))))) + ReadFromSystemNumbers + ReadFromRemote (Read from remote replica) +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +Expression (Project names) + Sorting (Merge sorted streams after aggregation stage for ORDER BY) + Union + Distinct (DISTINCT) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Distinct (Preliminary DISTINCT) + Expression ((Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers))))))) + ReadFromSystemNumbers + ReadFromRemote (Read from remote replica) +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +Expression (Project names) + LimitBy + Expression (Before LIMIT BY) + Sorting (Merge sorted streams for ORDER BY, without aggregation) + Union + LimitBy + Expression ((Before LIMIT BY + (Before ORDER BY + (Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers))))))) [lifted up part])) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers)))))))) + ReadFromSystemNumbers + ReadFromRemote (Read from remote replica) +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +Expression (Project names) + Sorting (Merge sorted streams after aggregation stage for ORDER BY) + Union + LimitBy + Expression ((Before LIMIT BY + 
(Before ORDER BY + (Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers))))))) [lifted up part])) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Project names + (Projection + Change column names to column identifiers)))))))) + ReadFromSystemNumbers + ReadFromRemote (Read from remote replica) diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql index 74b55b95315..adf55a9dd7f 100644 --- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql @@ -4,6 +4,8 @@ set optimize_skip_unused_shards=1; set optimize_distributed_group_by_sharding_key=1; set prefer_localhost_replica=1; +set allow_experimental_analyzer = 0; + -- { echo } explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized @@ -14,3 +16,15 @@ explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized + +set allow_experimental_analyzer = 1; + +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized + +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized From ffdb84b8df7120292543da077227ef049552dccf Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 13 Mar 2024 13:38:23 +0100 Subject: [PATCH 253/374] Fix style --- src/Storages/StorageMerge.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMerge.cpp 
b/src/Storages/StorageMerge.cpp index fab4b2e5146..e695594873b 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1063,7 +1063,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( get_column_options.withSubcolumns(); LOG_DEBUG(&Poco::Logger::get("createSources"), "Processed:{}\nStorage:{}", toString(processed_stage), toString(storage_stage)); - + String table_alias; if (allow_experimental_analyzer) table_alias = modified_query_info.query_tree->as()->getJoinTree()->as()->getAlias(); From 4954bde599dd1bdcdf56957e17b0b9a661aa17f6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 13 Mar 2024 13:38:35 +0100 Subject: [PATCH 254/374] Update docs/en/sql-reference/functions/string-functions.md Co-authored-by: Johnny <9611008+johnnymatthews@users.noreply.github.com> --- docs/en/sql-reference/functions/string-functions.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index f9c3f91a12b..a9b7cc9566d 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -615,7 +615,9 @@ Assumes that the string contains valid UTF-8 encoded text. If this assumption is **Example** ```sql -SELECT 'database' AS string, substringUTF8(string, 5), substringUTF8(string, 5, 1) +SELECT 'Täglich grüßt das Murmeltier.' AS str, + substringUTF8(str, 9), + substringUTF8(str, 9, 5) ``` ```response From b4953f35b4a8ca3eca816557c080ff612062b482 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 13 Mar 2024 13:39:03 +0100 Subject: [PATCH 255/374] Update docs/en/sql-reference/functions/string-functions.md Co-authored-by: Johnny <9611008+johnnymatthews@users.noreply.github.com> --- docs/en/sql-reference/functions/string-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index a9b7cc9566d..25a0c7e38d8 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -686,7 +686,7 @@ Assumes that the string contains valid UTF-8 encoded text. If this assumption is **Example** ```sql -SELECT substringIndexUTF8('www.clickhouse.com', '.', 2) +SELECT substringIndexUTF8('www.straßen-in-europa.de', '.', 2) ``` ```response From 6ca4fc26f4bca8f787b3a575d5496ffd75ee0c55 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 13 Mar 2024 13:39:14 +0100 Subject: [PATCH 256/374] Update docs/en/sql-reference/functions/string-functions.md Co-authored-by: Johnny <9611008+johnnymatthews@users.noreply.github.com> --- docs/en/sql-reference/functions/string-functions.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 25a0c7e38d8..01a583e1713 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -621,9 +621,7 @@ SELECT 'Täglich grüßt das Murmeltier.' AS str, ``` ```response -┌─string───┬─substringUTF8('database', 5)─┬─substringUTF8('database', 5, 1)─┐ -│ database │ base │ b │ -└──────────┴──────────────────────────────┴─────────────────────────────────┘ +Täglich grüßt das Murmeltier. grüßt das Murmeltier. 
grüßt ``` ## substringIndex From 1e536251a20a0fdbac08b0a99e420a8e74886bcd Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 13 Mar 2024 13:41:56 +0100 Subject: [PATCH 257/374] Update string-functions.md --- docs/en/sql-reference/functions/string-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 01a583e1713..b4e2adbed3c 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -688,7 +688,7 @@ SELECT substringIndexUTF8('www.straßen-in-europa.de', '.', 2) ``` ```response -www.clickhouse +www.straßen-in-europa ``` ## appendTrailingCharIfAbsent From e5e632ec3362d2106adca2e02ae2a4ea1862ee3c Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 13 Mar 2024 12:43:10 +0000 Subject: [PATCH 258/374] Update analyzer_tech_debt.txt --- tests/analyzer_tech_debt.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/analyzer_tech_debt.txt b/tests/analyzer_tech_debt.txt index dbd216ea7be..42aa579658e 100644 --- a/tests/analyzer_tech_debt.txt +++ b/tests/analyzer_tech_debt.txt @@ -9,7 +9,6 @@ 01747_join_view_filter_dictionary 01761_cast_to_enum_nullable 01925_join_materialized_columns -01952_optimize_distributed_group_by_sharding_key 02354_annoy # Check after constants refactoring 02901_parallel_replicas_rollup From cca96e05cf7be69f53a479db13414824552b7ca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Mar 2024 17:57:24 +0100 Subject: [PATCH 259/374] Bring clickhouse-test changes from private --- tests/clickhouse-test | 376 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 345 insertions(+), 31 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index ce0feadf050..057502379ed 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -4,9 +4,11 @@ # pylint: disable=global-variable-not-assigned # pylint: disable=too-many-lines # pylint: disable=anomalous-backslash-in-string +# pylint: disable=protected-access import copy import enum +import tempfile import glob # Not requests, to avoid requiring extra dependency. 
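# (tempfile, imported above, is used further down by get_temp_file_path() to build the
#  scratch path that SharedEngineReplacer writes its temporary copy of the test file to)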
@@ -68,6 +70,144 @@ TEST_FILE_EXTENSIONS = [".sql", ".sql.j2", ".sh", ".py", ".expect"] VERSION_PATTERN = r"^((\d+\.)?(\d+\.)?(\d+\.)?\d+)$" +class SharedEngineReplacer: + ENGINES_NON_REPLICATED_REGEXP = r"[ =]((Collapsing|VersionedCollapsing|Summing|Replacing|Aggregating|)MergeTree\(?\)?)" + ENGINES_MAPPING_REPLICATED = [ + ("ReplicatedMergeTree", "SharedMergeTree"), + ("ReplicatedCollapsingMergeTree", "SharedCollapsingMergeTree"), + ( + "ReplicatedVersionedCollapsingMergeTree", + "SharedVersionedCollapsingMergeTree", + ), + ("ReplicatedSummingMergeTree", "SharedSummingMergeTree"), + ("ReplicatedReplacingMergeTree", "SharedReplacingMergeTree"), + ("ReplicatedAggregatingMergeTree", "SharedAggregatingMergeTree"), + ] + NEW_SYNTAX_REPLICATED_MERGE_TREE_RE = ( + r"Replicated[a-zA-Z]*MergeTree\((\\?'.*\\?')?,?(\\?'.*\\?')?[a-zA-Z, _}{]*\)" + ) + OLD_SYNTAX_OR_ARGUMENTS_RE = r"Tree\(.*[0-9]+.*\)" + + def _check_replicad_new_syntax(self, line): + return re.search(self.NEW_SYNTAX_REPLICATED_MERGE_TREE_RE, line) is not None + + def _check_old_syntax_or_arguments(self, line): + return re.search(self.OLD_SYNTAX_OR_ARGUMENTS_RE, line) is not None + + @staticmethod + def _is_comment_line(line): + return line.startswith("SELECT") or line.startswith("select") + + @staticmethod + def _is_create_query(line): + return ( + line.startswith("CREATE") + or line.startswith("create") + or line.startswith("ENGINE") + or line.startswith("engine") + ) + + def _replace_non_replicated(self, line, escape_quotes, use_random_path): + groups = re.search(self.ENGINES_NON_REPLICATED_REGEXP, line) + if groups is not None and not self._check_old_syntax_or_arguments(line): + non_replicated_engine = groups.groups()[0] + basename_no_ext = os.path.splitext(os.path.basename(self.file_name))[0] + if use_random_path: + shared_path = "/" + os.path.join( + basename_no_ext.replace("_", "/"), + str(os.getpid()), + str(random.randint(1, 1000)), + ) + else: + shared_path = "/" + os.path.join( + basename_no_ext.replace("_", "/"), str(os.getpid()) + ) + + if escape_quotes: + shared_engine = ( + "Shared" + + non_replicated_engine.replace("()", "") + + f"(\\'{shared_path}\\', \\'1\\')" + ) + else: + shared_engine = ( + "Shared" + + non_replicated_engine.replace("()", "") + + f"('{shared_path}', '1')" + ) + return line.replace(non_replicated_engine, shared_engine) + + return line + + def _need_to_replace_something(self): + return ( + self.replace_replicated or self.replace_non_replicated + ) and "shared_merge_tree" not in self.file_name + + def _has_show_create_table(self): + with open(self.file_name, "r", encoding="utf-8") as f: + return re.search("show create table", f.read(), re.IGNORECASE) + + def __init__( + self, file_name, replace_replicated, replace_non_replicated, reference_file + ): + self.file_name = file_name + self.temp_file_path = get_temp_file_path() + self.replace_replicated = replace_replicated + self.replace_non_replicated = replace_non_replicated + + use_random_path = not reference_file and not self._has_show_create_table() + + if not self._need_to_replace_something(): + return + + shutil.copyfile(self.file_name, self.temp_file_path) + shutil.copymode(self.file_name, self.temp_file_path) + + with open(self.file_name, "w", newline="", encoding="utf-8") as modified: + with open(self.temp_file_path, "r", newline="", encoding="utf-8") as source: + for line in source: + if self._is_comment_line(line) or ( + reference_file and not self._is_create_query(line) + ): + modified.write(line) + continue + + if self.replace_replicated: + 
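                        # Rewrite each Replicated*MergeTree engine into its Shared*MergeTree
                        # counterpart according to ENGINES_MAPPING_REPLICATED above.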
for ( + engine_from, + engine_to, + ) in SharedEngineReplacer.ENGINES_MAPPING_REPLICATED: + if engine_from in line and ( + self._check_replicad_new_syntax(line) + or engine_from + " " in line + or engine_from + ";" in line + ): + line = line.replace(engine_from, engine_to) + break + + if self.replace_non_replicated: + line = self._replace_non_replicated( + line, reference_file, use_random_path + ) + + modified.write(line) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + if not self._need_to_replace_something(): + return + shutil.move(self.temp_file_path, self.file_name) + + +def get_temp_file_path(): + return os.path.join( + tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()) + ) + + def stringhash(s: str) -> int: # default hash() function consistent # only during process invocation https://stackoverflow.com/a/42089311 @@ -92,6 +232,16 @@ def trim_for_log(s): return "\n".join(lines) +def is_valid_utf_8(fname): + try: + with open(fname, "rb") as f: + contents = f.read() + contents.decode("utf-8") + return True + except UnicodeDecodeError: + return False + + class TestException(Exception): pass @@ -536,6 +686,8 @@ class FailureReason(enum.Enum): INTERNAL_QUERY_FAIL = "Internal query (CREATE/DROP DATABASE) failed:" # SKIPPED reasons + NOT_SUPPORTED_IN_CLOUD = "not supported in cloud environment" + NOT_SUPPORTED_IN_PRIVATE = "not supported in private build" DISABLED = "disabled" SKIP = "skip" NO_JINJA = "no jinja" @@ -548,6 +700,7 @@ class FailureReason(enum.Enum): S3_STORAGE = "s3-storage" BUILD = "not running for current build" NO_PARALLEL_REPLICAS = "smth in not supported with parallel replicas" + SHARED_MERGE_TREE = "no-shared-merge-tree" # UNKNOWN reasons NO_REFERENCE = "no reference file" @@ -606,8 +759,6 @@ class SettingsRandomizer: "read_in_order_two_level_merge_threshold": lambda: random.randint(0, 100), "optimize_aggregation_in_order": lambda: random.randint(0, 1), "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000), - "min_compress_block_size": lambda: random.randint(1, 1048576 * 3), - "max_compress_block_size": lambda: random.randint(1, 1048576 * 3), "use_uncompressed_cache": lambda: random.randint(0, 1), "min_bytes_to_use_direct_io": threshold_generator( 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 @@ -659,6 +810,11 @@ class SettingsRandomizer: 0.3, 0.5, 1, 10 * 1024 * 1024 * 1024 ), "max_bytes_before_remerge_sort": lambda: random.randint(1, 3000000000), + "min_compress_block_size": lambda: random.randint(1, 1048576 * 3), + "max_compress_block_size": lambda: random.randint(1, 1048576 * 3), + "merge_tree_compact_parts_min_granules_to_multibuffer_read": lambda: random.randint( + 1, 128 + ), "optimize_sorting_by_input_stream_properties": lambda: random.randint(0, 1), "http_response_buffer_size": lambda: random.randint(0, 10 * 1048576), "http_wait_end_of_query": lambda: random.random() > 0.5, @@ -684,6 +840,7 @@ class SettingsRandomizer: get_localzone(), ] ), + "prefer_warmed_unmerged_parts_seconds": lambda: random.randint(0, 10), "use_page_cache_for_disks_without_file_cache": lambda: random.random() < 0.7, "page_cache_inject_eviction": lambda: random.random() < 0.5, } @@ -733,6 +890,17 @@ class MergeTreeSettingsRandomizer: "primary_key_compress_block_size": lambda: random.randint(8000, 100000), "replace_long_file_name_to_hash": lambda: random.randint(0, 1), "max_file_name_length": threshold_generator(0.3, 0.3, 0, 128), + "min_bytes_for_full_part_storage": threshold_generator( + 0.3, 0.3, 0, 512 * 1024 * 
1024 + ), + "compact_parts_max_bytes_to_buffer": lambda: random.randint( + 1024, 512 * 1024 * 1024 + ), + "compact_parts_max_granules_to_buffer": threshold_generator(0.15, 0.15, 1, 256), + "compact_parts_merge_max_bytes_to_prefetch_part": lambda: random.randint( + 1, 32 * 1024 * 1024 + ), + "cache_populated_by_fetch": lambda: random.randint(0, 1), } @staticmethod @@ -744,6 +912,10 @@ class MergeTreeSettingsRandomizer: return random_settings +def replace_in_file(filename, what, with_what): + os.system(f"LC_ALL=C sed -i -e 's|{what}|{with_what}|g' {filename}") + + class TestResult: def __init__( self, @@ -972,6 +1144,15 @@ class TestCase: if tags and ("disabled" in tags) and not args.disabled: return FailureReason.DISABLED + elif args.private and self.name in suite.private_skip_list: + return FailureReason.NOT_SUPPORTED_IN_PRIVATE + + elif args.cloud and ("no-replicated-database" in tags): + return FailureReason.REPLICATED_DB + + elif args.cloud and self.name in suite.cloud_skip_list: + return FailureReason.NOT_SUPPORTED_IN_CLOUD + elif ( os.path.exists(os.path.join(suite.suite_path, self.name) + ".disabled") and not args.disabled @@ -1022,6 +1203,13 @@ class TestCase: ): return FailureReason.NON_ATOMIC_DB + elif ( + tags + and ("no-shared-merge-tree" in tags) + and args.replace_replicated_with_shared + ): + return FailureReason.SHARED_MERGE_TREE + elif tags and ("no-s3-storage" in tags) and args.s3_storage: return FailureReason.S3_STORAGE elif ( @@ -1051,7 +1239,8 @@ class TestCase: ): description = "" - debug_log = trim_for_log(debug_log) + if debug_log: + debug_log = "\n".join(debug_log.splitlines()[:100]) if proc: if proc.returncode is None: @@ -1136,6 +1325,7 @@ class TestCase: description += "\nstdout:\n" description += trim_for_log(stdout) description += "\n" + if debug_log: description += "\n" description += debug_log @@ -1148,9 +1338,7 @@ class TestCase: ) if "Exception" in stdout: - description += "\n" - description += trim_for_log(stdout) - description += "\n" + description += "\n{}\n".format("\n".join(stdout.splitlines()[:100])) if debug_log: description += "\n" description += debug_log @@ -1358,7 +1546,13 @@ class TestCase: # because there are also output of per test database creation pattern = "{test} > {stdout} 2> {stderr}" - if self.ext == ".sql": + if self.ext == ".sql" and args.cloud: + # Get at least some logs, because we don't have access to system.text_log and pods... + pattern = ( + "{client} --send_logs_level={logs_level} {secure} --multiquery {options}" + " --send_logs_level=trace < {test} > {stdout} 2>> /test_output/some_logs_from_server.log" + ) + elif self.ext == ".sql" and not args.cloud: pattern = ( "{client} --send_logs_level={logs_level} {secure} --multiquery {options} < " + pattern @@ -1396,17 +1590,15 @@ class TestCase: total_time = (datetime.now() - start_time).total_seconds() # Normalize randomized database names in stdout, stderr files. - os.system(f"LC_ALL=C sed -i -e 's/{database}/default/g' {self.stdout_file}") + replace_in_file(self.stdout_file, database, "default") if args.hide_db_name: - os.system(f"LC_ALL=C sed -i -e 's/{database}/default/g' {self.stderr_file}") + replace_in_file(self.stderr_file, database, "default") if args.replicated_database: - os.system(f"LC_ALL=C sed -i -e 's|/auto_{{shard}}||g' {self.stdout_file}") - os.system(f"LC_ALL=C sed -i -e 's|auto_{{replica}}||g' {self.stdout_file}") + replace_in_file(self.stdout_file, "/auto_{shard}", "") + replace_in_file(self.stdout_file, "auto_{replica}", "") # Normalize hostname in stdout file. 
- os.system( - f"LC_ALL=C sed -i -e 's/{socket.gethostname()}/localhost/g' {self.stdout_file}" - ) + replace_in_file(self.stdout_file, socket.gethostname(), "localhost") stdout = "" if os.path.exists(self.stdout_file): @@ -1444,18 +1636,51 @@ class TestCase: self.testcase_args = self.configure_testcase_args( args, self.case_file, suite.suite_tmp_path ) + client_options = self.add_random_settings(client_options) - proc, stdout, stderr, debug_log, total_time = self.run_single_test( - server_logs_level, client_options - ) - result = self.process_result_impl( - proc, stdout, stderr, debug_log, total_time - ) - result.check_if_need_retry(args, stdout, stderr, self.runs_count) - # to avoid breaking CSV parser - result.description = result.description.replace("\0", "") + if not is_valid_utf_8(self.case_file) or not is_valid_utf_8( + self.reference_file + ): + proc, stdout, stderr, debug_log, total_time = self.run_single_test( + server_logs_level, client_options + ) + result = self.process_result_impl( + proc, stdout, stderr, debug_log, total_time + ) + result.check_if_need_retry(args, stdout, stderr, self.runs_count) + # to avoid breaking CSV parser + result.description = result.description.replace("\0", "") + else: + with SharedEngineReplacer( + self.case_file, + args.replace_replicated_with_shared, + args.replace_non_replicated_with_shared, + False, + ): + with SharedEngineReplacer( + self.reference_file, + args.replace_replicated_with_shared, + args.replace_non_replicated_with_shared, + True, + ): + ( + proc, + stdout, + stderr, + debug_log, + total_time, + ) = self.run_single_test(server_logs_level, client_options) + + result = self.process_result_impl( + proc, stdout, stderr, debug_log, total_time + ) + result.check_if_need_retry( + args, stdout, stderr, self.runs_count + ) + # to avoid breaking CSV parser + result.description = result.description.replace("\0", "") if result.status == TestStatus.FAIL: result.description = self.add_info_about_settings(result.description) @@ -1688,6 +1913,8 @@ class TestSuite: self.suite_path: str = suite_path self.suite_tmp_path: str = suite_tmp_path self.suite: str = suite + self.cloud_skip_list: List[str] = [] + self.private_skip_list: List[str] = [] if args.run_by_hash_num is not None and args.run_by_hash_total is not None: if args.run_by_hash_num > args.run_by_hash_total: @@ -1987,10 +2214,16 @@ def check_server_started(args): sys.stdout.flush() retry_count = args.server_check_retries + query = "SELECT version(), arrayStringConcat(groupArray(value), ' ') FROM system.build_options WHERE name IN ('GIT_HASH', 'GIT_BRANCH')" while retry_count > 0: try: - clickhouse_execute(args, "SELECT 1", max_http_retries=1) + res = ( + str(clickhouse_execute(args, query).decode()) + .strip() + .replace("\t", " @ ") + ) print(" OK") + print(f"Connected to server {res}") sys.stdout.flush() return True except (ConnectionError, http.client.ImproperConnectionState) as e: @@ -2412,6 +2645,23 @@ def reportLogStats(args): print("\n") +def try_get_skip_list(base_dir, name): + test_names_to_skip = [] + skip_list_path = os.path.join(base_dir, name) + if not os.path.exists(skip_list_path): + return test_names_to_skip + + with open(skip_list_path, "r", encoding="utf-8") as fd: + for line in fd.read().split("\n"): + if line == "" or line[0] == " ": + continue + test_name = line.split()[0].strip() + if test_name != "": + test_names_to_skip.append(test_name) + + return test_names_to_skip + + def main(args): global server_died global stop_time @@ -2430,18 +2680,18 @@ def main(args): 
args.build_flags = collect_build_flags(args) args.changed_merge_tree_settings = collect_changed_merge_tree_settings(args) - args.suppport_system_processes_is_all_data_sent = check_table_column( - args, "system", "processes", "is_all_data_sent" - ) - if args.s3_storage and ( - BuildFlags.THREAD in args.build_flags or BuildFlags.DEBUG in args.build_flags - ): + if args.s3_storage and (BuildFlags.RELEASE not in args.build_flags): args.no_random_settings = True if args.skip: args.skip = set(args.skip) + if args.replace_replicated_with_shared: + if not args.skip: + args.skip = set([]) + args.skip = set(args.skip) + base_dir = os.path.abspath(args.queries) # Keep same default values as in queries/shell_config.sh @@ -2516,6 +2766,8 @@ def main(args): ) total_tests_run = 0 + cloud_skip_list = try_get_skip_list(base_dir, "../queries-no-cloud-tests.txt") + private_skip_list = try_get_skip_list(base_dir, "../queries-no-private-tests.txt") for suite in sorted(os.listdir(base_dir), key=suite_key_func): if server_died.is_set(): @@ -2525,6 +2777,8 @@ def main(args): if test_suite is None: continue + test_suite.cloud_skip_list = cloud_skip_list + test_suite.private_skip_list = private_skip_list total_tests_run += do_run_tests(args.jobs, test_suite, args.parallel) if server_died.is_set(): @@ -2644,7 +2898,14 @@ def find_clickhouse_command(binary, command): def get_additional_client_options(args): if args.client_option: - return " ".join("--" + option for option in args.client_option) + client_options = " ".join("--" + option for option in args.client_option) + if "CLICKHOUSE_CLIENT_OPT" in os.environ: + return os.environ["CLICKHOUSE_CLIENT_OPT"] + client_options + else: + return client_options + else: + if "CLICKHOUSE_CLIENT_OPT" in os.environ: + return os.environ["CLICKHOUSE_CLIENT_OPT"] return "" @@ -2839,6 +3100,43 @@ def parse_args(): help="Display $ characters after line with trailing whitespaces in diff output", ) + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument( + "--cloud", + action="store_true", + default=None, + dest="cloud", + help="Run only tests that are supported in ClickHouse Cloud environment", + ) + + group.add_argument( + "--no-cloud", + action="store_false", + default=None, + dest="cloud", + help="Run all the tests, including the ones not supported in ClickHouse Cloud environment", + ) + parser.set_defaults(cloud=False) + + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument( + "--private", + action="store_true", + default=None, + dest="private", + help="Run only tests that are supported in the private build", + ) + + group.add_argument( + "--no-private", + action="store_false", + default=None, + dest="private", + help="Run all the tests, including the ones not supported in the private build", + ) + # Only used to skip tests via "../queries-no-private-tests.txt", so it's fine to keep it enabled by default + parser.set_defaults(private=True) + group = parser.add_mutually_exclusive_group(required=False) group.add_argument( "--zookeeper", @@ -2920,6 +3218,18 @@ def parse_args(): default=False, help="Do not include tests that are not supported with parallel replicas feature", ) + parser.add_argument( + "--replace-replicated-with-shared", + action="store_true", + default=os.environ.get("USE_META_IN_KEEPER_FOR_MERGE_TREE", False), + help="Replace ReplicatedMergeTree engine with SharedMergeTree", + ) + parser.add_argument( + "--replace-non-replicated-with-shared", + action="store_true", + default=False, + help="Replace ordinary 
MergeTree engine with SharedMergeTree", + ) return parser.parse_args() @@ -3062,6 +3372,7 @@ if __name__ == "__main__": client_options_query_str = get_additional_client_options_url(args) args.client_options_query_str = client_options_query_str + "&" + args.client_options_query_str += os.environ["CLICKHOUSE_URL_PARAMS"] os.environ["CLICKHOUSE_URL_PARAMS"] += client_options_query_str else: args.client_options_query_str = "" @@ -3072,4 +3383,7 @@ if __name__ == "__main__": if args.db_engine and args.db_engine == "Ordinary": MESSAGES_TO_RETRY.append(" locking attempt on ") + if args.replace_replicated_with_shared: + args.s3_storage = True + main(args) From 80723134d6957f72606a419040c8101ec60c05e9 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 13 Mar 2024 14:00:57 +0100 Subject: [PATCH 260/374] Fix fast test #ci_set_analyzer --- src/Storages/StorageMerge.cpp | 89 +++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 31 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index e695594873b..f0b9d58f3dd 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1058,47 +1058,74 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( Block pipe_header = builder->getHeader(); - auto get_column_options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects().withVirtuals(); - if (storage_snapshot_->storage.supportsSubcolumns()) - get_column_options.withSubcolumns(); - LOG_DEBUG(&Poco::Logger::get("createSources"), "Processed:{}\nStorage:{}", toString(processed_stage), toString(storage_stage)); - String table_alias; if (allow_experimental_analyzer) - table_alias = modified_query_info.query_tree->as()->getJoinTree()->as()->getAlias(); - - String database_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? "_database" : table_alias + "._database"; - String table_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? "_table" : table_alias + "._table"; - - if (has_database_virtual_column && common_header.has(database_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) { - ColumnWithTypeAndName column; - column.name = database_column; - column.type = std::make_shared(std::make_shared()); - column.column = column.type->createColumnConst(0, Field(database_name)); + String table_alias = modified_query_info.query_tree->as()->getJoinTree()->as()->getAlias(); - auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto adding_column_actions = std::make_shared( - std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); + String database_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? "_database" : table_alias + "._database"; + String table_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? 
"_table" : table_alias + "._table"; - builder->addSimpleTransform([&](const Block & stream_header) - { return std::make_shared(stream_header, adding_column_actions); }); + if (has_database_virtual_column && common_header.has(database_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) + { + ColumnWithTypeAndName column; + column.name = database_column; + column.type = std::make_shared(std::make_shared()); + column.column = column.type->createColumnConst(0, Field(database_name)); + + auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); + auto adding_column_actions = std::make_shared( + std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); + + builder->addSimpleTransform([&](const Block & stream_header) + { return std::make_shared(stream_header, adding_column_actions); }); + } + + if (has_table_virtual_column && common_header.has(table_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) + { + ColumnWithTypeAndName column; + column.name = table_column; + column.type = std::make_shared(std::make_shared()); + column.column = column.type->createColumnConst(0, Field(table_name)); + + auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); + auto adding_column_actions = std::make_shared( + std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); + + builder->addSimpleTransform([&](const Block & stream_header) + { return std::make_shared(stream_header, adding_column_actions); }); + } } - - if (has_table_virtual_column && common_header.has(table_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) + else { - ColumnWithTypeAndName column; - column.name = table_column; - column.type = std::make_shared(std::make_shared()); - column.column = column.type->createColumnConst(0, Field(table_name)); + if (has_database_virtual_column && common_header.has("_database") && !pipe_header.has("_database")) + { + ColumnWithTypeAndName column; + column.name = "_database"; + column.type = std::make_shared(std::make_shared()); + column.column = column.type->createColumnConst(0, Field(database_name)); - auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto adding_column_actions = std::make_shared( - std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); + auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); + auto adding_column_actions = std::make_shared( + std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); + builder->addSimpleTransform([&](const Block & stream_header) + { return std::make_shared(stream_header, adding_column_actions); }); + } - builder->addSimpleTransform([&](const Block & stream_header) - { return std::make_shared(stream_header, adding_column_actions); }); + if (has_table_virtual_column && common_header.has("_table") && !pipe_header.has("_table")) + { + ColumnWithTypeAndName column; + column.name = "_table"; + column.type = std::make_shared(std::make_shared()); + column.column = column.type->createColumnConst(0, Field(table_name)); + + auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); + auto adding_column_actions = std::make_shared( + 
std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); + builder->addSimpleTransform([&](const Block & stream_header) + { return std::make_shared(stream_header, adding_column_actions); }); + } } /// Subordinary tables could have different but convertible types, like numeric types of different width. From d8c5008280aaf19bd481d436099afd89019a81c4 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 13 Mar 2024 13:07:14 +0000 Subject: [PATCH 261/374] Follow up to #61258 --- .../functions/other-functions.md | 6 ++--- src/Functions/sleep.h | 25 ++++--------------- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 288905c83da..e7fca31483a 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -298,7 +298,7 @@ Full columns and constants are represented differently in memory. Functions usua Accepts any arguments, including `NULL` and does nothing. Always returns 0. The argument is internally still evaluated. Useful e.g. for benchmarks. -## sleep(seconds) +## sleep Used to introduce a delay or pause in the execution of a query. It is primarily used for testing and debugging purposes. @@ -310,7 +310,7 @@ sleep(seconds) **Arguments** -- `seconds`: [Int](../../sql-reference/data-types/int-uint.md) The number of seconds to pause the query execution to a maximum of 3 seconds. It can be a floating-point value to specify fractional seconds. +- `seconds`: [UInt*](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md) The number of seconds to pause the query execution to a maximum of 3 seconds. It can be a floating-point value to specify fractional seconds. **Returned value** @@ -360,7 +360,7 @@ sleepEachRow(seconds) **Arguments** -- `seconds`: [Int](../../sql-reference/data-types/int-uint.md) The number of seconds to pause the query execution for each row in the result set to a maximum of 3 seconds. It can be a floating-point value to specify fractional seconds. +- `seconds`: [UInt*](../../sql-reference/data-types/int-uint.md) or [Float*](../../sql-reference/data-types/float.md) The number of seconds to pause the query execution for each row in the result set to a maximum of 3 seconds. It can be a floating-point value to specify fractional seconds. **Returned value** diff --git a/src/Functions/sleep.h b/src/Functions/sleep.h index 73d58ca6b5b..84f08dd5440 100644 --- a/src/Functions/sleep.h +++ b/src/Functions/sleep.h @@ -62,32 +62,17 @@ public: { } - /// Get the name of the function. - String getName() const override - { - return name; - } - - /// Do not sleep during query analysis. - bool isSuitableForConstantFolding() const override - { - return false; - } - - size_t getNumberOfArguments() const override - { - return 1; - } - + String getName() const override { return name; } + bool isSuitableForConstantFolding() const override { return false; } /// Do not sleep during query analysis. 
+ size_t getNumberOfArguments() const override { return 1; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { WhichDataType which(arguments[0]); - if (!which.isFloat() - && !which.isNativeUInt()) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}, expected Float64", + if (!which.isFloat() && !which.isNativeUInt()) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}, expected UInt* or Float*", arguments[0]->getName(), getName()); return std::make_shared(); From e6af636a549f808730c87ab69a6b76531d3dbc95 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 13 Mar 2024 14:07:49 +0100 Subject: [PATCH 262/374] fix data race in poco tcp server --- base/poco/Net/src/TCPServerDispatcher.cpp | 4 +++- src/Common/tests/gtest_connection_pool.cpp | 11 +++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/base/poco/Net/src/TCPServerDispatcher.cpp b/base/poco/Net/src/TCPServerDispatcher.cpp index 20a1ffe1b4f..7f9f9a20ee7 100644 --- a/base/poco/Net/src/TCPServerDispatcher.cpp +++ b/base/poco/Net/src/TCPServerDispatcher.cpp @@ -93,7 +93,7 @@ void TCPServerDispatcher::release() void TCPServerDispatcher::run() { - AutoPtr guard(this, true); // ensure object stays alive + AutoPtr guard(this); // ensure object stays alive int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds(); @@ -149,11 +149,13 @@ void TCPServerDispatcher::enqueue(const StreamSocket& socket) { try { + this->duplicate(); _threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName); ++_currentThreads; } catch (Poco::Exception& exc) { + this->release(); ++_refusedConnections; std::cerr << "Got exception while starting thread for connection. 
Error code: " << exc.code() << ", message: '" << exc.displayText() << "'" << std::endl; diff --git a/src/Common/tests/gtest_connection_pool.cpp b/src/Common/tests/gtest_connection_pool.cpp index c271cc0e2ec..dcc3c11fd52 100644 --- a/src/Common/tests/gtest_connection_pool.cpp +++ b/src/Common/tests/gtest_connection_pool.cpp @@ -123,17 +123,15 @@ protected: std::string getServerUrl() const { - return "http://" + server_data.socket->address().toString(); + return "http://" + server_data.server->socket().address().toString(); } void startServer() { server_data.reset(); - server_data.params = new Poco::Net::HTTPServerParams(); - server_data.socket = std::make_unique(server_data.port); server_data.handler_factory = new HTTPRequestHandlerFactory(slowdown_receive); server_data.server = std::make_unique( - server_data.handler_factory, *server_data.socket, server_data.params); + server_data.handler_factory, server_data.port); server_data.server->start(); } @@ -155,8 +153,7 @@ protected: { // just some port to avoid collisions with others tests UInt16 port = 9871; - Poco::Net::HTTPServerParams::Ptr params; - std::unique_ptr socket; + HTTPRequestHandlerFactory::Ptr handler_factory; std::unique_ptr server; @@ -171,8 +168,6 @@ protected: server = nullptr; handler_factory = nullptr; - socket = nullptr; - params = nullptr; } ~ServerData() { From fefee44540bd029eb2d354706f61a1d96ed0e272 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 13 Mar 2024 14:27:47 +0100 Subject: [PATCH 263/374] Update settings changes history --- src/Core/SettingsChangesHistory.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index e680c02671a..d7b0669f64f 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -93,6 +93,7 @@ static std::map sett {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication is dependent materialized view cannot work together with async inserts."}, {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, + {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, }}, {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, From 9bb71291d50d29dd0c401580402adc12290224bb Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 13 Mar 2024 14:42:57 +0100 Subject: [PATCH 264/374] Fix unit test --- src/Interpreters/tests/gtest_filecache.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index b596ccb0285..2679d1b8d18 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -245,7 +245,7 @@ void download(FileSegment & file_segment) ASSERT_EQ(file_segment.state(), State::DOWNLOADING); ASSERT_EQ(file_segment.getDownloadedSize(), 0); - ASSERT_TRUE(file_segment.reserve(file_segment.range().size())); + 
ASSERT_TRUE(file_segment.reserve(file_segment.range().size(), 1000)); download(cache_base_path, file_segment); ASSERT_EQ(file_segment.state(), State::DOWNLOADING); @@ -257,7 +257,7 @@ void assertDownloadFails(FileSegment & file_segment) { ASSERT_EQ(file_segment.getOrSetDownloader(), FileSegment::getCallerId()); ASSERT_EQ(file_segment.getDownloadedSize(), 0); - ASSERT_FALSE(file_segment.reserve(file_segment.range().size())); + ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000)); file_segment.complete(); } @@ -956,7 +956,7 @@ TEST_F(FileCacheTest, temporaryData) for (auto & segment : *some_data_holder) { ASSERT_TRUE(segment->getOrSetDownloader() == DB::FileSegment::getCallerId()); - ASSERT_TRUE(segment->reserve(segment->range().size())); + ASSERT_TRUE(segment->reserve(segment->range().size(), 1000)); download(*segment); segment->complete(); } From 4f2be003521b00c9a9087e17fcffdf08cabcd5f1 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 13 Mar 2024 13:55:32 +0000 Subject: [PATCH 265/374] Some fixups --- docs/en/sql-reference/functions/functions-for-nulls.md | 10 +++++----- docs/en/sql-reference/operators/index.md | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index e73d6c899e7..61da9a191a1 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -10,11 +10,13 @@ sidebar_label: Nullable Returns whether the argument is [NULL](../../sql-reference/syntax.md#null). +See also operator [`IS NULL`](../operators/index.md#is_null). + ``` sql isNull(x) ``` -Alias: `IS NULL`. +Alias: `ISNULL` **Arguments** @@ -54,12 +56,12 @@ Result: Returns whether the argument is not [NULL](../../sql-reference/syntax.md#null-literal). +See also operator [`IS NOT NULL`](../operators/index.md#is_not_null). + ``` sql isNotNull(x) ``` -Alias: `IS NOT NULL`. - **Arguments:** - `x` — A value of non-compound data type. @@ -102,8 +104,6 @@ Returns whether the argument is 0 (zero) or [NULL](../../sql-reference/syntax.md isZeroOrNull(x) ``` -Alias: `x = 0 OR x IS NULL`. - **Arguments:** - `x` — A value of non-compound data type. diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 120e464e009..31bf43e8b35 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -353,7 +353,7 @@ For efficiency, the `and` and `or` functions accept any number of arguments. The ClickHouse supports the `IS NULL` and `IS NOT NULL` operators. -### IS NULL +### IS NULL {#is_null} - For [Nullable](../../sql-reference/data-types/nullable.md) type values, the `IS NULL` operator returns: - `1`, if the value is `NULL`. @@ -374,7 +374,7 @@ SELECT x+100 FROM t_null WHERE y IS NULL └──────────────┘ ``` -### IS NOT NULL +### IS NOT NULL {#is_not_null} - For [Nullable](../../sql-reference/data-types/nullable.md) type values, the `IS NOT NULL` operator returns: - `0`, if the value is `NULL`. 
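
For quick reference, a minimal sketch of the behaviour the NULL-handling doc patches above describe — the table and column names here are illustrative only and are not taken from any patch:

``` sql
-- Hypothetical demo table with a Nullable column.
CREATE TABLE t_null_demo (x Int32, y Nullable(Int32)) ENGINE = Memory;
INSERT INTO t_null_demo VALUES (1, NULL), (2, 3);

-- isNull(y) and `y IS NULL` return 1 for the NULL row and 0 otherwise;
-- `y IS NOT NULL` returns the opposite.
SELECT x, isNull(y), y IS NULL, y IS NOT NULL FROM t_null_demo ORDER BY x;
```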
From 6bbf9eb5400206c326a4e453a38612c8beb6ef89 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 13 Mar 2024 13:57:55 +0000 Subject: [PATCH 266/374] Fixup fixups --- docs/en/sql-reference/functions/functions-for-nulls.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index 61da9a191a1..4dfbf4262ed 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -16,7 +16,7 @@ See also operator [`IS NULL`](../operators/index.md#is_null). isNull(x) ``` -Alias: `ISNULL` +Alias: `ISNULL`. **Arguments** From b8abd57b0f1e4f7b059cb119ccc0a7ad1d723d2c Mon Sep 17 00:00:00 2001 From: qaziqarta <96023488+qaziqarta@users.noreply.github.com> Date: Wed, 13 Mar 2024 21:18:05 +0600 Subject: [PATCH 267/374] Updated references to format settings in datetime.md Updated references to date_time_input_format and date_time_output_format. --- docs/en/sql-reference/data-types/datetime.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/data-types/datetime.md b/docs/en/sql-reference/data-types/datetime.md index 1adff18f598..a465106c2ff 100644 --- a/docs/en/sql-reference/data-types/datetime.md +++ b/docs/en/sql-reference/data-types/datetime.md @@ -36,9 +36,9 @@ You can explicitly set a time zone for `DateTime`-type columns when creating a t The [clickhouse-client](../../interfaces/cli.md) applies the server time zone by default if a time zone isn’t explicitly set when initializing the data type. To use the client time zone, run `clickhouse-client` with the `--use_client_time_zone` parameter. -ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings.md#settings-date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function. +ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings-formats.md#date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function. -When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) setting. +When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format) setting. ## Examples @@ -147,8 +147,8 @@ Time shifts for multiple days. 
Some pacific islands changed their timezone offse - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md) - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md) - [Functions for working with arrays](../../sql-reference/functions/array-functions.md) -- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format) -- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format) +- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format) +- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format) - [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) - [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone) - [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime) From cb28c84a93709ab12fc32171bf880c0e911ec0d5 Mon Sep 17 00:00:00 2001 From: Sergei Trifonov Date: Wed, 13 Mar 2024 16:25:58 +0100 Subject: [PATCH 268/374] Fix `forget_partition` test (#61237) --- ...et_partition.sql => 02995_forget_partition.sh} | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) rename tests/queries/0_stateless/{02995_forget_partition.sql => 02995_forget_partition.sh} (63%) mode change 100644 => 100755 diff --git a/tests/queries/0_stateless/02995_forget_partition.sql b/tests/queries/0_stateless/02995_forget_partition.sh old mode 100644 new mode 100755 similarity index 63% rename from tests/queries/0_stateless/02995_forget_partition.sql rename to tests/queries/0_stateless/02995_forget_partition.sh index 269f7932ea4..8ece8d3ddb3 --- a/tests/queries/0_stateless/02995_forget_partition.sql +++ b/tests/queries/0_stateless/02995_forget_partition.sh @@ -1,5 +1,12 @@ --- Tags: zookeeper, no-replicated-database +#!/usr/bin/env bash +# Tags: zookeeper, no-replicated-database +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ drop table if exists forget_partition; create table forget_partition @@ -16,7 +23,12 @@ insert into forget_partition select number, '2024-01-01' + interval number day, alter table forget_partition drop partition '20240101'; alter table forget_partition drop partition '20240102'; +""" +# DROP PARTITION do not wait for a part to be removed from memory due to possible concurrent SELECTs, so we have to do wait manually here +while [[ $(${CLICKHOUSE_CLIENT} -q "select count() from system.parts where database=currentDatabase() and table='forget_partition' and partition='20240101'") != 0 ]]; do sleep 0.1; done + +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ set allow_unrestricted_reads_from_keeper=1; select '---before---'; @@ -31,3 +43,4 @@ select '---after---'; select name from system.zookeeper where path = '/test/02995/' || currentDatabase() || '/rmt/block_numbers' order by name; drop table forget_partition; +""" From a366acf59c91fca4df1262c812ac8c58ee7643e2 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 13 Mar 2024 15:36:53 +0000 Subject: [PATCH 269/374] Don't use default cluster in test test_distibuted_settings --- .../test_distributed_config/configs/clusters.xml | 12 ++++++++++++ tests/integration/test_distributed_config/test.py | 6 +++--- 2 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 tests/integration/test_distributed_config/configs/clusters.xml diff --git a/tests/integration/test_distributed_config/configs/clusters.xml b/tests/integration/test_distributed_config/configs/clusters.xml new file mode 100644 index 00000000000..754d765f23f --- /dev/null +++ b/tests/integration/test_distributed_config/configs/clusters.xml @@ -0,0 +1,12 @@ + + + + + + localhost + 9000 + + + + + diff --git a/tests/integration/test_distributed_config/test.py b/tests/integration/test_distributed_config/test.py index 500e9ecdeed..c08334985b1 100644 --- a/tests/integration/test_distributed_config/test.py +++ b/tests/integration/test_distributed_config/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster import logging cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/overrides.xml"]) +node = cluster.add_instance("node", main_configs=["configs/overrides.xml", "configs/clusters.xml"]) @pytest.fixture(scope="module") @@ -23,7 +23,7 @@ def test_distibuted_settings(start_cluster): node.query( """ CREATE TABLE data_1 (key Int) ENGINE Memory(); - CREATE TABLE dist_1 as data_1 ENGINE Distributed(default, default, data_1) SETTINGS flush_on_detach = true; + CREATE TABLE dist_1 as data_1 ENGINE Distributed(localhost_cluster, default, data_1) SETTINGS flush_on_detach = true; SYSTEM STOP DISTRIBUTED SENDS dist_1; INSERT INTO dist_1 SETTINGS prefer_localhost_replica=0 VALUES (1); DETACH TABLE dist_1; @@ -36,7 +36,7 @@ def test_distibuted_settings(start_cluster): node.query( """ CREATE TABLE data_2 (key Int) ENGINE Memory(); - CREATE TABLE dist_2 as data_2 ENGINE Distributed(default, default, data_2); + CREATE TABLE dist_2 as data_2 ENGINE Distributed(localhost_cluster, default, data_2); SYSTEM STOP DISTRIBUTED SENDS dist_2; INSERT INTO dist_2 SETTINGS prefer_localhost_replica=0 VALUES (2); DETACH TABLE dist_2; From a4a859ba31c731d8d5163fe5759a166c8c95601c Mon Sep 17 00:00:00 2001 From: qaziqarta <96023488+qaziqarta@users.noreply.github.com> Date: Wed, 13 Mar 2024 21:37:50 +0600 Subject: [PATCH 270/374] Updated ru portion of previous commit --- 
docs/ru/sql-reference/data-types/datetime.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index 57f24786bb7..25e87794147 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -27,9 +27,9 @@ DateTime([timezone]) Консольный клиент ClickHouse по умолчанию использует часовой пояс сервера, если для значения `DateTime` часовой пояс не был задан в явном виде при инициализации типа данных. Чтобы использовать часовой пояс клиента, запустите [clickhouse-client](../../interfaces/cli.md) с параметром `--use_client_time_zone`. -ClickHouse отображает значения в зависимости от значения параметра [date\_time\_output\_format](../../operations/settings/index.md#settings-date_time_output_format). Текстовый формат по умолчанию `YYYY-MM-DD hh:mm:ss`. Кроме того, вы можете поменять отображение с помощью функции [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime). +ClickHouse отображает значения в зависимости от значения параметра [date\_time\_output\_format](../../operations/settings/settings-formats.md#date_time_output_format). Текстовый формат по умолчанию `YYYY-MM-DD hh:mm:ss`. Кроме того, вы можете поменять отображение с помощью функции [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime). -При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date_time_input_format](../../operations/settings/index.md#settings-date_time_input_format). +При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format). 
## Примеры {#primery} @@ -119,8 +119,8 @@ FROM dt - [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md) - [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) - [Функции для работы с массивами](../../sql-reference/functions/array-functions.md) -- [Настройка `date_time_input_format`](../../operations/settings/index.md#settings-date_time_input_format) -- [Настройка `date_time_output_format`](../../operations/settings/index.md) +- [Настройка `date_time_input_format`](../../operations/settings/settings-formats.md#date_time_input_format) +- [Настройка `date_time_output_format`](../../operations/settings/settings-formats.md#date_time_output_format) - [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) - [Параметр `session_timezone`](../../operations/settings/settings.md#session_timezone) - [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime) From 868d24415b295da1c0d325e837823bfb7cde9253 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 13 Mar 2024 15:48:29 +0000 Subject: [PATCH 271/374] Fix style --- tests/integration/test_distributed_config/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_distributed_config/test.py b/tests/integration/test_distributed_config/test.py index c08334985b1..bf4bb5a4335 100644 --- a/tests/integration/test_distributed_config/test.py +++ b/tests/integration/test_distributed_config/test.py @@ -3,7 +3,9 @@ from helpers.cluster import ClickHouseCluster import logging cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/overrides.xml", "configs/clusters.xml"]) +node = cluster.add_instance( + "node", main_configs=["configs/overrides.xml", "configs/clusters.xml"] +) @pytest.fixture(scope="module") From 51ccb520fb5747a00b7c56adae197467284c98d5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 13 Mar 2024 15:54:06 +0000 Subject: [PATCH 272/374] Change only ignore_limits setting --- src/Planner/Planner.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 861b12e3da2..fde9f110d09 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1081,9 +1081,10 @@ void addBuildSubqueriesForSetsStepIfNeeded( for (auto & subquery : subqueries) { auto query_tree = subquery->detachQueryTree(); - /// I suppose it should be better to use all flags from select_query_options, + auto subquery_options = select_query_options.subquery(); + /// I don't know if this is a good decision, /// But for now it is done in the same way as in old analyzer. 
- auto subquery_options = SelectQueryOptions(QueryProcessingStage::Complete, select_query_options.subquery_depth).subquery(); + subquery_options.ignore_limits = false; Planner subquery_planner( query_tree, subquery_options, From 617138cf482f9a55487d0a6c2e63dacdbbdafe8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 13 Mar 2024 16:54:28 +0100 Subject: [PATCH 273/374] Teach the fuzzer to use other numeric types --- src/Client/QueryFuzzer.cpp | 46 +++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/src/Client/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp index 7f1dce4b29a..38e78157096 100644 --- a/src/Client/QueryFuzzer.cpp +++ b/src/Client/QueryFuzzer.cpp @@ -914,6 +914,38 @@ ASTPtr QueryFuzzer::fuzzLiteralUnderExpressionList(ASTPtr child) child = makeASTFunction( "toFixedString", std::make_shared(value), std::make_shared(static_cast(value.size()))); } + else if (type == Field::Types::Which::UInt64 && fuzz_rand() % 7 == 0) + { + child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toUInt128" : "toUInt256", std::make_shared(l->value.get())); + } + else if (type == Field::Types::Which::Int64 && fuzz_rand() % 7 == 0) + { + child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toInt128" : "toInt256", std::make_shared(l->value.get())); + } + else if (type == Field::Types::Which::Float64 && fuzz_rand() % 7 == 0) + { + int decimal = fuzz_rand() % 4; + if (decimal == 0) + child = makeASTFunction( + "toDecimal32", + std::make_shared(l->value.get()), + std::make_shared(static_cast(fuzz_rand() % 9))); + else if (decimal == 1) + child = makeASTFunction( + "toDecimal64", + std::make_shared(l->value.get()), + std::make_shared(static_cast(fuzz_rand() % 18))); + else if (decimal == 2) + child = makeASTFunction( + "toDecimal128", + std::make_shared(l->value.get()), + std::make_shared(static_cast(fuzz_rand() % 38))); + else + child = makeASTFunction( + "toDecimal256", + std::make_shared(l->value.get()), + std::make_shared(static_cast(fuzz_rand() % 76))); + } if (fuzz_rand() % 7 == 0) child = makeASTFunction("toNullable", child); @@ -933,7 +965,19 @@ ASTPtr QueryFuzzer::reverseLiteralFuzzing(ASTPtr child) { if (auto * function = child.get()->as()) { - std::unordered_set can_be_reverted{"toNullable", "toLowCardinality", "materialize"}; + const std::unordered_set can_be_reverted{ + "materialize", + "toDecimal32", /// Keeping the first parameter only should be ok (valid query most of the time) + "toDecimal64", + "toDecimal128", + "toDecimal256", + "toFixedString", /// Same as toDecimal + "toInt128", + "toInt256", + "toLowCardinality" + "toNullable", + "toUInt128", + "toUInt256"}; if (can_be_reverted.contains(function->name) && function->children.size() == 1) { if (fuzz_rand() % 7 == 0) From aecc135f5dd76c55b99205af170beafc06b9ee62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 13 Mar 2024 17:03:18 +0100 Subject: [PATCH 274/374] Add more details about how NULLs are processed in aggregations --- docs/en/sql-reference/aggregate-functions/index.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/en/sql-reference/aggregate-functions/index.md b/docs/en/sql-reference/aggregate-functions/index.md index 5d2229fbcce..e97db436271 100644 --- a/docs/en/sql-reference/aggregate-functions/index.md +++ b/docs/en/sql-reference/aggregate-functions/index.md @@ -18,6 +18,10 @@ ClickHouse also supports: During aggregation, all `NULL`s are skipped. 
If the aggregation has several parameters it will ignore any row in which one or more of the parameters are NULL. +There are a few exceptions to this rule: + - Both [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md) and [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) support modifiers that respect NULLs (`first_value(b) ignore nulls`). + - [`count`](../../sql-reference/aggregate-functions/reference/count.md) without parameters (`count()`) or with constant ones (`count(1)`) will count NULL rows too. With a column as parameter, it will count only not null values. + **Examples:** Consider this table: From 5b15ec6ae19fe1caa3800dfa333b61570cfc92b4 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 13 Mar 2024 17:10:55 +0100 Subject: [PATCH 275/374] Move test from stateless to integration --- .../config.d/storage_conf.xml | 4 + .../integration/test_filesystem_cache/test.py | 75 +++++++++++++++++++ ...810_system_sync_filesystem_cache.reference | 3 - .../02810_system_sync_filesystem_cache.sh | 69 ----------------- 4 files changed, 79 insertions(+), 72 deletions(-) delete mode 100644 tests/queries/0_stateless/02810_system_sync_filesystem_cache.reference delete mode 100755 tests/queries/0_stateless/02810_system_sync_filesystem_cache.sh diff --git a/tests/integration/test_filesystem_cache/config.d/storage_conf.xml b/tests/integration/test_filesystem_cache/config.d/storage_conf.xml index b614815b34f..a8e4f9f8a99 100644 --- a/tests/integration/test_filesystem_cache/config.d/storage_conf.xml +++ b/tests/integration/test_filesystem_cache/config.d/storage_conf.xml @@ -7,4 +7,8 @@ + + system + filesystem_cache_log
+
diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index c44d817c57c..dfab462732a 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -426,3 +426,78 @@ def test_force_filesystem_cache_on_merges(cluster): test(node, True) node = cluster.instances["node"] test(node, False) + + +def test_system_sync_filesystem_cache(cluster): + node = cluster.instances["node"] + node.query( + """ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (a Int32, b String) +ENGINE = MergeTree() ORDER BY tuple() +SETTINGS disk = disk(type = cache, + max_size = '100Ki', + path = "test_system_sync_filesystem_cache", + delayed_cleanup_interval_ms = 10000000, disk = hdd_blob), + min_bytes_for_wide_part = 10485760; + +INSERT INTO test SELECT 1, 'test'; + """ + ) + + query_id = "system_sync_filesystem_cache_1" + node.query( + "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1", + query_id=query_id, + ) + + key, offset = ( + node.query( + f""" + SYSTEM FLUSH LOGS; + SELECT key, offset FROM system.filesystem_cache_log WHERE query_id = '{query_id}' ORDER BY size DESC LIMIT 1; + """ + ) + .strip() + .split("\t") + ) + + cache_path = node.query( + f"SELECT cache_path FROM system.filesystem_cache WHERE key = '{key}' and file_segment_range_begin = {offset}" + ) + + node.exec_in_container(["bash", "-c", f"rm {cache_path}"]) + + assert key in node.query("SYSTEM SYNC FILESYSTEM CACHE") + + node.query("SELECT * FROM test FORMAT Null") + assert key not in node.query("SYSTEM SYNC FILESYSTEM CACHE") + + query_id = "system_sync_filesystem_cache_2" + node.query( + "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1", + query_id=query_id, + ) + + key, offset = ( + node.query( + f""" + SYSTEM FLUSH LOGS; + SELECT key, offset FROM system.filesystem_cache_log WHERE query_id = '{query_id}' ORDER BY size DESC LIMIT 1; + """ + ) + .strip() + .split("\t") + ) + cache_path = node.query( + f"SELECT cache_path FROM system.filesystem_cache WHERE key = '{key}' and file_segment_range_begin = {offset}" + ) + + node.exec_in_container(["bash", "-c", f"echo -n 'fff' > {cache_path}"]) + + assert key in node.query("SYSTEM SYNC FILESYSTEM CACHE") + + node.query("SELECT * FROM test FORMAT Null") + + assert key not in node.query("SYSTEM SYNC FILESYSTEM CACHE") diff --git a/tests/queries/0_stateless/02810_system_sync_filesystem_cache.reference b/tests/queries/0_stateless/02810_system_sync_filesystem_cache.reference deleted file mode 100644 index 7614df8ec46..00000000000 --- a/tests/queries/0_stateless/02810_system_sync_filesystem_cache.reference +++ /dev/null @@ -1,3 +0,0 @@ -ok -ok -ok diff --git a/tests/queries/0_stateless/02810_system_sync_filesystem_cache.sh b/tests/queries/0_stateless/02810_system_sync_filesystem_cache.sh deleted file mode 100755 index c88ba4d5a74..00000000000 --- a/tests/queries/0_stateless/02810_system_sync_filesystem_cache.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, no-parallel, no-s3-storage, no-random-settings - -# set -x - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - - -$CLICKHOUSE_CLIENT -nm --query """ -DROP TABLE IF EXISTS test; - -CREATE TABLE test (a Int32, b String) -ENGINE = MergeTree() ORDER BY tuple() -SETTINGS disk = disk(type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, delayed_cleanup_interval_ms = 10000000, disk = s3_disk), min_bytes_for_wide_part = 10485760; - -INSERT INTO test SELECT 1, 'test'; -""" - -query_id=$RANDOM - -$CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1" - -${CLICKHOUSE_CLIENT} -q "system flush logs" - -key=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; -""") - -offset=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; -""") - -path=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT cache_path FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; -""") - -rm $path - -$CLICKHOUSE_CLIENT --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1" 2>&1 | grep -F -e "No such file or directory" > /dev/null && echo "ok" || echo "fail" - -CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=fatal/g') - -$CLICKHOUSE_CLIENT --query "SYSTEM SYNC FILESYSTEM CACHE" 2>&1 | grep -q "$key" && echo 'ok' || echo 'fail' - -$CLICKHOUSE_CLIENT --query "SELECT * FROM test FORMAT Null" - -key=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; -""") - -offset=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; -""") - -path=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT cache_path FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; -""") - -echo -n 'fff' > $path - -#cat $path - -$CLICKHOUSE_CLIENT --query "SYSTEM SYNC FILESYSTEM CACHE" 2>&1 | grep -q "$key" && echo 'ok' || echo 'fail' - -$CLICKHOUSE_CLIENT --query "SELECT * FROM test FORMAT Null" - -$CLICKHOUSE_CLIENT --query "SYSTEM SYNC FILESYSTEM CACHE" From b2e067b3daee284c4a97289dc1b4dac1f920c3e6 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 13 Mar 2024 16:41:35 +0000 Subject: [PATCH 276/374] Fix logical error in RabbitMQ storage with MATERIALIZED columns --- .../table-engines/integrations/rabbitmq.md | 8 ++- src/Interpreters/InterpreterInsertQuery.cpp | 23 +++++-- src/Interpreters/InterpreterInsertQuery.h | 2 +- src/Storages/RabbitMQ/RabbitMQSource.cpp | 14 +++- src/Storages/RabbitMQ/StorageRabbitMQ.cpp | 35 ++++++---- .../integration/test_storage_rabbitmq/test.py | 64 ++++++++++++++----- .../test_rabbitmq_json.reference | 50 --------------- 7 files changed, 106 insertions(+), 90 deletions(-) delete mode 100644 tests/integration/test_storage_rabbitmq/test_rabbitmq_json.reference diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index 0f3fef3d6fb..a4d0cf78066 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -18,8 +18,8 @@ This engine allows integrating ClickHouse with [RabbitMQ](https://www.rabbitmq.c ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( - name1 
[type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + name1 [type1], + name2 [type2], ... ) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'host:port' [or rabbitmq_address = 'amqp(s)://guest:guest@localhost/vhost'], @@ -198,6 +198,10 @@ Additional virtual columns when `kafka_handle_error_mode='stream'`: Note: `_raw_message` and `_error` virtual columns are filled only in case of exception during parsing, they are always `NULL` when message was parsed successfully. +## Caveats {#caveats} + +Even though you may specify [default column expressions](/docs/en/sql-reference/statements/create/table.md/#default_values) (such as `DEFAULT`, `MATERIALIZED`, `ALIAS`) in the table definition, these will be ignored. Instead, the columns will be filled with their respective default values for their types. + ## Data formats support {#data-formats-support} RabbitMQ engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse. diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 70f9e0c51da..3e8bb268fe7 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -151,7 +151,7 @@ Block InterpreterInsertQuery::getSampleBlock( names.emplace_back(std::move(current_name)); } - return getSampleBlock(names, table, metadata_snapshot, allow_materialized); + return getSampleBlockImpl(names, table, metadata_snapshot, no_destination, allow_materialized); } std::optional InterpreterInsertQuery::getInsertColumnNames() const @@ -173,13 +173,18 @@ std::optional InterpreterInsertQuery::getInsertColumnNames() const return names; } -Block InterpreterInsertQuery::getSampleBlock( +Block InterpreterInsertQuery::getSampleBlockImpl( const Names & names, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot, + bool no_destination, bool allow_materialized) { Block table_sample_physical = metadata_snapshot->getSampleBlock(); + Block table_sample_virtuals; + if (no_destination) + table_sample_virtuals = table->getVirtualsHeader(); + Block table_sample_insertable = metadata_snapshot->getSampleBlockInsertable(); Block res; for (const auto & current_name : names) @@ -194,13 +199,19 @@ Block InterpreterInsertQuery::getSampleBlock( if (table_sample_physical.has(current_name)) { if (!allow_materialized) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot insert column {}, because it is MATERIALIZED column.", - current_name); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot insert column {}, because it is MATERIALIZED column", current_name); res.insert(ColumnWithTypeAndName(table_sample_physical.getByName(current_name).type, current_name)); } - else /// The table does not have a column with that name + else if (table_sample_virtuals.has(current_name)) + { + res.insert(ColumnWithTypeAndName(table_sample_virtuals.getByName(current_name).type, current_name)); + } + else + { + /// The table does not have a column with that name throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "No such column {} in table {}", current_name, table->getStorageID().getNameForLogs()); + } } else res.insert(ColumnWithTypeAndName(table_sample_insertable.getByName(current_name).type, current_name)); @@ -276,7 +287,7 @@ Chain InterpreterInsertQuery::buildChain( if (!running_group) running_group = std::make_shared(getContext()); - auto sample = getSampleBlock(columns, table, metadata_snapshot, allow_materialized); + auto sample = getSampleBlockImpl(columns, table, 
metadata_snapshot, no_destination, allow_materialized); if (check_access) getContext()->checkAccess(AccessType::INSERT, table->getStorageID(), sample.getNames()); diff --git a/src/Interpreters/InterpreterInsertQuery.h b/src/Interpreters/InterpreterInsertQuery.h index 3647126afb9..bf73fb2a319 100644 --- a/src/Interpreters/InterpreterInsertQuery.h +++ b/src/Interpreters/InterpreterInsertQuery.h @@ -69,7 +69,7 @@ public: bool shouldAddSquashingFroStorage(const StoragePtr & table) const; private: - static Block getSampleBlock(const Names & names, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot, bool allow_materialized); + static Block getSampleBlockImpl(const Names & names, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot, bool no_destination, bool allow_materialized); ASTPtr query_ptr; const bool allow_materialized; diff --git a/src/Storages/RabbitMQ/RabbitMQSource.cpp b/src/Storages/RabbitMQ/RabbitMQSource.cpp index 4dc257074f3..09c1bf1b2e7 100644 --- a/src/Storages/RabbitMQ/RabbitMQSource.cpp +++ b/src/Storages/RabbitMQ/RabbitMQSource.cpp @@ -11,11 +11,21 @@ namespace DB { -static std::pair getHeaders(const StorageSnapshotPtr & storage_snapshot) +static std::pair getHeaders(const StorageSnapshotPtr & storage_snapshot, const Names & column_names) { + auto all_columns_header = storage_snapshot->metadata->getSampleBlock(); + auto non_virtual_header = storage_snapshot->metadata->getSampleBlockNonMaterialized(); auto virtual_header = storage_snapshot->virtual_columns->getSampleBlock(); + for (const auto & column_name : column_names) + { + if (non_virtual_header.has(column_name) || virtual_header.has(column_name)) + continue; + const auto & column = all_columns_header.getByName(column_name); + non_virtual_header.insert(column); + } + return {non_virtual_header, virtual_header}; } @@ -40,7 +50,7 @@ RabbitMQSource::RabbitMQSource( : RabbitMQSource( storage_, storage_snapshot_, - getHeaders(storage_snapshot_), + getHeaders(storage_snapshot_, columns), context_, columns, max_block_size_, diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 980fccd307e..b882fd2728c 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -133,6 +134,9 @@ StorageRabbitMQ::StorageRabbitMQ( if (configuration.secure) SSL_library_init(); + if (!columns_.getMaterialized().empty() || !columns_.getAliases().empty() || !columns_.getDefaults().empty() || !columns_.getEphemeral().empty()) + context_->addWarningMessage("RabbitMQ table engine doesn't support ALIAS, DEFAULT or MATERIALIZED columns. 
They will be ignored and filled with default values"); + StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); setInMemoryMetadata(storage_metadata); @@ -1055,18 +1059,7 @@ bool StorageRabbitMQ::tryStreamToViews() if (!table) throw Exception(ErrorCodes::LOGICAL_ERROR, "Engine table {} doesn't exist.", table_id.getNameForLogs()); - // Create an INSERT query for streaming data - auto insert = std::make_shared(); - insert->table_id = table_id; - - // Only insert into dependent views and expect that input blocks contain virtual columns - InterpreterInsertQuery interpreter(insert, rabbitmq_context, false, true, true); - auto block_io = interpreter.execute(); - auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext()); - auto column_names = block_io.pipeline.getHeader().getNames(); - auto sample_block = storage_snapshot->getSampleBlockForColumns(column_names); - auto block_size = getMaxBlockSize(); // Create a stream for each consumer and join them in a union stream @@ -1082,13 +1075,29 @@ bool StorageRabbitMQ::tryStreamToViews() for (size_t i = 0; i < num_created_consumers; ++i) { auto source = std::make_shared( - *this, storage_snapshot, rabbitmq_context, column_names, block_size, - max_execution_time_ms, rabbitmq_settings->rabbitmq_handle_error_mode, false); + *this, storage_snapshot, rabbitmq_context, Names{}, block_size, + max_execution_time_ms, rabbitmq_settings->rabbitmq_handle_error_mode); sources.emplace_back(source); pipes.emplace_back(source); } + // Create an INSERT query for streaming data + auto insert = std::make_shared(); + insert->table_id = table_id; + if (!sources.empty()) + { + auto column_list = std::make_shared(); + const auto & header = sources[0]->getPort().getHeader(); + for (const auto & column : header) + column_list->children.emplace_back(std::make_shared(column.name)); + insert->columns = std::move(column_list); + } + + // Only insert into dependent views and expect that input blocks contain virtual columns + InterpreterInsertQuery interpreter(insert, rabbitmq_context, /* allow_materialized_ */ false, /* no_squash_ */ true, /* no_destination_ */ true); + auto block_io = interpreter.execute(); + block_io.pipeline.complete(Pipe::unitePipes(std::move(pipes))); std::atomic_size_t rows = 0; diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 280ce230921..0f1c5eb17dd 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -53,13 +53,13 @@ instance3 = cluster.add_instance( # Helpers -def rabbitmq_check_result(result, check=False, ref_file="test_rabbitmq_json.reference"): - fpath = p.join(p.dirname(__file__), ref_file) - with open(fpath) as reference: - if check: - assert TSV(result) == TSV(reference) - else: - return TSV(result) == TSV(reference) +def rabbitmq_check_result(result, check=False, reference=None): + if reference is None: + reference = "\n".join([f"{i}\t{i}" for i in range(50)]) + if check: + assert TSV(result) == TSV(reference) + else: + return TSV(result) == TSV(reference) def wait_rabbitmq_to_start(rabbitmq_docker_id, cookie, timeout=180): @@ -133,9 +133,10 @@ def test_rabbitmq_select(rabbitmq_cluster, secure): if secure: port = cluster.rabbitmq_secure_port + # MATERIALIZED and ALIAS columns are not supported in RabbitMQ engine, but we can test that it does not fail instance.query( """ - CREATE TABLE test.rabbitmq (key UInt64, value UInt64) + CREATE TABLE test.rabbitmq (key UInt64, 
value UInt64, value2 ALIAS value + 1, value3 MATERIALIZED value + 1) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = '{}:{}', rabbitmq_exchange_name = 'select', @@ -148,6 +149,11 @@ def test_rabbitmq_select(rabbitmq_cluster, secure): ) ) + assert ( + "RabbitMQ table engine doesn\\'t support ALIAS, DEFAULT or MATERIALIZED columns" + in instance.query("SELECT * FROM system.warnings") + ) + credentials = pika.PlainCredentials("root", "clickhouse") parameters = pika.ConnectionParameters( rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials @@ -379,7 +385,7 @@ def test_rabbitmq_macros(rabbitmq_cluster): def test_rabbitmq_materialized_view(rabbitmq_cluster): instance.query( """ - CREATE TABLE test.rabbitmq (key UInt64, value UInt64) + CREATE TABLE test.rabbitmq (key UInt64, value UInt64, dt1 DateTime MATERIALIZED now(), value2 ALIAS value + 1) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'mv', @@ -484,9 +490,11 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): """ DROP TABLE IF EXISTS test.view1; DROP TABLE IF EXISTS test.view2; + DROP TABLE IF EXISTS test.view3; DROP TABLE IF EXISTS test.consumer1; DROP TABLE IF EXISTS test.consumer2; - CREATE TABLE test.rabbitmq (key UInt64, value UInt64) + DROP TABLE IF EXISTS test.consumer3; + CREATE TABLE test.rabbitmq (key UInt64, value UInt64, value2 ALIAS value + 1, value3 MATERIALIZED value + 1, value4 DEFAULT 1) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'mmv', @@ -497,13 +505,18 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): CREATE TABLE test.view1 (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; - CREATE TABLE test.view2 (key UInt64, value UInt64) + CREATE TABLE test.view2 (key UInt64, value UInt64, value2 UInt64, value3 UInt64, value4 UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE TABLE test.view3 (key UInt64) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS SELECT * FROM test.rabbitmq; CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS SELECT * FROM test.rabbitmq; + CREATE MATERIALIZED VIEW test.consumer3 TO test.view3 AS + SELECT * FROM test.rabbitmq; """ ) @@ -514,7 +527,7 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): connection = pika.BlockingConnection(parameters) channel = connection.channel() - instance.wait_for_log_line("Started streaming to 2 attached views") + instance.wait_for_log_line("Started streaming to 3 attached views") messages = [] for i in range(50): @@ -522,24 +535,43 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): for message in messages: channel.basic_publish(exchange="mmv", routing_key="", body=message) - while True: + is_check_passed = False + deadline = time.monotonic() + 60 + while time.monotonic() < deadline: result1 = instance.query("SELECT * FROM test.view1 ORDER BY key") result2 = instance.query("SELECT * FROM test.view2 ORDER BY key") - if rabbitmq_check_result(result1) and rabbitmq_check_result(result2): + result3 = instance.query("SELECT * FROM test.view3 ORDER BY key") + # Note that for view2 result is `i i 0 0 0`, but not `i i i+1 i+1 1` as expected, ALIAS/MATERIALIZED/DEFAULT columns are not supported in RabbitMQ engine + # We onlt check that at least it do not fail + if ( + rabbitmq_check_result(result1) + and rabbitmq_check_result( + result2, reference="\n".join([f"{i}\t{i}\t0\t0\t0" for i in range(50)]) + ) + and rabbitmq_check_result( + result3, 
reference="\n".join([str(i) for i in range(50)]) + ) + ): + is_check_passed = True break + time.sleep(0.1) + + assert ( + is_check_passed + ), f"References are not equal to results, result1: {result1}, result2: {result2}, result3: {result3}" instance.query( """ DROP TABLE test.consumer1; DROP TABLE test.consumer2; + DROP TABLE test.consumer3; DROP TABLE test.view1; DROP TABLE test.view2; + DROP TABLE test.view3; """ ) connection.close() - rabbitmq_check_result(result1, True) - rabbitmq_check_result(result2, True) def test_rabbitmq_big_message(rabbitmq_cluster): diff --git a/tests/integration/test_storage_rabbitmq/test_rabbitmq_json.reference b/tests/integration/test_storage_rabbitmq/test_rabbitmq_json.reference deleted file mode 100644 index 959bb2aad74..00000000000 --- a/tests/integration/test_storage_rabbitmq/test_rabbitmq_json.reference +++ /dev/null @@ -1,50 +0,0 @@ -0 0 -1 1 -2 2 -3 3 -4 4 -5 5 -6 6 -7 7 -8 8 -9 9 -10 10 -11 11 -12 12 -13 13 -14 14 -15 15 -16 16 -17 17 -18 18 -19 19 -20 20 -21 21 -22 22 -23 23 -24 24 -25 25 -26 26 -27 27 -28 28 -29 29 -30 30 -31 31 -32 32 -33 33 -34 34 -35 35 -36 36 -37 37 -38 38 -39 39 -40 40 -41 41 -42 42 -43 43 -44 44 -45 45 -46 46 -47 47 -48 48 -49 49 From 4670f055649f5f8f216acd42947881038dedbdbd Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 13 Mar 2024 17:18:13 +0000 Subject: [PATCH 277/374] Fix test test_input_format_parallel_parsing_memory_tracking --- .../test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py b/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py index c95bbfda708..a89cb619350 100644 --- a/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py +++ b/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py @@ -41,7 +41,7 @@ def test_memory_tracking_total(): [ "bash", "-c", - "clickhouse local -q \"SELECT arrayStringConcat(arrayMap(x->toString(cityHash64(x)), range(1000)), ' ') from numbers(10000)\" > data.json", + "clickhouse local -q \"SELECT arrayStringConcat(arrayMap(x->toString(cityHash64(x)), range(1000)), ' ') from numbers(10000)\" > data.jsonl", ] ) @@ -56,7 +56,7 @@ def test_memory_tracking_total(): "--show-error", "--data-binary", "@data.json", - "http://127.1:8123/?query=INSERT%20INTO%20null%20FORMAT%20TSV", + "http://127.1:8123/?query=INSERT%20INTO%20null%20FORMAT%20JSONEachRow", ] ) == "" From 42a1cc47e6e24c3e854346bf9dec522677bb0d09 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 13 Mar 2024 18:05:08 +0000 Subject: [PATCH 278/374] Fix 01761_cast_to_enum_nullable with analyzer. 
--- src/DataTypes/DataTypeNullable.cpp | 25 +++++++++++++++++++++++++ src/DataTypes/DataTypeNullable.h | 1 + tests/analyzer_tech_debt.txt | 1 - 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/src/DataTypes/DataTypeNullable.cpp b/src/DataTypes/DataTypeNullable.cpp index 16d5d41e5e5..db252659d41 100644 --- a/src/DataTypes/DataTypeNullable.cpp +++ b/src/DataTypes/DataTypeNullable.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -56,6 +57,30 @@ bool DataTypeNullable::equals(const IDataType & rhs) const return rhs.isNullable() && nested_data_type->equals(*static_cast(rhs).nested_data_type); } +ColumnPtr DataTypeNullable::createColumnConst(size_t size, const Field & field) const +{ + if (onlyNull()) + { + auto column = createColumn(); + column->insert(field); + return ColumnConst::create(std::move(column), size); + } + + auto column = nested_data_type->createColumn(); + bool is_null = field.isNull(); + + if (is_null) + nested_data_type->insertDefaultInto(*column); + else + column->insert(field); + + auto null_mask = ColumnUInt8::create(); + null_mask->getData().push_back(is_null ? 1 : 0); + + auto res = ColumnNullable::create(std::move(column), std::move(null_mask)); + return ColumnConst::create(std::move(res), size); +} + SerializationPtr DataTypeNullable::doGetDefaultSerialization() const { return std::make_shared(nested_data_type->getDefaultSerialization()); diff --git a/src/DataTypes/DataTypeNullable.h b/src/DataTypes/DataTypeNullable.h index b102c767993..71abe48c151 100644 --- a/src/DataTypes/DataTypeNullable.h +++ b/src/DataTypes/DataTypeNullable.h @@ -41,6 +41,7 @@ public: bool onlyNull() const override; bool canBeInsideLowCardinality() const override { return nested_data_type->canBeInsideLowCardinality(); } bool canBePromoted() const override { return nested_data_type->canBePromoted(); } + ColumnPtr createColumnConst(size_t size, const Field & field) const override; const DataTypePtr & getNestedType() const { return nested_data_type; } diff --git a/tests/analyzer_tech_debt.txt b/tests/analyzer_tech_debt.txt index dbd216ea7be..b4cf3cf288b 100644 --- a/tests/analyzer_tech_debt.txt +++ b/tests/analyzer_tech_debt.txt @@ -7,7 +7,6 @@ 01584_distributed_buffer_cannot_find_column 01624_soft_constraints 01747_join_view_filter_dictionary -01761_cast_to_enum_nullable 01925_join_materialized_columns 01952_optimize_distributed_group_by_sharding_key 02354_annoy From 6e6a67a2fb37ef22807d22aac1a8958bc618dc4d Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 13 Mar 2024 19:13:39 +0100 Subject: [PATCH 279/374] Fix unit test --- .../Cache/WriteBufferToFileSegment.cpp | 19 ++++++++++++++----- .../Cache/WriteBufferToFileSegment.h | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp index 759135722dc..51914c0a14e 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp @@ -19,9 +19,22 @@ namespace ErrorCodes extern const int NOT_ENOUGH_SPACE; } +namespace +{ + size_t getCacheLockWaitTimeout() + { + auto query_context = CurrentThread::getQueryContext(); + if (query_context) + return query_context->getReadSettings().filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; + else + return Context::getGlobalContextInstance()->getReadSettings().filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; + } +} + 
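+/// getCacheLockWaitTimeout() falls back to the global context settings when the current
+/// thread has no query context attached (e.g. in unit tests), so both constructors can
+/// resolve the reserve-space lock timeout eagerly at construction time.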
WriteBufferToFileSegment::WriteBufferToFileSegment(FileSegment * file_segment_) : WriteBufferFromFileDecorator(std::make_unique(file_segment_->getPath())) , file_segment(file_segment_) + , reserve_space_lock_wait_timeout_milliseconds(getCacheLockWaitTimeout()) { } @@ -32,12 +45,8 @@ WriteBufferToFileSegment::WriteBufferToFileSegment(FileSegmentsHolderPtr segment : throw Exception(ErrorCodes::LOGICAL_ERROR, "WriteBufferToFileSegment can be created only from single segment")) , file_segment(&segment_holder_->front()) , segment_holder(std::move(segment_holder_)) + , reserve_space_lock_wait_timeout_milliseconds(getCacheLockWaitTimeout()) { - auto query_context = CurrentThread::getQueryContext(); - if (query_context) - reserve_space_lock_wait_timeout_milliseconds = query_context->getReadSettings().filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; - else - reserve_space_lock_wait_timeout_milliseconds = Context::getGlobalContextInstance()->getReadSettings().filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; } /// If it throws an exception, the file segment will be incomplete, so you should not use it in the future. diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.h b/src/Interpreters/Cache/WriteBufferToFileSegment.h index bff340d79b3..822488ceb48 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.h +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.h @@ -29,7 +29,7 @@ private: /// Empty if file_segment is not owned by this WriteBufferToFileSegment FileSegmentsHolderPtr segment_holder; - size_t reserve_space_lock_wait_timeout_milliseconds; + const size_t reserve_space_lock_wait_timeout_milliseconds; }; From 89de338676a0233dac4563d9f4ba7e4a16b54135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 13 Mar 2024 17:33:39 +0100 Subject: [PATCH 280/374] Better --- .../aggregate-functions/index.md | 53 +++++++++++++++++-- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/index.md b/docs/en/sql-reference/aggregate-functions/index.md index e97db436271..96bf0c5d93b 100644 --- a/docs/en/sql-reference/aggregate-functions/index.md +++ b/docs/en/sql-reference/aggregate-functions/index.md @@ -16,11 +16,9 @@ ClickHouse also supports: ## NULL Processing -During aggregation, all `NULL`s are skipped. If the aggregation has several parameters it will ignore any row in which one or more of the parameters are NULL. +During aggregation, all `NULL` arguments are skipped. If the aggregation has several arguments it will ignore any row in which one or more of them are NULL. -There are a few exceptions to this rule: - - Both [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md) and [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) support modifiers that respect NULLs (`first_value(b) ignore nulls`). - - [`count`](../../sql-reference/aggregate-functions/reference/count.md) without parameters (`count()`) or with constant ones (`count(1)`) will count NULL rows too. With a column as parameter, it will count only not null values. +There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases when followed by the modifier `RESPECT NULLS`: `FIRST_VALUE(b) RESPECT NULLS`. 
**Examples:** @@ -89,3 +87,50 @@ FROM t_null_big; │ [2,2,3] │ [2,NULL,2,3,NULL] │ └───────────────┴───────────────────────────────────────┘ ``` + +Note that aggregations are skipped when the columns are used as arguments to an aggregated function. For example [`count`](../../sql-reference/aggregate-functions/reference/count.md) without parameters (`count()`) or with constant ones (`count(1)`) will count all rows in the block (independently of the value of the GROUP BY column as it's not an argument), while `count(column)` will only return the number of rows where column is not NULL. + +```sql +SELECT + v, + count(1), + count(v) +FROM +( + SELECT if(number < 10, NULL, number % 3) AS v + FROM numbers(15) +) +GROUP BY v + +┌────v─┬─count()─┬─count(v)─┐ +│ ᴺᵁᴸᴸ │ 10 │ 0 │ +│ 0 │ 1 │ 1 │ +│ 1 │ 2 │ 2 │ +│ 2 │ 2 │ 2 │ +└──────┴─────────┴──────────┘ +``` + +And here is an example of of first_value with `RESPECT NULLS` where we can see that NULL inputs are respected and it will return the first value read, whether it's NULL or not: + +```sql +SELECT + col || '_' || ((col + 1) * 5 - 1) as range, + first_value(odd_or_null) as first, + first_value(odd_or_null) IGNORE NULLS as first_ignore_null, + first_value(odd_or_null) RESPECT NULLS as first_respect_nulls +FROM +( + SELECT + intDiv(number, 5) AS col, + if(number % 2 == 0, NULL, number) as odd_or_null + FROM numbers(15) +) +GROUP BY col +ORDER BY col + +┌─range─┬─first─┬─first_ignore_null─┬─first_respect_nulls─┐ +│ 0_4 │ 1 │ 1 │ ᴺᵁᴸᴸ │ +│ 1_9 │ 5 │ 5 │ 5 │ +│ 2_14 │ 11 │ 11 │ ᴺᵁᴸᴸ │ +└───────┴───────┴───────────────────┴─────────────────────┘ +``` From be9a77b6ca7aa35b78857bc812ccacbe1d22bfa3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 14 Mar 2024 03:54:07 +0300 Subject: [PATCH 281/374] Update SettingsChangesHistory.h --- src/Core/SettingsChangesHistory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index 129a89bfa23..04b6add0e56 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -88,6 +88,7 @@ static std::map sett {"24.3", {{"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, + {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, }}, {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, @@ -136,7 +137,6 @@ static std::map sett {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update 
insert deduplication token with table identifier during insert in dependent materialized views"}, {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, From 0b522f5fb34a774ce6cf6092dc97d5360fde7ea6 Mon Sep 17 00:00:00 2001 From: peter279k Date: Thu, 14 Mar 2024 13:53:26 +0800 Subject: [PATCH 282/374] Fix issue #61351 --- docs/en/getting-started/example-datasets/laion.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/en/getting-started/example-datasets/laion.md b/docs/en/getting-started/example-datasets/laion.md index 0dbaceffc13..327c1796d11 100644 --- a/docs/en/getting-started/example-datasets/laion.md +++ b/docs/en/getting-started/example-datasets/laion.md @@ -10,10 +10,14 @@ The embeddings and the metadata are stored in separate files in the raw data. A converts them to CSV and imports them into ClickHouse. You can use the following `download.sh` script for that: ```bash -wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/img_emb/img_emb_${1}.npy # download image embedding -wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/text_emb/text_emb_${1}.npy # download text embedding -wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/metadata/metadata_${1}.parquet # download metadata -python3 process.py ${1} # merge files and convert to CSV +number=${1} +if [[ $number == '' ]]; then + number=1 +fi; +wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/img_emb/img_emb_${number}.npy # download image embedding +wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/text_emb/text_emb_${number}.npy # download text embedding +wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/metadata/metadata_${number}.parquet # download metadata +python3 process.py $number # merge files and convert to CSV ``` Script `process.py` is defined as follows: From ff976520389c277cfcb034466aed514fe96acc6d Mon Sep 17 00:00:00 2001 From: serxa Date: Thu, 14 Mar 2024 09:53:18 +0000 Subject: [PATCH 283/374] fix spell check --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index c7eb213bff2..1706d44bc8a 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -2687,7 +2687,9 @@ userver utils uuid varPop +varPopStable varSamp +varSampStable variadic variantElement variantType From c6f0b2a5a458648c179456a6843aae8c9c3fb53a Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 14 Mar 2024 11:57:03 +0100 Subject: [PATCH 284/374] Revert "Fix usage of session_token in S3 engine" --- src/Coordination/KeeperSnapshotManagerS3.cpp | 3 +-- src/Storages/StorageS3.cpp | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp index 796506a07db..80345db2524 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.cpp +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -121,8 +121,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo auth_settings.use_insecure_imds_request.value_or(false), 
auth_settings.expiration_window_seconds.value_or(S3::DEFAULT_EXPIRATION_WINDOW_SECONDS), auth_settings.no_sign_request.value_or(false), - }, - credentials.GetSessionToken()); + }); auto new_client = std::make_shared(std::move(new_uri), std::move(auth_settings), std::move(client)); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index ff055508aa6..11da394feec 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -1451,8 +1451,7 @@ void StorageS3::Configuration::connect(const ContextPtr & context) auth_settings.expiration_window_seconds.value_or( context->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS)), auth_settings.no_sign_request.value_or(context->getConfigRef().getBool("s3.no_sign_request", false)), - }, - credentials.GetSessionToken()); + }); } void StorageS3::processNamedCollectionResult(StorageS3::Configuration & configuration, const NamedCollection & collection) From 56220b5105005acef0ab4b09d4f7b48c6aac8d66 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 14 Mar 2024 12:23:32 +0100 Subject: [PATCH 285/374] Revert "Revert "Fix usage of session_token in S3 engine"" --- src/Coordination/KeeperSnapshotManagerS3.cpp | 3 ++- src/Storages/StorageS3.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp index 80345db2524..796506a07db 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.cpp +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -121,7 +121,8 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo auth_settings.use_insecure_imds_request.value_or(false), auth_settings.expiration_window_seconds.value_or(S3::DEFAULT_EXPIRATION_WINDOW_SECONDS), auth_settings.no_sign_request.value_or(false), - }); + }, + credentials.GetSessionToken()); auto new_client = std::make_shared(std::move(new_uri), std::move(auth_settings), std::move(client)); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 11da394feec..ff055508aa6 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -1451,7 +1451,8 @@ void StorageS3::Configuration::connect(const ContextPtr & context) auth_settings.expiration_window_seconds.value_or( context->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS)), auth_settings.no_sign_request.value_or(context->getConfigRef().getBool("s3.no_sign_request", false)), - }); + }, + credentials.GetSessionToken()); } void StorageS3::processNamedCollectionResult(StorageS3::Configuration & configuration, const NamedCollection & collection) From 8fdf3ae747877b1aa0cbab3a74290f5a94edea63 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 14 Mar 2024 12:31:30 +0000 Subject: [PATCH 286/374] Add a comment. --- src/Planner/Planner.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index fde9f110d09..5624a911210 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1084,6 +1084,8 @@ void addBuildSubqueriesForSetsStepIfNeeded( auto subquery_options = select_query_options.subquery(); /// I don't know if this is a good decision, /// But for now it is done in the same way as in old analyzer. + /// This would not ignore limits for subqueries (affects mutations only). + /// See test_build_sets_from_multiple_threads-analyzer. 
subquery_options.ignore_limits = false; Planner subquery_planner( query_tree, From b59680911c036eca84776247b9644b9f5475e25c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 14 Mar 2024 12:32:13 +0000 Subject: [PATCH 287/374] Move `getItemsShortCircuitImpl` and `getItemsImpl` into separate files --- src/Dictionaries/DictionaryStructure.h | 27 ++ src/Dictionaries/RangeHashedDictionary.cpp | 218 ---------------- src/Dictionaries/RangeHashedDictionary.h | 13 +- src/Dictionaries/RangeHashedDictionary_2.cpp | 250 +++++++++++++++++++ src/Dictionaries/RangeHashedDictionary_3.cpp | 135 ++++++++++ utils/check-style/check-large-objects.sh | 1 + 6 files changed, 422 insertions(+), 222 deletions(-) create mode 100644 src/Dictionaries/RangeHashedDictionary_2.cpp create mode 100644 src/Dictionaries/RangeHashedDictionary_3.cpp diff --git a/src/Dictionaries/DictionaryStructure.h b/src/Dictionaries/DictionaryStructure.h index 55060b1592f..56d11be9837 100644 --- a/src/Dictionaries/DictionaryStructure.h +++ b/src/Dictionaries/DictionaryStructure.h @@ -41,6 +41,33 @@ enum class AttributeUnderlyingType : TypeIndexUnderlying #undef map_item + +#define CALL_FOR_ALL_DICTIONARY_ATTRIBUTE_TYPES(M) \ + M(UInt8) \ + M(UInt16) \ + M(UInt32) \ + M(UInt64) \ + M(UInt128) \ + M(UInt256) \ + M(Int8) \ + M(Int16) \ + M(Int32) \ + M(Int64) \ + M(Int128) \ + M(Int256) \ + M(Decimal32) \ + M(Decimal64) \ + M(Decimal128) \ + M(Decimal256) \ + M(DateTime64) \ + M(Float32) \ + M(Float64) \ + M(UUID) \ + M(IPv4) \ + M(IPv6) \ + M(String) \ + M(Array) + /// Min and max lifetimes for a dictionary or its entry using DictionaryLifetime = ExternalLoadableLifetime; diff --git a/src/Dictionaries/RangeHashedDictionary.cpp b/src/Dictionaries/RangeHashedDictionary.cpp index 203561fc23d..8299c3ad93a 100644 --- a/src/Dictionaries/RangeHashedDictionary.cpp +++ b/src/Dictionaries/RangeHashedDictionary.cpp @@ -211,224 +211,6 @@ ColumnPtr RangeHashedDictionary::getColumn( return result; } -template -template -void RangeHashedDictionary::getItemsImpl( - const Attribute & attribute, - const Columns & key_columns, - ValueSetter && set_value, - DefaultValueExtractor & default_value_extractor) const -{ - const auto & attribute_container = std::get>(attribute.container); - - size_t keys_found = 0; - - const ColumnPtr & range_column = key_columns.back(); - auto key_columns_copy = key_columns; - key_columns_copy.pop_back(); - - DictionaryKeysArenaHolder arena_holder; - DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); - const size_t keys_size = keys_extractor.getKeysSize(); - - callOnRangeType(dict_struct.range_min->type, [&](const auto & types) - { - using Types = std::decay_t; - using RangeColumnType = typename Types::LeftType; - using RangeStorageType = typename RangeColumnType::ValueType; - using RangeInterval = Interval; - - const auto * range_column_typed = typeid_cast(range_column.get()); - if (!range_column_typed) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Dictionary {} range column type should be equal to {}", - getFullName(), - dict_struct.range_min->type->getName()); - - const auto & range_column_data = range_column_typed->getData(); - - const auto & key_attribute_container = std::get>(key_attribute.container); - - for (size_t key_index = 0; key_index < keys_size; ++key_index) - { - auto key = keys_extractor.extractCurrentKey(); - const auto it = key_attribute_container.find(key); - - if (it) - { - const auto date = range_column_data[key_index]; - const auto & 
interval_tree = it->getMapped(); - - size_t value_index = 0; - std::optional range; - - interval_tree.find(date, [&](auto & interval, auto & interval_value_index) - { - if (range) - { - if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) - { - range = interval; - value_index = interval_value_index; - } - else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > * range) - { - range = interval; - value_index = interval_value_index; - } - } - else - { - range = interval; - value_index = interval_value_index; - } - - return true; - }); - - if (range.has_value()) - { - ++keys_found; - - AttributeType value = attribute_container[value_index]; - - if constexpr (is_nullable) - { - bool is_null = (*attribute.is_value_nullable)[value_index]; - set_value(key_index, value, is_null); - } - else - { - set_value(key_index, value, false); - } - - keys_extractor.rollbackCurrentKey(); - continue; - } - } - - if constexpr (is_nullable) - set_value(key_index, default_value_extractor[key_index], default_value_extractor.isNullAt(key_index)); - else - set_value(key_index, default_value_extractor[key_index], false); - - keys_extractor.rollbackCurrentKey(); - } - }); - - query_count.fetch_add(keys_size, std::memory_order_relaxed); - found_count.fetch_add(keys_found, std::memory_order_relaxed); -} - -template -template -size_t RangeHashedDictionary::getItemsShortCircuitImpl( - const Attribute & attribute, - const Columns & key_columns, - ValueSetter && set_value, - IColumn::Filter & default_mask) const -{ - const auto & attribute_container = std::get>(attribute.container); - - size_t keys_found = 0; - - const ColumnPtr & range_column = key_columns.back(); - auto key_columns_copy = key_columns; - key_columns_copy.pop_back(); - - DictionaryKeysArenaHolder arena_holder; - DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); - const size_t keys_size = keys_extractor.getKeysSize(); - default_mask.resize(keys_size); - - callOnRangeType(dict_struct.range_min->type, [&](const auto & types) - { - using Types = std::decay_t; - using RangeColumnType = typename Types::LeftType; - using RangeStorageType = typename RangeColumnType::ValueType; - using RangeInterval = Interval; - - const auto * range_column_typed = typeid_cast(range_column.get()); - if (!range_column_typed) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Dictionary {} range column type should be equal to {}", - getFullName(), - dict_struct.range_min->type->getName()); - - const auto & range_column_data = range_column_typed->getData(); - - const auto & key_attribute_container = std::get>(key_attribute.container); - - for (size_t key_index = 0; key_index < keys_size; ++key_index) - { - auto key = keys_extractor.extractCurrentKey(); - const auto it = key_attribute_container.find(key); - - if (it) - { - const auto date = range_column_data[key_index]; - const auto & interval_tree = it->getMapped(); - - size_t value_index = 0; - std::optional range; - - interval_tree.find(date, [&](auto & interval, auto & interval_value_index) - { - if (range) - { - if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) - { - range = interval; - value_index = interval_value_index; - } - else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > * range) - { - range = interval; - value_index = interval_value_index; - } - } - else - { - range = interval; - 
value_index = interval_value_index; - } - - return true; - }); - - if (range.has_value()) - { - default_mask[key_index] = 0; - ++keys_found; - - AttributeType value = attribute_container[value_index]; - - if constexpr (is_nullable) - { - bool is_null = (*attribute.is_value_nullable)[value_index]; - set_value(key_index, value, is_null); - } - else - { - set_value(key_index, value, false); - } - - keys_extractor.rollbackCurrentKey(); - continue; - } - } - - default_mask[key_index] = 1; - - keys_extractor.rollbackCurrentKey(); - } - }); - - query_count.fetch_add(keys_size, std::memory_order_relaxed); - found_count.fetch_add(keys_found, std::memory_order_relaxed); - return keys_found; -} - template ColumnPtr RangeHashedDictionary::getColumn( const std::string & attribute_name, diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index 4a8008b9051..a5dedae97c4 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -236,18 +236,23 @@ private: static Attribute createAttribute(const DictionaryAttribute & dictionary_attribute); - template + + + template + using ValueSetterFunc = std::function; + + template void getItemsImpl( const Attribute & attribute, const Columns & key_columns, - ValueSetter && set_value, + ValueSetterFunc && set_value, DefaultValueExtractor & default_value_extractor) const; - template + template size_t getItemsShortCircuitImpl( const Attribute & attribute, const Columns & key_columns, - ValueSetter && set_value, + ValueSetterFunc && set_value, IColumn::Filter & default_mask) const; ColumnPtr getColumnInternal( diff --git a/src/Dictionaries/RangeHashedDictionary_2.cpp b/src/Dictionaries/RangeHashedDictionary_2.cpp new file mode 100644 index 00000000000..2329d621da4 --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionary_2.cpp @@ -0,0 +1,250 @@ +#include + +namespace DB +{ + + +template +template +size_t RangeHashedDictionary::getItemsShortCircuitImpl( + const Attribute & attribute, + const Columns & key_columns, + typename RangeHashedDictionary::ValueSetterFunc && set_value, + IColumn::Filter & default_mask) const +{ + const auto & attribute_container = std::get>(attribute.container); + + size_t keys_found = 0; + + const ColumnPtr & range_column = key_columns.back(); + auto key_columns_copy = key_columns; + key_columns_copy.pop_back(); + + DictionaryKeysArenaHolder arena_holder; + DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); + const size_t keys_size = keys_extractor.getKeysSize(); + default_mask.resize(keys_size); + + callOnRangeType( + dict_struct.range_min->type, + [&](const auto & types) + { + using Types = std::decay_t; + using RangeColumnType = typename Types::LeftType; + using RangeStorageType = typename RangeColumnType::ValueType; + using RangeInterval = Interval; + + const auto * range_column_typed = typeid_cast(range_column.get()); + if (!range_column_typed) + throw Exception( + ErrorCodes::TYPE_MISMATCH, + "Dictionary {} range column type should be equal to {}", + getFullName(), + dict_struct.range_min->type->getName()); + + const auto & range_column_data = range_column_typed->getData(); + + const auto & key_attribute_container = std::get>(key_attribute.container); + + for (size_t key_index = 0; key_index < keys_size; ++key_index) + { + auto key = keys_extractor.extractCurrentKey(); + const auto it = key_attribute_container.find(key); + + if (it) + { + const auto date = range_column_data[key_index]; + const auto & 
interval_tree = it->getMapped(); + + size_t value_index = 0; + std::optional range; + + interval_tree.find( + date, + [&](auto & interval, auto & interval_value_index) + { + if (range) + { + if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) + { + range = interval; + value_index = interval_value_index; + } + else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > *range) + { + range = interval; + value_index = interval_value_index; + } + } + else + { + range = interval; + value_index = interval_value_index; + } + + return true; + }); + + if (range.has_value()) + { + default_mask[key_index] = 0; + ++keys_found; + + ValueType value = attribute_container[value_index]; + + if constexpr (is_nullable) + { + bool is_null = (*attribute.is_value_nullable)[value_index]; + set_value(key_index, value, is_null); + } + else + { + set_value(key_index, value, false); + } + + keys_extractor.rollbackCurrentKey(); + continue; + } + } + + default_mask[key_index] = 1; + + keys_extractor.rollbackCurrentKey(); + } + }); + + query_count.fetch_add(keys_size, std::memory_order_relaxed); + found_count.fetch_add(keys_found, std::memory_order_relaxed); + return keys_found; +} + +template +template +void RangeHashedDictionary::getItemsImpl( + const Attribute & attribute, + const Columns & key_columns, + typename RangeHashedDictionary::ValueSetterFunc && set_value, + DefaultValueExtractor & default_value_extractor) const +{ + const auto & attribute_container = std::get>(attribute.container); + + size_t keys_found = 0; + + const ColumnPtr & range_column = key_columns.back(); + auto key_columns_copy = key_columns; + key_columns_copy.pop_back(); + + DictionaryKeysArenaHolder arena_holder; + DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); + const size_t keys_size = keys_extractor.getKeysSize(); + + callOnRangeType( + dict_struct.range_min->type, + [&](const auto & types) + { + using Types = std::decay_t; + using RangeColumnType = typename Types::LeftType; + using RangeStorageType = typename RangeColumnType::ValueType; + using RangeInterval = Interval; + + const auto * range_column_typed = typeid_cast(range_column.get()); + if (!range_column_typed) + throw Exception( + ErrorCodes::TYPE_MISMATCH, + "Dictionary {} range column type should be equal to {}", + getFullName(), + dict_struct.range_min->type->getName()); + + const auto & range_column_data = range_column_typed->getData(); + + const auto & key_attribute_container = std::get>(key_attribute.container); + + for (size_t key_index = 0; key_index < keys_size; ++key_index) + { + auto key = keys_extractor.extractCurrentKey(); + const auto it = key_attribute_container.find(key); + + if (it) + { + const auto date = range_column_data[key_index]; + const auto & interval_tree = it->getMapped(); + + size_t value_index = 0; + std::optional range; + + interval_tree.find( + date, + [&](auto & interval, auto & interval_value_index) + { + if (range) + { + if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) + { + range = interval; + value_index = interval_value_index; + } + else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > *range) + { + range = interval; + value_index = interval_value_index; + } + } + else + { + range = interval; + value_index = interval_value_index; + } + + return true; + }); + + if (range.has_value()) + { + ++keys_found; + + 
ValueType value = attribute_container[value_index]; + + if constexpr (is_nullable) + { + bool is_null = (*attribute.is_value_nullable)[value_index]; + set_value(key_index, value, is_null); + } + else + { + set_value(key_index, value, false); + } + + keys_extractor.rollbackCurrentKey(); + continue; + } + } + + if constexpr (is_nullable) + set_value(key_index, default_value_extractor[key_index], default_value_extractor.isNullAt(key_index)); + else + set_value(key_index, default_value_extractor[key_index], false); + + keys_extractor.rollbackCurrentKey(); + } + }); + + query_count.fetch_add(keys_size, std::memory_order_relaxed); + found_count.fetch_add(keys_found, std::memory_order_relaxed); +} + +#define INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType, IsNullable, ValueType) \ + template size_t RangeHashedDictionary::getItemsShortCircuitImpl( \ + const Attribute & attribute, \ + const Columns & key_columns, \ + typename RangeHashedDictionary::ValueSetterFunc && set_value, \ + IColumn::Filter & default_mask) const; + +#define INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(AttributeType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Simple, true, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Simple, false, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Complex, true, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Complex, false, DictionaryValueType) + +CALL_FOR_ALL_DICTIONARY_ATTRIBUTE_TYPES(INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE) + +} diff --git a/src/Dictionaries/RangeHashedDictionary_3.cpp b/src/Dictionaries/RangeHashedDictionary_3.cpp new file mode 100644 index 00000000000..a3136d6f63d --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionary_3.cpp @@ -0,0 +1,135 @@ +#include + +namespace DB +{ + +template +template +void RangeHashedDictionary::getItemsImpl( + const Attribute & attribute, + const Columns & key_columns, + typename RangeHashedDictionary::ValueSetterFunc && set_value, + DefaultValueExtractor & default_value_extractor) const +{ + const auto & attribute_container = std::get>(attribute.container); + + size_t keys_found = 0; + + const ColumnPtr & range_column = key_columns.back(); + auto key_columns_copy = key_columns; + key_columns_copy.pop_back(); + + DictionaryKeysArenaHolder arena_holder; + DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); + const size_t keys_size = keys_extractor.getKeysSize(); + + callOnRangeType( + dict_struct.range_min->type, + [&](const auto & types) + { + using Types = std::decay_t; + using RangeColumnType = typename Types::LeftType; + using RangeStorageType = typename RangeColumnType::ValueType; + using RangeInterval = Interval; + + const auto * range_column_typed = typeid_cast(range_column.get()); + if (!range_column_typed) + throw Exception( + ErrorCodes::TYPE_MISMATCH, + "Dictionary {} range column type should be equal to {}", + getFullName(), + dict_struct.range_min->type->getName()); + + const auto & range_column_data = range_column_typed->getData(); + + const auto & key_attribute_container = std::get>(key_attribute.container); + + for (size_t key_index = 0; key_index < keys_size; ++key_index) + { + auto key = keys_extractor.extractCurrentKey(); + const auto it = key_attribute_container.find(key); + + if (it) + { + const auto date = range_column_data[key_index]; + const auto & interval_tree = it->getMapped(); + + size_t 
value_index = 0; + std::optional range; + + interval_tree.find( + date, + [&](auto & interval, auto & interval_value_index) + { + if (range) + { + if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) + { + range = interval; + value_index = interval_value_index; + } + else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > *range) + { + range = interval; + value_index = interval_value_index; + } + } + else + { + range = interval; + value_index = interval_value_index; + } + + return true; + }); + + if (range.has_value()) + { + ++keys_found; + + ValueType value = attribute_container[value_index]; + + if constexpr (is_nullable) + { + bool is_null = (*attribute.is_value_nullable)[value_index]; + set_value(key_index, value, is_null); + } + else + { + set_value(key_index, value, false); + } + + keys_extractor.rollbackCurrentKey(); + continue; + } + } + + if constexpr (is_nullable) + set_value(key_index, default_value_extractor[key_index], default_value_extractor.isNullAt(key_index)); + else + set_value(key_index, default_value_extractor[key_index], false); + + keys_extractor.rollbackCurrentKey(); + } + }); + + query_count.fetch_add(keys_size, std::memory_order_relaxed); + found_count.fetch_add(keys_found, std::memory_order_relaxed); +} + +#define INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType, IsNullable, AttributeType, ValueType) \ +template void RangeHashedDictionary::getItemsImpl>( \ + const Attribute & attribute,\ + const Columns & key_columns,\ + typename RangeHashedDictionary::ValueSetterFunc && set_value,\ + DictionaryDefaultValueExtractor & default_value_extractor) const; + +#define INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(AttributeType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Simple, true, AttributeType, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Simple, false, AttributeType, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Complex, true, AttributeType, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Complex, false, AttributeType, DictionaryValueType) + +CALL_FOR_ALL_DICTIONARY_ATTRIBUTE_TYPES(INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE) + +} diff --git a/utils/check-style/check-large-objects.sh b/utils/check-style/check-large-objects.sh index 5b0e8e88df5..5ef57ea4f6c 100755 --- a/utils/check-style/check-large-objects.sh +++ b/utils/check-style/check-large-objects.sh @@ -6,6 +6,7 @@ TU_EXCLUDES=( CastOverloadResolver AggregateFunctionUniq FunctionsConversion + RangeHashedDictionary_ Aggregator ) From 85a79bc1cc8b9a374bfa2584dc4af87b24267cfd Mon Sep 17 00:00:00 2001 From: serxa Date: Thu, 14 Mar 2024 12:39:23 +0000 Subject: [PATCH 288/374] Fix the longest test unnecessary 3 minute wait --- .../01599_multiline_input_and_singleline_comments.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh b/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh index 7f77f8bb403..a537dce2d92 100755 --- a/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh +++ b/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh @@ -1,9 +1,10 @@ #!/usr/bin/expect -f -# Tags: no-fasttest -# Tag no-fasttest: 180 seconds running log_user 0 + +# In some places `-timeout 1`` is used to avoid expect to always wait for the whole timeout set timeout 60 + match_max 100000 if ![info 
exists env(CLICKHOUSE_PORT_TCP)] {set env(CLICKHOUSE_PORT_TCP) 9000} @@ -13,11 +14,11 @@ expect ":) " # Make a query send -- "SELECT 1\r" -expect ":-] " +expect -timeout 1 ":-] " send -- "-- xxx\r" -expect ":-] " +expect -timeout 1 ":-] " send -- ", 2\r" -expect ":-] " +expect -timeout 1 ":-] " send -- ";\r" expect "│ 1 │ 2 │" From 37913d94a3c9704775010cfc2f1d1fb9c705f7a9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 14 Mar 2024 13:39:50 +0100 Subject: [PATCH 289/374] Merge with master --- src/Functions/FunctionsConversion.cpp | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 70cbf31bcb3..42056067f00 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -4471,20 +4471,20 @@ arguments, result_type, input_rows_count); \ if (from_low_cardinality) { - const auto * col_low_cardinality = typeid_cast(arguments[0].column.get()); + const auto & col_low_cardinality = typeid_cast(*arguments[0].column); - if (skip_not_null_check && col_low_cardinality->containsNull()) + if (skip_not_null_check && col_low_cardinality.containsNull()) throw Exception(ErrorCodes::CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN, "Cannot convert NULL value to non-Nullable type"); - arg.column = col_low_cardinality->getDictionary().getNestedColumn(); + arg.column = col_low_cardinality.getDictionary().getNestedColumn(); arg.type = from_low_cardinality->getDictionaryType(); /// TODO: Make map with defaults conversion. src_converted_to_full_column = !removeNullable(arg.type)->equals(*removeNullable(res_type)); if (src_converted_to_full_column) - arg.column = arg.column->index(col_low_cardinality->getIndexes(), 0); + arg.column = arg.column->index(col_low_cardinality.getIndexes(), 0); else - res_indexes = col_low_cardinality->getIndexesPtr(); + res_indexes = col_low_cardinality.getIndexesPtr(); tmp_rows_count = arg.column->size(); } @@ -4496,14 +4496,12 @@ arguments, result_type, input_rows_count); \ if (to_low_cardinality) { auto res_column = to_low_cardinality->createColumn(); - auto * col_low_cardinality = typeid_cast(res_column.get()); + auto & col_low_cardinality = typeid_cast(*res_column); if (from_low_cardinality && !src_converted_to_full_column) - { - col_low_cardinality->insertRangeFromDictionaryEncodedColumn(*converted_column, *res_indexes); - } + col_low_cardinality.insertRangeFromDictionaryEncodedColumn(*converted_column, *res_indexes); else - col_low_cardinality->insertRangeFromFullColumn(*converted_column, 0, converted_column->size()); + col_low_cardinality.insertRangeFromFullColumn(*converted_column, 0, converted_column->size()); return res_column; } From 107acf54c609147acc6bf84407f168e274079505 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 14 Mar 2024 13:42:07 +0100 Subject: [PATCH 290/374] Fix tests --- tests/integration/test_storage_s3/test.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index dbbe670e8ca..6d5b84a8143 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1414,10 +1414,10 @@ def test_signatures(started_cluster): ) assert int(result) == 1 - result = instance.query( + error = instance.query_and_get_error( f"select * from 
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}')" ) - assert int(result) == 1 + assert "S3_ERROR" in error result = instance.query( f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64', 'auto')" @@ -1429,20 +1429,20 @@ def test_signatures(started_cluster): ) assert int(result) == 1 - result = instance.query( + error = instance.query_and_get_error( f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}', 'Arrow')" ) - assert int(result) == 1 + assert "S3_ERROR" in error - lt = instance.query( + error = instance.query_and_get_error( f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}', 'Arrow', 'x UInt64')" ) - assert int(result) == 1 + assert "S3_ERROR" in error - lt = instance.query( + error = instance.query_and_get_error( f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}', 'Arrow', 'x UInt64', 'auto')" ) - assert int(result) == 1 + assert "S3_ERROR" in error def test_select_columns(started_cluster): From 047fb87a9ae20dd57ba370a277cfd4a43411876d Mon Sep 17 00:00:00 2001 From: serxa Date: Thu, 14 Mar 2024 12:46:45 +0000 Subject: [PATCH 291/374] typo --- .../01599_multiline_input_and_singleline_comments.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh b/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh index a537dce2d92..f1acd39136f 100755 --- a/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh +++ b/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh @@ -2,7 +2,7 @@ log_user 0 -# In some places `-timeout 1`` is used to avoid expect to always wait for the whole timeout +# In some places `-timeout 1` is used to avoid expect to always wait for the whole timeout set timeout 60 match_max 100000 From bf9da768bfb090dec9b366d97240a2949b0e150e Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 14 Mar 2024 13:53:40 +0100 Subject: [PATCH 292/374] More detailed explanation --- docs/en/operations/storing-data.md | 42 +++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 4f676904375..fd81bc197d1 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -10,7 +10,11 @@ Data, processed in ClickHouse, is usually stored in the local file system — on 2. The Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)) 3. [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). -Note: to work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine, and to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) table engine. 
They are different from external storage described on this page as they allow to read data stored in some general file format (like Parquet), while on this page we are describing storage configuration for ClickHouse `MergeTree` family or `Log` family tables.
+:::note ClickHouse also supports external table engines, which are different from the external storage options described on this page: they allow reading data stored in some general file format (like Parquet), while this page describes storage configuration for ClickHouse `MergeTree` family or `Log` family tables.
+1. To work with data stored on `Amazon S3` disks, use the [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine.
+2. To work with data in the Hadoop Distributed File System, use the [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine.
+3. To work with data stored in Azure Blob Storage, use the [AzureBlobStorage](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) table engine.
+:::
 ## Configuring external storage {#configuring-external-storage}
@@ -23,8 +27,9 @@ Disk configuration requires:
 Starting from 24.1 clickhouse version, it is possible to use a new configuration option. It requires to specify:
 1. `type` equal to `object_storage`
-2. `object_storage_type`, equal to one of `s3`, `azure_blob_storage`, `hdfs`, `local_blob_storage`, `web`.
+2. `object_storage_type`, equal to one of `s3`, `azure_blob_storage` (or just `azure` from `24.3`), `hdfs`, `local_blob_storage` (or just `local` from `24.3`), `web`. Optionally, `metadata_type` can be specified (it is equal to `local` by default), but it can also be set to `plain` or `web`.
+Usage of the `plain` metadata type is described in the [plain storage section](/docs/en/operations/storing-data.md/#storing-data-on-webserver); the `web` metadata type can be used only with the `web` object storage type; the `local` metadata type stores metadata files locally (each metadata file contains a mapping to files in object storage and some additional meta information about them).
 E.g. configuration option
 ``` xml
@@ -143,7 +148,7 @@ SETTINGS disk = 's3';
 ## Dynamic Configuration {#dynamic-configuration}
-There is also a possibility to specify storage configuration without a predefined disk in configuration in a configuration file, but can be configured in the CREATE/ATTACH query settings.
+Storage configuration can also be specified without a predefined disk in the configuration file, and instead be given in the `CREATE`/`ATTACH` query settings.
 The following example query builds on the above dynamic disk configuration and shows how to use a local disk to cache data from a table stored at a URL.
@@ -306,10 +311,35 @@ Optional parameters:
 Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/gcs).
 :::
-### Using Plain Storage {#s3-storage}
+### Using Plain Storage {#plain-storage}
-There is a disk type `s3_plain`, which provides a write-once storage. Unlike `s3` disk type, it stores data as is, e.g. instead of randomly-generated blob names, it uses normal file names as clickhouse stores files on local disk. So this disk type allows to keeper a static version of the table and can also be used to create backups on it.
-Configuration parameters are the same as for `s3` disk type.
+In `22.10` a new disk type `s3_plain` was introduced, which provides write-once storage. Its configuration parameters are the same as for the `s3` disk type.
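+For example, a table whose data already resides on such a disk can be attached straight from a query with the dynamic `disk(...)` syntax shown above. This is only a sketch: the table structure, the disk name and the endpoint are placeholders taken from the surrounding examples, not values required by ClickHouse.
+
+``` sql
+ATTACH TABLE data (a Int32, b String)
+ENGINE = MergeTree() ORDER BY a
+SETTINGS disk = disk(name = 'plain_disk_name', type = 's3_plain', endpoint = 'https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/', use_environment_credentials = 1);
+```
+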
+Unlike the `s3` disk type, it stores data as is: instead of randomly generated blob names, it uses normal file names (the same way ClickHouse stores files on a local disk), and it does not store any metadata locally; the metadata is derived from the data on `s3` itself.
+
+This disk type allows keeping a static version of the table: it does not allow executing merges on the existing data and does not allow inserting new data.
+A use case for this disk type is to create backups on it, which can be done via `BACKUP TABLE data TO Disk('plain_disk_name', 'backup_name')`. Afterwards you can run `RESTORE TABLE data AS data_restored FROM Disk('plain_disk_name', 'backup_name')` or use `ATTACH TABLE data (...) ENGINE = MergeTree() SETTINGS disk = 'plain_disk_name'`.
+
+Configuration:
+``` xml
+<plain_disk_name>
+    <type>s3_plain</type>
+    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
+    <use_environment_credentials>1</use_environment_credentials>
+</plain_disk_name>
+```
+
+Starting from `24.1` it is possible to configure any object storage disk (`s3`, `azure`, `hdfs`, `local`) using the `plain` metadata type.
+
+Configuration:
+``` xml
+<plain_disk_name>
+    <type>object_storage</type>
+    <object_storage_type>azure</object_storage_type>
+    <metadata_type>plain</metadata_type>
+    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
+    <use_environment_credentials>1</use_environment_credentials>
+</plain_disk_name>
+```
 ### Using Azure Blob Storage {#azure-blob-storage}

From c3aefb28e5eb11c171700dfe439c2371c6a3825e Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev"
Date: Thu, 14 Mar 2024 14:13:15 +0100
Subject: [PATCH 293/374] Remove unnecessary layers from clickhouse/cctools

---
 docker/packager/cctools/Dockerfile | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docker/packager/cctools/Dockerfile b/docker/packager/cctools/Dockerfile
index 1b8c675a5c5..d986c6a3c86 100644
--- a/docker/packager/cctools/Dockerfile
+++ b/docker/packager/cctools/Dockerfile
@@ -2,7 +2,7 @@
 # It's based on the assumption that we don't care of the cctools version so much
 # It event does not depend on the clickhouse/fasttest in the `docker/images.json`
 ARG FROM_TAG=latest
-FROM clickhouse/fasttest:$FROM_TAG
+FROM clickhouse/fasttest:$FROM_TAG as builder
 ENV CC=clang-${LLVM_VERSION}
 ENV CXX=clang++-${LLVM_VERSION}
@@ -29,3 +29,6 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
     && make install -j$(nproc) \
     && cd ../.. 
\ && rm -rf cctools-port + +FROM scratch +COPY --from=builder /cctools /cctools From 9067c1ab9292de5064a3cb8547557798ace4ac99 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 14 Mar 2024 14:14:24 +0100 Subject: [PATCH 294/374] Merge with master --- src/Functions/FunctionsConversion.h | 4990 --------------------------- 1 file changed, 4990 deletions(-) delete mode 100644 src/Functions/FunctionsConversion.h diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h deleted file mode 100644 index f338af28240..00000000000 --- a/src/Functions/FunctionsConversion.h +++ /dev/null @@ -1,4990 +0,0 @@ -#pragma once - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int ATTEMPT_TO_READ_AFTER_EOF; - extern const int CANNOT_PARSE_NUMBER; - extern const int CANNOT_READ_ARRAY_FROM_TEXT; - extern const int CANNOT_PARSE_INPUT_ASSERTION_FAILED; - extern const int CANNOT_PARSE_QUOTED_STRING; - extern const int CANNOT_PARSE_ESCAPE_SEQUENCE; - extern const int CANNOT_PARSE_DATE; - extern const int CANNOT_PARSE_DATETIME; - extern const int CANNOT_PARSE_TEXT; - extern const int CANNOT_PARSE_UUID; - extern const int CANNOT_PARSE_IPV4; - extern const int CANNOT_PARSE_IPV6; - extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; - extern const int LOGICAL_ERROR; - extern const int TYPE_MISMATCH; - extern const int CANNOT_CONVERT_TYPE; - extern const int ILLEGAL_COLUMN; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int NOT_IMPLEMENTED; - extern const int CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN; - extern const int CANNOT_PARSE_BOOL; - extern const int VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE; -} - -/** Type conversion functions. - * toType - conversion in "natural way"; - */ - -inline UInt32 extractToDecimalScale(const ColumnWithTypeAndName & named_column) -{ - const auto * arg_type = named_column.type.get(); - bool ok = checkAndGetDataType(arg_type) - || checkAndGetDataType(arg_type) - || checkAndGetDataType(arg_type) - || checkAndGetDataType(arg_type); - if (!ok) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type of toDecimal() scale {}", named_column.type->getName()); - - Field field; - named_column.column->get(0, field); - return static_cast(field.get()); -} - -/// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. -struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; }; - -struct AccurateConvertStrategyAdditions -{ - UInt32 scale { 0 }; -}; - -struct AccurateOrNullConvertStrategyAdditions -{ - UInt32 scale { 0 }; -}; - - -struct ConvertDefaultBehaviorTag {}; -struct ConvertReturnNullOnErrorTag {}; -struct ConvertReturnZeroOnErrorTag {}; - -/** Conversion of number types to each other, enums to numbers, dates and datetimes to numbers and back: done by straight assignment. 
- * (Date is represented internally as number of days from some day; DateTime - as unix timestamp) - */ -template -struct ConvertImpl -{ - using FromFieldType = typename FromDataType::FieldType; - using ToFieldType = typename ToDataType::FieldType; - - template - static ColumnPtr NO_SANITIZE_UNDEFINED execute( - const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type [[maybe_unused]], size_t input_rows_count, - Additions additions [[maybe_unused]] = Additions()) - { - const ColumnWithTypeAndName & named_from = arguments[0]; - - using ColVecFrom = typename FromDataType::ColumnType; - using ColVecTo = typename ToDataType::ColumnType; - - if constexpr ((IsDataTypeDecimal || IsDataTypeDecimal) - && !(std::is_same_v || std::is_same_v)) - { - if constexpr (!IsDataTypeDecimalOrNumber || !IsDataTypeDecimalOrNumber) - { - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - named_from.column->getName(), Name::name); - } - } - - if (const ColVecFrom * col_from = checkAndGetColumn(named_from.column.get())) - { - typename ColVecTo::MutablePtr col_to = nullptr; - - if constexpr (IsDataTypeDecimal) - { - UInt32 scale; - - if constexpr (std::is_same_v - || std::is_same_v) - { - scale = additions.scale; - } - else - { - scale = additions; - } - - col_to = ColVecTo::create(0, scale); - } - else - col_to = ColVecTo::create(); - - const auto & vec_from = col_from->getData(); - auto & vec_to = col_to->getData(); - vec_to.resize(input_rows_count); - - ColumnUInt8::MutablePtr col_null_map_to; - ColumnUInt8::Container * vec_null_map_to [[maybe_unused]] = nullptr; - if constexpr (std::is_same_v) - { - col_null_map_to = ColumnUInt8::create(input_rows_count, false); - vec_null_map_to = &col_null_map_to->getData(); - } - - bool result_is_bool = isBool(result_type); - for (size_t i = 0; i < input_rows_count; ++i) - { - if constexpr (std::is_same_v) - { - if (result_is_bool) - { - vec_to[i] = vec_from[i] != FromFieldType(0); - continue; - } - } - - if constexpr (std::is_same_v && std::is_same_v) - { - static_assert( - std::is_same_v, - "UInt128 and UUID types must be same"); - - vec_to[i].items[1] = vec_from[i].toUnderType().items[0]; - vec_to[i].items[0] = vec_from[i].toUnderType().items[1]; - - continue; - } - - if constexpr (std::is_same_v && std::is_same_v) - { - static_assert( - std::is_same_v, - "UInt128 and IPv6 types must be same"); - - vec_to[i].items[1] = std::byteswap(vec_from[i].toUnderType().items[0]); - vec_to[i].items[0] = std::byteswap(vec_from[i].toUnderType().items[1]); - - continue; - } - - if constexpr (std::is_same_v != std::is_same_v) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Conversion between numeric types and UUID is not supported. " - "Probably the passed UUID is unquoted"); - } - else if constexpr ( - (std::is_same_v != std::is_same_v) - && !(is_any_of || is_any_of) - ) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", - TypeName, TypeName); - } - else if constexpr (std::is_same_v != std::is_same_v && !(std::is_same_v || std::is_same_v)) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Conversion between numeric types and IPv6 is not supported. 
" - "Probably the passed IPv6 is unquoted"); - } - else - { - if constexpr (IsDataTypeDecimal || IsDataTypeDecimal) - { - if constexpr (std::is_same_v) - { - ToFieldType result; - bool convert_result = false; - - if constexpr (IsDataTypeDecimal && IsDataTypeDecimal) - convert_result = tryConvertDecimals(vec_from[i], col_from->getScale(), col_to->getScale(), result); - else if constexpr (IsDataTypeDecimal && IsDataTypeNumber) - convert_result = tryConvertFromDecimal(vec_from[i], col_from->getScale(), result); - else if constexpr (IsDataTypeNumber && IsDataTypeDecimal) - convert_result = tryConvertToDecimal(vec_from[i], col_to->getScale(), result); - - if (convert_result) - vec_to[i] = result; - else - { - vec_to[i] = static_cast(0); - (*vec_null_map_to)[i] = true; - } - } - else - { - if constexpr (IsDataTypeDecimal && IsDataTypeDecimal) - vec_to[i] = convertDecimals(vec_from[i], col_from->getScale(), col_to->getScale()); - else if constexpr (IsDataTypeDecimal && IsDataTypeNumber) - vec_to[i] = convertFromDecimal(vec_from[i], col_from->getScale()); - else if constexpr (IsDataTypeNumber && IsDataTypeDecimal) - vec_to[i] = convertToDecimal(vec_from[i], col_to->getScale()); - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Unsupported data type in conversion function"); - } - } - else - { - /// If From Data is Nan or Inf and we convert to integer type, throw exception - if constexpr (std::is_floating_point_v && !std::is_floating_point_v) - { - if (!isFinite(vec_from[i])) - { - if constexpr (std::is_same_v) - { - vec_to[i] = 0; - (*vec_null_map_to)[i] = true; - continue; - } - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Unexpected inf or nan to integer conversion"); - } - } - - if constexpr (std::is_same_v - || std::is_same_v) - { - bool convert_result = accurate::convertNumeric(vec_from[i], vec_to[i]); - - if (!convert_result) - { - if (std::is_same_v) - { - vec_to[i] = 0; - (*vec_null_map_to)[i] = true; - } - else - { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", - named_from.column->getName(), result_type->getName()); - } - } - } - else - { - if constexpr (std::is_same_v && std::is_same_v) - { - const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; - const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); - if (!matchIPv6Subnet(src, ip4_cidr, 96)) - { - char addr[IPV6_MAX_TEXT_LENGTH + 1] {}; - char * paddr = addr; - formatIPv6(src, paddr); - - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 {} in column {} is not in IPv4 mapping block", addr, named_from.column->getName()); - } - - uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); - if constexpr (std::endian::native == std::endian::little) - { - dst[0] = src[15]; - dst[1] = src[14]; - dst[2] = src[13]; - dst[3] = src[12]; - } - else - { - dst[0] = src[12]; - dst[1] = src[13]; - dst[2] = src[14]; - dst[3] = src[15]; - } - } - else if constexpr (std::is_same_v && std::is_same_v) - { - const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); - uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); - std::memset(dst, '\0', IPV6_BINARY_LENGTH); - dst[10] = dst[11] = 0xff; - - if constexpr (std::endian::native == std::endian::little) - { - dst[12] = src[3]; - dst[13] = src[2]; - dst[14] = src[1]; - dst[15] = src[0]; - } - else - { - dst[12] = src[0]; - dst[13] = src[1]; - dst[14] = src[2]; - dst[15] = src[3]; - } - } - else if constexpr 
(std::is_same_v && std::is_same_v) - vec_to[i] = static_cast(static_cast(vec_from[i])); - else if constexpr (std::is_same_v && (std::is_same_v || std::is_same_v)) - vec_to[i] = static_cast(vec_from[i] * DATE_SECONDS_PER_DAY); - else - vec_to[i] = static_cast(vec_from[i]); - } - } - } - } - - if constexpr (std::is_same_v) - return ColumnNullable::create(std::move(col_to), std::move(col_null_map_to)); - else - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - named_from.column->getName(), Name::name); - } -}; - -/** Conversion of DateTime to Date: throw off time component. - */ -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -/** Conversion of DateTime to Date32: throw off time component. - */ -template -struct ConvertImpl - : DateTimeTransformImpl {}; - -/** Conversion of Date to DateTime: adding 00:00:00 time component. - */ -template -struct ToDateTimeImpl -{ - static constexpr auto name = "toDateTime"; - - static UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (d > MAX_DATETIME_DAY_NUM) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Day number {} is out of bounds of type DateTime", d); - } - else if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Saturate) - { - if (d > MAX_DATETIME_DAY_NUM) - d = MAX_DATETIME_DAY_NUM; - } - return static_cast(time_zone.fromDayNum(DayNum(d))); - } - - static UInt32 execute(Int32 d, const DateLUTImpl & time_zone) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Saturate) - { - if (d < 0) - return 0; - else if (d > MAX_DATETIME_DAY_NUM) - d = MAX_DATETIME_DAY_NUM; - } - else if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (d < 0 || d > MAX_DATETIME_DAY_NUM) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type DateTime", d); - } - return static_cast(time_zone.fromDayNum(ExtendedDayNum(d))); - } - - static UInt32 execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) - { - return dt; - } - - static UInt32 execute(Int64 dt64, const DateLUTImpl & /*time_zone*/) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Ignore) - return static_cast(dt64); - else - { - if (dt64 < 0 || dt64 >= MAX_DATETIME_TIMESTAMP) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Saturate) - return dt64 < 0 ? 0 : std::numeric_limits::max(); - else - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type DateTime", dt64); - } - else - return static_cast(dt64); - } - } -}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -/// Implementation of toDate function. 
- -template -struct ToDateTransform32Or64 -{ - static constexpr auto name = "toDate"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); - } - /// if value is smaller (or equal) than maximum day value for Date, than treat it as day num, - /// otherwise treat it as unix timestamp. This is a bit weird, but we leave this behavior. - if (from <= DATE_LUT_MAX_DAY_NUM) - return from; - else - return time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); - } -}; - -/** Conversion of Date32 to Date. - */ -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ToDateTransform32Or64Signed -{ - static constexpr auto name = "toDate"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - // TODO: decide narrow or extended range based on FromType - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < 0 || from > MAX_DATE_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); - } - else - { - if (from < 0) - return 0; - } - return (from <= DATE_LUT_MAX_DAY_NUM) - ? static_cast(from) - : time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATE_TIMESTAMP))); - } -}; - -template -struct ToDateTransform8Or16Signed -{ - static constexpr auto name = "toDate"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if (from < 0) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Value {} is out of bounds of type Date", from); - else - return 0; - } - return from; - } -}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -/// Implementation of toDate32 function. 
- -template -struct ToDate32Transform32Or64 -{ - static constexpr auto name = "toDate32"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - if (from < DATE_LUT_MAX_EXTEND_DAY_NUM) - return static_cast(from); - else - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type Date32", from); - } - return time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATETIME64_TIMESTAMP))); - } - } -}; - -template -struct ToDate32Transform32Or64Signed -{ - static constexpr auto name = "toDate32"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) - { - static const Int32 daynum_min_offset = -static_cast(time_zone.getDayNumOffsetEpoch()); - - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < daynum_min_offset || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type Date32", from); - } - - if (from < daynum_min_offset) - return daynum_min_offset; - - return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) - ? static_cast(from) - : time_zone.toDayNum(std::min(time_t(Int64(from)), time_t(MAX_DATETIME64_TIMESTAMP))); - } -}; - -template -struct ToDate32Transform8Or16Signed -{ - static constexpr auto name = "toDate32"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - return from; - } -}; - -/** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, - * Float32, Float64) to Date. If the - * number is less than 65536, then it is treated as DayNum, and if it's greater or equals to 65536, - * then treated as unix timestamp. If the number exceeds UInt32, saturate to MAX_UINT32 then as DayNum. - * It's a bit illogical, as we actually have two functions in one. - * But allows to support frequent case, - * when user write toDate(UInt32), expecting conversion of unix timestamp to Date. - * (otherwise such usage would be frequent mistake). 
- */ -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - - -template -struct ToDateTimeTransform64 -{ - static constexpr auto name = "toDateTime"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); - } - return static_cast(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); - } -}; - -template -struct ToDateTimeTransformSigned -{ - static constexpr auto name = "toDateTime"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if (from < 0) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); - else - return 0; - } - return from; - } -}; - -template -struct ToDateTimeTransform64Signed -{ - static constexpr auto name = "toDateTime"; - - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < 0 || from > MAX_DATETIME_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime", from); - } - - if (from < 0) - return 0; - return static_cast(std::min(time_t(from), time_t(MAX_DATETIME_TIMESTAMP))); - } -}; - -/// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. 
-template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -/** Conversion of numeric to DateTime64 - */ - -template -struct ToDateTime64TransformUnsigned -{ - static constexpr auto name = "toDateTime64"; - - const DateTime64::NativeType scale_multiplier = 1; - - ToDateTime64TransformUnsigned(UInt32 scale = 0) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); - else - return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); - } - else - return DecimalUtils::decimalFromComponentsWithMultiplier(std::min(from, MAX_DATETIME64_TIMESTAMP), 0, scale_multiplier); - } -}; -template -struct ToDateTime64TransformSigned -{ - static constexpr auto name = "toDateTime64"; - - const DateTime64::NativeType scale_multiplier = 1; - - ToDateTime64TransformSigned(UInt32 scale = 0) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < MIN_DATETIME64_TIMESTAMP || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); - } - from = static_cast(std::max(from, MIN_DATETIME64_TIMESTAMP)); - from = static_cast(std::min(from, MAX_DATETIME64_TIMESTAMP)); - - return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); - } -}; -template -struct ToDateTime64TransformFloat -{ - static constexpr auto name = "toDateTime64"; - - const UInt32 scale = 1; - - ToDateTime64TransformFloat(UInt32 scale_ = 0) /// NOLINT - : scale(scale_) - {} - - NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const - { - if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) - { - if (from < MIN_DATETIME64_TIMESTAMP || from > MAX_DATETIME64_TIMESTAMP) [[unlikely]] - throw Exception(ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, "Timestamp value {} is out of bounds of type DateTime64", from); - } - - from = std::max(from, static_cast(MIN_DATETIME64_TIMESTAMP)); - from = std::min(from, static_cast(MAX_DATETIME64_TIMESTAMP)); - return convertToDecimal(from, scale); - } -}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct 
ConvertImpl - : DateTimeTransformImpl, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl, false> {}; - - -/** Conversion of DateTime64 to Date or DateTime: discards fractional part. - */ -template -struct FromDateTime64Transform -{ - static constexpr auto name = Transform::name; - - const DateTime64::NativeType scale_multiplier = 1; - - FromDateTime64Transform(UInt32 scale) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - auto execute(DateTime64::NativeType dt, const DateLUTImpl & time_zone) const - { - const auto c = DecimalUtils::splitWithScaleMultiplier(DateTime64(dt), scale_multiplier); - return Transform::execute(static_cast(c.whole), time_zone); - } -}; - -/** Conversion of DateTime64 to Date or DateTime: discards fractional part. - */ -template -struct ConvertImpl - : DateTimeTransformImpl>, false> {}; - -template -struct ConvertImpl - : DateTimeTransformImpl>, false> {}; - -struct ToDateTime64Transform -{ - static constexpr auto name = "toDateTime64"; - - const DateTime64::NativeType scale_multiplier = 1; - - ToDateTime64Transform(UInt32 scale = 0) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - DateTime64::NativeType execute(UInt16 d, const DateLUTImpl & time_zone) const - { - const auto dt = ToDateTimeImpl<>::execute(d, time_zone); - return execute(dt, time_zone); - } - - DateTime64::NativeType execute(Int32 d, const DateLUTImpl & time_zone) const - { - Int64 dt = static_cast(time_zone.fromDayNum(ExtendedDayNum(d))); - return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); - } - - DateTime64::NativeType execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) const - { - return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); - } -}; - -/** Conversion of Date or DateTime to DateTime64: add zero sub-second part. - */ -template -struct ConvertImpl - : DateTimeTransformImpl {}; - -template -struct ConvertImpl - : DateTimeTransformImpl {}; - -template -struct ConvertImpl - : DateTimeTransformImpl {}; - - -/** Transformation of numbers, dates, datetimes to strings: through formatting. 
- */ -template -struct FormatImpl -{ - template - static ReturnType execute(const typename DataType::FieldType x, WriteBuffer & wb, const DataType *, const DateLUTImpl *) - { - writeText(x, wb); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDate::FieldType x, WriteBuffer & wb, const DataTypeDate *, const DateLUTImpl * time_zone) - { - writeDateText(DayNum(x), wb, *time_zone); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl * time_zone) - { - writeDateText(ExtendedDayNum(x), wb, *time_zone); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDateTime::FieldType x, WriteBuffer & wb, const DataTypeDateTime *, const DateLUTImpl * time_zone) - { - writeDateTimeText(x, wb, *time_zone); - return ReturnType(true); - } -}; - -template <> -struct FormatImpl -{ - template - static ReturnType execute(const DataTypeDateTime64::FieldType x, WriteBuffer & wb, const DataTypeDateTime64 * type, const DateLUTImpl * time_zone) - { - writeDateTimeText(DateTime64(x), type->getScale(), wb, *time_zone); - return ReturnType(true); - } -}; - - -template -struct FormatImpl> -{ - template - static ReturnType execute(const FieldType x, WriteBuffer & wb, const DataTypeEnum * type, const DateLUTImpl *) - { - static constexpr bool throw_exception = std::is_same_v; - - if constexpr (throw_exception) - { - writeString(type->getNameForValue(x), wb); - } - else - { - StringRef res; - bool is_ok = type->getNameForValue(x, res); - if (is_ok) - writeString(res, wb); - return ReturnType(is_ok); - } - } -}; - -template -struct FormatImpl> -{ - template - static ReturnType execute(const FieldType x, WriteBuffer & wb, const DataTypeDecimal * type, const DateLUTImpl *) - { - writeText(x, type->getScale(), wb, false); - return ReturnType(true); - } -}; - - -/// DataTypeEnum to DataType free conversion -template -struct ConvertImpl, DataTypeNumber, Name, ConvertDefaultBehaviorTag> -{ - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) - { - return arguments[0].column; - } -}; - -static inline ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) -{ - ColumnUInt8::MutablePtr null_map = nullptr; - if (const auto * col_null = checkAndGetColumn(col.get())) - { - null_map = ColumnUInt8::create(); - null_map->insertRangeFrom(col_null->getNullMapColumn(), 0, col_null->size()); - } - return null_map; -} - -template -requires (!std::is_same_v) -struct ConvertImpl -{ - using FromFieldType = typename FromDataType::FieldType; - using ColVecType = ColumnVectorOrDecimal; - - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) - { - if constexpr (IsDataTypeDateOrDateTime) - { - auto datetime_arg = arguments[0]; - - const DateLUTImpl * time_zone = nullptr; - const ColumnConst * time_zone_column = nullptr; - - if (arguments.size() == 1) - { - auto non_null_args = createBlockWithNestedColumns(arguments); - time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); - } - else /// When we have a column for timezone - { - datetime_arg.column = datetime_arg.column->convertToFullColumnIfConst(); - - if constexpr (std::is_same_v || std::is_same_v) - time_zone = &DateLUT::instance(); - /// For argument of Date or 
DateTime type, second argument with time zone could be specified. - if constexpr (std::is_same_v || std::is_same_v) - { - if ((time_zone_column = checkAndGetColumnConst(arguments[1].column.get()))) - { - auto non_null_args = createBlockWithNestedColumns(arguments); - time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); - } - } - } - const auto & col_with_type_and_name = columnGetNested(datetime_arg); - - if (const auto col_from = checkAndGetColumn(col_with_type_and_name.column.get())) - { - auto col_to = ColumnString::create(); - - const typename ColVecType::Container & vec_from = col_from->getData(); - ColumnString::Chars & data_to = col_to->getChars(); - ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = vec_from.size(); - - if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD") + 1)); - else if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD") + 1)); - else if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss") + 1)); - else if constexpr (std::is_same_v) - data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss.") + col_from->getScale() + 1)); - else - data_to.resize(size * 3); /// Arbitrary - - offsets_to.resize(size); - - WriteBufferFromVector write_buffer(data_to); - const auto & type = static_cast(*col_with_type_and_name.type); - - ColumnUInt8::MutablePtr null_map = copyNullMap(datetime_arg.column); - - if (!null_map && arguments.size() > 1) - null_map = copyNullMap(arguments[1].column->convertToFullColumnIfConst()); - - if (null_map) - { - for (size_t i = 0; i < size; ++i) - { - if (!time_zone_column && arguments.size() > 1) - { - if (!arguments[1].column.get()->getDataAt(i).toString().empty()) - time_zone = &DateLUT::instance(arguments[1].column.get()->getDataAt(i).toString()); - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Provided time zone must be non-empty"); - } - bool is_ok = FormatImpl::template execute(vec_from[i], write_buffer, &type, time_zone); - null_map->getData()[i] |= !is_ok; - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - else - { - for (size_t i = 0; i < size; ++i) - { - if (!time_zone_column && arguments.size() > 1) - { - if (!arguments[1].column.get()->getDataAt(i).toString().empty()) - time_zone = &DateLUT::instance(arguments[1].column.get()->getDataAt(i).toString()); - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Provided time zone must be non-empty"); - } - FormatImpl::template execute(vec_from[i], write_buffer, &type, time_zone); - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - - write_buffer.finalize(); - - if (null_map) - return ColumnNullable::create(std::move(col_to), std::move(null_map)); - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), Name::name); - } - else - { - ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); - - const auto & col_with_type_and_name = columnGetNested(arguments[0]); - const auto & type = static_cast(*col_with_type_and_name.type); - - if (const auto col_from = checkAndGetColumn(col_with_type_and_name.column.get())) - { - auto col_to = ColumnString::create(); - - const typename ColVecType::Container & vec_from = col_from->getData(); - ColumnString::Chars & data_to = col_to->getChars(); - ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = vec_from.size(); - - 
data_to.resize(size * 3); - offsets_to.resize(size); - - WriteBufferFromVector write_buffer(data_to); - - if (null_map) - { - for (size_t i = 0; i < size; ++i) - { - bool is_ok = FormatImpl::template execute(vec_from[i], write_buffer, &type, nullptr); - /// We don't use timezones in this branch - null_map->getData()[i] |= !is_ok; - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - else - { - for (size_t i = 0; i < size; ++i) - { - FormatImpl::template execute(vec_from[i], write_buffer, &type, nullptr); - writeChar(0, write_buffer); - offsets_to[i] = write_buffer.count(); - } - } - - write_buffer.finalize(); - - if (null_map) - return ColumnNullable::create(std::move(col_to), std::move(null_map)); - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), Name::name); - } - } -}; - - -/// Generic conversion of any type to String or FixedString via serialization to text. -template -struct ConvertImplGenericToString -{ - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) - { - static_assert(std::is_same_v || std::is_same_v, - "Can be used only to serialize to ColumnString or ColumnFixedString"); - - ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); - - const auto & col_with_type_and_name = columnGetNested(arguments[0]); - const IDataType & type = *col_with_type_and_name.type; - const IColumn & col_from = *col_with_type_and_name.column; - - size_t size = col_from.size(); - auto col_to = removeNullable(result_type)->createColumn(); - - { - ColumnStringHelpers::WriteHelper write_helper( - assert_cast(*col_to), - size); - - auto & write_buffer = write_helper.getWriteBuffer(); - - FormatSettings format_settings; - auto serialization = type.getDefaultSerialization(); - for (size_t row = 0; row < size; ++row) - { - serialization->serializeText(col_from, row, write_buffer, format_settings); - write_helper.rowWritten(); - } - - write_helper.finalize(); - } - - if (result_type->isNullable() && null_map) - return ColumnNullable::create(std::move(col_to), std::move(null_map)); - return col_to; - } -}; - -/** Conversion of time_t to UInt16, Int32, UInt32 - */ -template -void convertFromTime(typename DataType::FieldType & x, time_t & time) -{ - x = time; -} - -template <> -inline void convertFromTime(DataTypeDate::FieldType & x, time_t & time) -{ - if (unlikely(time < 0)) - x = 0; - else if (unlikely(time > 0xFFFF)) - x = 0xFFFF; - else - x = time; -} - -template <> -inline void convertFromTime(DataTypeDate32::FieldType & x, time_t & time) -{ - x = static_cast(time); -} - -template <> -inline void convertFromTime(DataTypeDateTime::FieldType & x, time_t & time) -{ - if (unlikely(time < 0)) - x = 0; - else if (unlikely(time > MAX_DATETIME_TIMESTAMP)) - x = MAX_DATETIME_TIMESTAMP; - else - x = static_cast(time); -} - -/** Conversion of strings to numbers, dates, datetimes: through parsing. 
- */ -template -void parseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) -{ - if constexpr (std::is_floating_point_v) - { - if (precise_float_parsing) - readFloatTextPrecise(x, rb); - else - readFloatTextFast(x, rb); - } - else - readText(x, rb); -} - -template <> -inline void parseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone, bool) -{ - DayNum tmp(0); - readDateText(tmp, rb, *time_zone); - x = tmp; -} - -template <> -inline void parseImpl(DataTypeDate32::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone, bool) -{ - ExtendedDayNum tmp(0); - readDateText(tmp, rb, *time_zone); - x = tmp; -} - - -// NOTE: no need of extra overload of DateTime64, since readDateTimeText64 has different signature and that case is explicitly handled in the calling code. -template <> -inline void parseImpl(DataTypeDateTime::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone, bool) -{ - time_t time = 0; - readDateTimeText(time, rb, *time_zone); - convertFromTime(x, time); -} - -template <> -inline void parseImpl(DataTypeUUID::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool) -{ - UUID tmp; - readUUIDText(tmp, rb); - x = tmp.toUnderType(); -} - -template <> -inline void parseImpl(DataTypeIPv4::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool) -{ - IPv4 tmp; - readIPv4Text(tmp, rb); - x = tmp.toUnderType(); -} - -template <> -inline void parseImpl(DataTypeIPv6::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool) -{ - IPv6 tmp; - readIPv6Text(tmp, rb); - x = tmp; -} - -template -bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) -{ - if constexpr (std::is_floating_point_v) - { - if (precise_float_parsing) - return tryReadFloatTextPrecise(x, rb); - else - return tryReadFloatTextFast(x, rb); - } - else /*if constexpr (is_integer_v)*/ - return tryReadIntText(x, rb); -} - -template <> -inline bool tryParseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone, bool) -{ - DayNum tmp(0); - if (!tryReadDateText(tmp, rb, *time_zone)) - return false; - x = tmp; - return true; -} - -template <> -inline bool tryParseImpl(DataTypeDate32::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone, bool) -{ - ExtendedDayNum tmp(0); - if (!tryReadDateText(tmp, rb, *time_zone)) - return false; - x = tmp; - return true; -} - -template <> -inline bool tryParseImpl(DataTypeDateTime::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone, bool) -{ - time_t time = 0; - if (!tryReadDateTimeText(time, rb, *time_zone)) - return false; - convertFromTime(x, time); - return true; -} - -template <> -inline bool tryParseImpl(DataTypeUUID::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool) -{ - UUID tmp; - if (!tryReadUUIDText(tmp, rb)) - return false; - - x = tmp.toUnderType(); - return true; -} - -template <> -inline bool tryParseImpl(DataTypeIPv4::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool) -{ - IPv4 tmp; - if (!tryReadIPv4Text(tmp, rb)) - return false; - - x = tmp.toUnderType(); - return true; -} - -template <> -inline bool tryParseImpl(DataTypeIPv6::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool) -{ - IPv6 tmp; - if (!tryReadIPv6Text(tmp, rb)) - return false; - - x = tmp; - return true; -} - - -/** Throw exception with verbose message when string value is not parsed completely. 
- */ -[[noreturn]] inline void throwExceptionForIncompletelyParsedValue(ReadBuffer & read_buffer, const IDataType & result_type) -{ - WriteBufferFromOwnString message_buf; - message_buf << "Cannot parse string " << quote << String(read_buffer.buffer().begin(), read_buffer.buffer().size()) - << " as " << result_type.getName() - << ": syntax error"; - - if (read_buffer.offset()) - message_buf << " at position " << read_buffer.offset() - << " (parsed just " << quote << String(read_buffer.buffer().begin(), read_buffer.offset()) << ")"; - else - message_buf << " at begin of string"; - - // Currently there are no functions toIPv{4,6}Or{Null,Zero} - if (isNativeNumber(result_type) && !(result_type.getName() == "IPv4" || result_type.getName() == "IPv6")) - message_buf << ". Note: there are to" << result_type.getName() << "OrZero and to" << result_type.getName() << "OrNull functions, which returns zero/NULL instead of throwing exception."; - - throw Exception(PreformattedMessage{message_buf.str(), "Cannot parse string {} as {}: syntax error {}"}, ErrorCodes::CANNOT_PARSE_TEXT); -} - - -enum class ConvertFromStringExceptionMode -{ - Throw, /// Throw exception if value cannot be parsed. - Zero, /// Fill with zero or default if value cannot be parsed. - Null /// Return ColumnNullable with NULLs when value cannot be parsed. -}; - -enum class ConvertFromStringParsingMode -{ - Normal, - BestEffort, /// Only applicable for DateTime. Will use sophisticated method, that is slower. - BestEffortUS -}; - -template -struct ConvertThroughParsing -{ - static_assert(std::is_same_v || std::is_same_v, - "ConvertThroughParsing is only applicable for String or FixedString data types"); - - static constexpr bool to_datetime64 = std::is_same_v; - - static bool isAllRead(ReadBuffer & in) - { - /// In case of FixedString, skip zero bytes at end. - if constexpr (std::is_same_v) - while (!in.eof() && *in.position() == 0) - ++in.position(); - - if (in.eof()) - return true; - - /// Special case, that allows to parse string with DateTime or DateTime64 as Date or Date32. - if constexpr (std::is_same_v || std::is_same_v) - { - if (!in.eof() && (*in.position() == ' ' || *in.position() == 'T')) - { - if (in.buffer().size() == strlen("YYYY-MM-DD hh:mm:ss")) - return true; - - if (in.buffer().size() >= strlen("YYYY-MM-DD hh:mm:ss.x") - && in.buffer().begin()[19] == '.') - { - in.position() = in.buffer().begin() + 20; - - while (!in.eof() && isNumericASCII(*in.position())) - ++in.position(); - - if (in.eof()) - return true; - } - } - } - - return false; - } - - template - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr & res_type, size_t input_rows_count, - Additions additions [[maybe_unused]] = Additions()) - { - using ColVecTo = typename ToDataType::ColumnType; - - const DateLUTImpl * local_time_zone [[maybe_unused]] = nullptr; - const DateLUTImpl * utc_time_zone [[maybe_unused]] = nullptr; - - /// For conversion to Date or DateTime type, second argument with time zone could be specified. - if constexpr (std::is_same_v || to_datetime64) - { - const auto result_type = removeNullable(res_type); - // Time zone is already figured out during result type resolution, no need to do it here. 
- if (const auto dt_col = checkAndGetDataType(result_type.get())) - local_time_zone = &dt_col->getTimeZone(); - else - local_time_zone = &extractTimeZoneFromFunctionArguments(arguments, 1, 0); - - if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffort || parsing_mode == ConvertFromStringParsingMode::BestEffortUS) - utc_time_zone = &DateLUT::instance("UTC"); - } - else if constexpr (std::is_same_v || std::is_same_v) - { - // Timezone is more or less dummy when parsing Date/Date32 from string. - local_time_zone = &DateLUT::instance(); - utc_time_zone = &DateLUT::instance("UTC"); - } - - const IColumn * col_from = arguments[0].column.get(); - const ColumnString * col_from_string = checkAndGetColumn(col_from); - const ColumnFixedString * col_from_fixed_string = checkAndGetColumn(col_from); - - if (std::is_same_v && !col_from_string) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - col_from->getName(), Name::name); - - if (std::is_same_v && !col_from_fixed_string) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - col_from->getName(), Name::name); - - size_t size = input_rows_count; - typename ColVecTo::MutablePtr col_to = nullptr; - - if constexpr (IsDataTypeDecimal) - { - UInt32 scale = additions; - if constexpr (to_datetime64) - { - ToDataType check_bounds_in_ctor(scale, local_time_zone ? local_time_zone->getTimeZone() : String{}); - } - else - { - ToDataType check_bounds_in_ctor(ToDataType::maxPrecision(), scale); - } - col_to = ColVecTo::create(size, scale); - } - else - col_to = ColVecTo::create(size); - - typename ColVecTo::Container & vec_to = col_to->getData(); - - ColumnUInt8::MutablePtr col_null_map_to; - ColumnUInt8::Container * vec_null_map_to [[maybe_unused]] = nullptr; - if constexpr (exception_mode == ConvertFromStringExceptionMode::Null) - { - col_null_map_to = ColumnUInt8::create(size); - vec_null_map_to = &col_null_map_to->getData(); - } - - const ColumnString::Chars * chars = nullptr; - const IColumn::Offsets * offsets = nullptr; - size_t fixed_string_size = 0; - - if constexpr (std::is_same_v) - { - chars = &col_from_string->getChars(); - offsets = &col_from_string->getOffsets(); - } - else - { - chars = &col_from_fixed_string->getChars(); - fixed_string_size = col_from_fixed_string->getN(); - } - - size_t current_offset = 0; - - bool precise_float_parsing = false; - - if (DB::CurrentThread::isInitialized()) - { - const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext(); - - if (query_context) - precise_float_parsing = query_context->getSettingsRef().precise_float_parsing; - } - - for (size_t i = 0; i < size; ++i) - { - size_t next_offset = std::is_same_v ? (*offsets)[i] : (current_offset + fixed_string_size); - size_t string_size = std::is_same_v ? 
next_offset - current_offset - 1 : fixed_string_size; - - ReadBufferFromMemory read_buffer(&(*chars)[current_offset], string_size); - - if constexpr (exception_mode == ConvertFromStringExceptionMode::Throw) - { - if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffort) - { - if constexpr (to_datetime64) - { - DateTime64 res = 0; - parseDateTime64BestEffort(res, col_to->getScale(), read_buffer, *local_time_zone, *utc_time_zone); - vec_to[i] = res; - } - else - { - time_t res; - parseDateTimeBestEffort(res, read_buffer, *local_time_zone, *utc_time_zone); - convertFromTime(vec_to[i], res); - } - } - else if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffortUS) - { - if constexpr (to_datetime64) - { - DateTime64 res = 0; - parseDateTime64BestEffortUS(res, col_to->getScale(), read_buffer, *local_time_zone, *utc_time_zone); - vec_to[i] = res; - } - else - { - time_t res; - parseDateTimeBestEffortUS(res, read_buffer, *local_time_zone, *utc_time_zone); - convertFromTime(vec_to[i], res); - } - } - else - { - if constexpr (to_datetime64) - { - DateTime64 value = 0; - readDateTime64Text(value, col_to->getScale(), read_buffer, *local_time_zone); - vec_to[i] = value; - } - else if constexpr (IsDataTypeDecimal) - { - SerializationDecimal::readText( - vec_to[i], read_buffer, ToDataType::maxPrecision(), col_to->getScale()); - } - else - { - /// we want to utilize constexpr condition here, which is not mixable with value comparison - do - { - if constexpr (std::is_same_v && std::is_same_v) - { - if (fixed_string_size == IPV6_BINARY_LENGTH) - { - readBinary(vec_to[i], read_buffer); - break; - } - } - parseImpl(vec_to[i], read_buffer, local_time_zone, precise_float_parsing); - } while (false); - } - } - - if (!isAllRead(read_buffer)) - throwExceptionForIncompletelyParsedValue(read_buffer, *res_type); - } - else - { - bool parsed; - - if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffort) - { - if constexpr (to_datetime64) - { - DateTime64 res = 0; - parsed = tryParseDateTime64BestEffort(res, col_to->getScale(), read_buffer, *local_time_zone, *utc_time_zone); - vec_to[i] = res; - } - else - { - time_t res; - parsed = tryParseDateTimeBestEffort(res, read_buffer, *local_time_zone, *utc_time_zone); - convertFromTime(vec_to[i],res); - } - } - else if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffortUS) - { - if constexpr (to_datetime64) - { - DateTime64 res = 0; - parsed = tryParseDateTime64BestEffortUS(res, col_to->getScale(), read_buffer, *local_time_zone, *utc_time_zone); - vec_to[i] = res; - } - else - { - time_t res; - parsed = tryParseDateTimeBestEffortUS(res, read_buffer, *local_time_zone, *utc_time_zone); - convertFromTime(vec_to[i],res); - } - } - else - { - if constexpr (to_datetime64) - { - DateTime64 value = 0; - parsed = tryReadDateTime64Text(value, col_to->getScale(), read_buffer, *local_time_zone); - vec_to[i] = value; - } - else if constexpr (IsDataTypeDecimal) - { - parsed = SerializationDecimal::tryReadText( - vec_to[i], read_buffer, ToDataType::maxPrecision(), col_to->getScale()); - } - else - { - /// we want to utilize constexpr condition here, which is not mixable with value comparison - do - { - if constexpr (std::is_same_v && std::is_same_v) - { - if (fixed_string_size == IPV6_BINARY_LENGTH) - { - readBinary(vec_to[i], read_buffer); - parsed = true; - break; - } - } - - parsed = tryParseImpl(vec_to[i], read_buffer, local_time_zone, precise_float_parsing); - } while (false); - } - } - - if (!isAllRead(read_buffer)) - parsed = 
false; - - if (!parsed) - { - if constexpr (std::is_same_v) - { - vec_to[i] = -static_cast(DateLUT::instance().getDayNumOffsetEpoch()); - } - else - { - vec_to[i] = static_cast(0); - } - } - - if constexpr (exception_mode == ConvertFromStringExceptionMode::Null) - (*vec_null_map_to)[i] = !parsed; - } - - current_offset = next_offset; - } - - if constexpr (exception_mode == ConvertFromStringExceptionMode::Null) - return ColumnNullable::create(std::move(col_to), std::move(col_null_map_to)); - else - return col_to; - } -}; - - -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (!std::is_same_v) -struct ConvertImpl - : ConvertThroughParsing {}; - -template -requires (is_any_of && is_any_of) -struct ConvertImpl - : ConvertThroughParsing {}; - -/// Generic conversion of any type from String. Used for complex types: Array and Tuple or types with custom serialization. -template -struct ConvertImplGenericFromString -{ - static ColumnPtr execute(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * column_nullable, size_t input_rows_count) - { - static_assert(std::is_same_v || std::is_same_v, - "Can be used only to parse from ColumnString or ColumnFixedString"); - - const IColumn & column_from = *arguments[0].column; - const IDataType & data_type_to = *result_type; - auto res = data_type_to.createColumn(); - auto serialization = data_type_to.getDefaultSerialization(); - const auto * null_map = column_nullable ? &column_nullable->getNullMapData() : nullptr; - - executeImpl(column_from, *res, *serialization, input_rows_count, null_map, result_type.get()); - return res; - } - - static void executeImpl( - const IColumn & column_from, - IColumn & column_to, - const ISerialization & serialization_from, - size_t input_rows_count, - const PaddedPODArray * null_map = nullptr, - const IDataType * result_type = nullptr) - { - static_assert(std::is_same_v || std::is_same_v, - "Can be used only to parse from ColumnString or ColumnFixedString"); - - if (const StringColumnType * col_from_string = checkAndGetColumn(&column_from)) - { - column_to.reserve(input_rows_count); - - FormatSettings format_settings; - for (size_t i = 0; i < input_rows_count; ++i) - { - if (null_map && (*null_map)[i]) - { - column_to.insertDefault(); - continue; - } - - const auto & val = col_from_string->getDataAt(i); - ReadBufferFromMemory read_buffer(val.data, val.size); - try - { - serialization_from.deserializeWholeText(column_to, read_buffer, format_settings); - } - catch (const Exception & e) - { - auto * nullable_column = typeid_cast(&column_to); - if (e.code() == ErrorCodes::CANNOT_PARSE_BOOL && nullable_column) - { - auto & col_nullmap = nullable_column->getNullMapData(); - if (col_nullmap.size() != nullable_column->size()) - col_nullmap.resize_fill(nullable_column->size()); - if (nullable_column->size() == (i + 1)) - nullable_column->popBack(1); - nullable_column->insertDefault(); - continue; - } - throw; - } - - if (!read_buffer.eof()) - { - if (result_type) - throwExceptionForIncompletelyParsedValue(read_buffer, *result_type); - else - throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, - "Cannot parse string to column {}. 
Expected eof", column_to.getName()); - } - } - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of first argument of conversion function from string", - column_from.getName()); - } - -}; - - -template <> -struct ConvertImpl - : ConvertImpl {}; - -template <> -struct ConvertImpl - : ConvertImpl {}; - -/** If types are identical, just take reference to column. - */ -template -requires (!T::is_parametric) -struct ConvertImpl -{ - template - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/, - Additions additions [[maybe_unused]] = Additions()) - { - return arguments[0].column; - } -}; - -template -struct ConvertImpl -{ - template - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/, - Additions additions [[maybe_unused]] = Additions()) - { - - return arguments[0].column; - } -}; - - -/** Conversion from FixedString to String. - * Cutting sequences of zero bytes from end of strings. - */ -template -struct ConvertImpl -{ - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type, size_t /*input_rows_count*/) - { - ColumnUInt8::MutablePtr null_map = copyNullMap(arguments[0].column); - const auto & nested = columnGetNested(arguments[0]); - if (const ColumnFixedString * col_from = checkAndGetColumn(nested.column.get())) - { - auto col_to = ColumnString::create(); - - const ColumnFixedString::Chars & data_from = col_from->getChars(); - ColumnString::Chars & data_to = col_to->getChars(); - ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = col_from->size(); - size_t n = col_from->getN(); - data_to.resize(size * (n + 1)); /// + 1 - zero terminator - offsets_to.resize(size); - - size_t offset_from = 0; - size_t offset_to = 0; - for (size_t i = 0; i < size; ++i) - { - if (!null_map || !null_map->getData()[i]) - { - size_t bytes_to_copy = n; - while (bytes_to_copy > 0 && data_from[offset_from + bytes_to_copy - 1] == 0) - --bytes_to_copy; - - memcpy(&data_to[offset_to], &data_from[offset_from], bytes_to_copy); - offset_to += bytes_to_copy; - } - data_to[offset_to] = 0; - ++offset_to; - offsets_to[i] = offset_to; - offset_from += n; - } - - data_to.resize(offset_to); - if (return_type->isNullable() && null_map) - return ColumnNullable::create(std::move(col_to), std::move(null_map)); - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), Name::name); - } -}; - - -/// Declared early because used below. 
-struct NameToDate { static constexpr auto name = "toDate"; };
-struct NameToDate32 { static constexpr auto name = "toDate32"; };
-struct NameToDateTime { static constexpr auto name = "toDateTime"; };
-struct NameToDateTime32 { static constexpr auto name = "toDateTime32"; };
-struct NameToDateTime64 { static constexpr auto name = "toDateTime64"; };
-struct NameToString { static constexpr auto name = "toString"; };
-struct NameToDecimal32 { static constexpr auto name = "toDecimal32"; };
-struct NameToDecimal64 { static constexpr auto name = "toDecimal64"; };
-struct NameToDecimal128 { static constexpr auto name = "toDecimal128"; };
-struct NameToDecimal256 { static constexpr auto name = "toDecimal256"; };
-
-
-#define DEFINE_NAME_TO_INTERVAL(INTERVAL_KIND) \
-    struct NameToInterval ## INTERVAL_KIND \
-    { \
-        static constexpr auto name = "toInterval" #INTERVAL_KIND; \
-        static constexpr auto kind = IntervalKind::Kind::INTERVAL_KIND; \
-    };
-
-DEFINE_NAME_TO_INTERVAL(Nanosecond)
-DEFINE_NAME_TO_INTERVAL(Microsecond)
-DEFINE_NAME_TO_INTERVAL(Millisecond)
-DEFINE_NAME_TO_INTERVAL(Second)
-DEFINE_NAME_TO_INTERVAL(Minute)
-DEFINE_NAME_TO_INTERVAL(Hour)
-DEFINE_NAME_TO_INTERVAL(Day)
-DEFINE_NAME_TO_INTERVAL(Week)
-DEFINE_NAME_TO_INTERVAL(Month)
-DEFINE_NAME_TO_INTERVAL(Quarter)
-DEFINE_NAME_TO_INTERVAL(Year)
-
-#undef DEFINE_NAME_TO_INTERVAL
-
-struct NameParseDateTimeBestEffort;
-struct NameParseDateTimeBestEffortOrZero;
-struct NameParseDateTimeBestEffortOrNull;
-
-template <typename Name, typename ToDataType>
-static inline bool isDateTime64(const ColumnsWithTypeAndName & arguments)
-{
-    if constexpr (std::is_same_v<ToDataType, DataTypeDateTime64>)
-        return true;
-    else if constexpr (std::is_same_v<Name, NameToDateTime> || std::is_same_v<Name, NameParseDateTimeBestEffort>
-        || std::is_same_v<Name, NameParseDateTimeBestEffortOrZero> || std::is_same_v<Name, NameParseDateTimeBestEffortOrNull>)
-    {
-        return (arguments.size() == 2 && isUInt(arguments[1].type)) || arguments.size() == 3;
-    }
-
-    return false;
-}
-
-template <typename ToDataType, typename Name, typename MonotonicityImpl>
-class FunctionConvert : public IFunction
-{
-public:
-    using Monotonic = MonotonicityImpl;
-
-    static constexpr auto name = Name::name;
-    static constexpr bool to_decimal =
-        std::is_same_v<ToDataType, DataTypeDecimal<Decimal32>> || std::is_same_v<ToDataType, DataTypeDecimal<Decimal64>>
-        || std::is_same_v<ToDataType, DataTypeDecimal<Decimal128>> || std::is_same_v<ToDataType, DataTypeDecimal<Decimal256>>;
-
-    static constexpr bool to_datetime64 = std::is_same_v<ToDataType, DataTypeDateTime64>;
-
-    static constexpr bool to_string_or_fixed_string = std::is_same_v<ToDataType, DataTypeString> ||
-        std::is_same_v<ToDataType, DataTypeFixedString>;
-
-    static constexpr bool to_date_or_datetime = std::is_same_v<ToDataType, DataTypeDate> ||
-        std::is_same_v<ToDataType, DataTypeDate32> ||
-        std::is_same_v<ToDataType, DataTypeDateTime>;
-
-    static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionConvert>(context); }
-    static FunctionPtr create() { return std::make_shared<FunctionConvert>(); }
-
-    FunctionConvert() = default;
-    explicit FunctionConvert(ContextPtr context_) : context(context_) {}
-
-    String getName() const override
-    {
-        return name;
-    }
-
-    bool isVariadic() const override { return true; }
-    size_t getNumberOfArguments() const override { return 0; }
-    bool isInjective(const ColumnsWithTypeAndName &) const override { return std::is_same_v<Name, NameToString>; }
-    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & arguments) const override
-    {
-        /// TODO: We can make more optimizations here.
- return !(to_date_or_datetime && isNumber(*arguments[0].type)); - } - - using DefaultReturnTypeGetter = std::function; - static DataTypePtr getReturnTypeDefaultImplementationForNulls(const ColumnsWithTypeAndName & arguments, const DefaultReturnTypeGetter & getter) - { - NullPresence null_presence = getNullPresense(arguments); - - if (null_presence.has_null_constant) - { - return makeNullable(std::make_shared()); - } - if (null_presence.has_nullable) - { - auto nested_columns = Block(createBlockWithNestedColumns(arguments)); - auto return_type = getter(ColumnsWithTypeAndName(nested_columns.begin(), nested_columns.end())); - return makeNullable(return_type); - } - - return getter(arguments); - } - - DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override - { - auto getter = [&] (const auto & args) { return getReturnTypeImplRemovedNullable(args); }; - auto res = getReturnTypeDefaultImplementationForNulls(arguments, getter); - to_nullable = res->isNullable(); - checked_return_type = true; - return res; - } - - DataTypePtr getReturnTypeImplRemovedNullable(const ColumnsWithTypeAndName & arguments) const - { - FunctionArgumentDescriptors mandatory_args = {{"Value", nullptr, nullptr, nullptr}}; - FunctionArgumentDescriptors optional_args; - - if constexpr (to_decimal) - { - mandatory_args.push_back({"scale", static_cast(&isNativeInteger), &isColumnConst, "const Integer"}); - } - - if (!to_decimal && isDateTime64(arguments)) - { - mandatory_args.push_back({"scale", static_cast(&isNativeInteger), &isColumnConst, "const Integer"}); - } - - // toString(DateTime or DateTime64, [timezone: String]) - if ((std::is_same_v && !arguments.empty() && (isDateTime64(arguments[0].type) || isDateTime(arguments[0].type))) - // toUnixTimestamp(value[, timezone : String]) - || std::is_same_v - // toDate(value[, timezone : String]) - || std::is_same_v // TODO: shall we allow timestamp argument for toDate? DateTime knows nothing about timezones and this argument is ignored below. - // toDate32(value[, timezone : String]) - || std::is_same_v - // toDateTime(value[, timezone: String]) - || std::is_same_v - // toDateTime64(value, scale : Integer[, timezone: String]) - || std::is_same_v) - { - optional_args.push_back({"timezone", static_cast(&isString), nullptr, "String"}); - } - - validateFunctionArgumentTypes(*this, arguments, mandatory_args, optional_args); - - if constexpr (std::is_same_v) - { - return std::make_shared(Name::kind); - } - else if constexpr (to_decimal) - { - UInt64 scale = extractToDecimalScale(arguments[1]); - - if constexpr (std::is_same_v) - return createDecimalMaxPrecision(scale); - else if constexpr (std::is_same_v) - return createDecimalMaxPrecision(scale); - else if constexpr (std::is_same_v) - return createDecimalMaxPrecision(scale); - else if constexpr (std::is_same_v) - return createDecimalMaxPrecision(scale); - - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected branch in code of conversion function: it is a bug."); - } - else - { - // Optional second argument with time zone for DateTime. - UInt8 timezone_arg_position = 1; - UInt32 scale [[maybe_unused]] = DataTypeDateTime64::default_scale; - - // DateTime64 requires more arguments: scale and timezone. Since timezone is optional, scale should be first. 
- if (isDateTime64(arguments)) - { - timezone_arg_position += 1; - scale = static_cast(arguments[1].column->get64(0)); - - if (to_datetime64 || scale != 0) /// toDateTime('xxxx-xx-xx xx:xx:xx', 0) return DateTime - return std::make_shared(scale, - extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0, false)); - - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0, false)); - } - - if constexpr (std::is_same_v) - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0, false)); - else if constexpr (std::is_same_v) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected branch in code of conversion function: it is a bug."); - else - return std::make_shared(); - } - } - - /// Function actually uses default implementation for nulls, - /// but we need to know if return type is Nullable or not, - /// so we use checked_return_type only to intercept the first call to getReturnTypeImpl(...). - bool useDefaultImplementationForNulls() const override - { - bool to_nullable_string = to_nullable && std::is_same_v; - return checked_return_type && !to_nullable_string; - } - - bool useDefaultImplementationForConstants() const override { return true; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override - { - if constexpr (std::is_same_v) - return {}; - else if constexpr (std::is_same_v) - return {2}; - return {1}; - } - bool canBeExecutedOnDefaultArguments() const override { return false; } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override - { - try - { - return executeInternal(arguments, result_type, input_rows_count); - } - catch (Exception & e) - { - /// More convenient error message. 
-            if (e.code() == ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF)
-            {
-                e.addMessage("Cannot parse "
-                    + result_type->getName() + " from "
-                    + arguments[0].type->getName()
-                    + ", because value is too short");
-            }
-            else if (e.code() == ErrorCodes::CANNOT_PARSE_NUMBER
-                || e.code() == ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT
-                || e.code() == ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED
-                || e.code() == ErrorCodes::CANNOT_PARSE_QUOTED_STRING
-                || e.code() == ErrorCodes::CANNOT_PARSE_ESCAPE_SEQUENCE
-                || e.code() == ErrorCodes::CANNOT_PARSE_DATE
-                || e.code() == ErrorCodes::CANNOT_PARSE_DATETIME
-                || e.code() == ErrorCodes::CANNOT_PARSE_UUID
-                || e.code() == ErrorCodes::CANNOT_PARSE_IPV4
-                || e.code() == ErrorCodes::CANNOT_PARSE_IPV6)
-            {
-                e.addMessage("Cannot parse "
-                    + result_type->getName() + " from "
-                    + arguments[0].type->getName());
-            }
-
-            throw;
-        }
-    }
-
-    bool hasInformationAboutMonotonicity() const override
-    {
-        return Monotonic::has();
-    }
-
-    Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override
-    {
-        return Monotonic::get(type, left, right);
-    }
-
-private:
-    ContextPtr context;
-    mutable bool checked_return_type = false;
-    mutable bool to_nullable = false;
-
-    ColumnPtr executeInternal(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
-    {
-        if (arguments.empty())
-            throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} expects at least 1 argument", getName());
-
-        if (result_type->onlyNull())
-            return result_type->createColumnConstWithDefaultValue(input_rows_count);
-
-        const DataTypePtr from_type = removeNullable(arguments[0].type);
-        ColumnPtr result_column;
-
-        [[maybe_unused]] FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior = default_date_time_overflow_behavior;
-
-        if (context)
-            date_time_overflow_behavior = context->getSettingsRef().date_time_overflow_behavior.value;
-
-        auto call = [&](const auto & types, const auto & tag) -> bool
-        {
-            using Types = std::decay_t<decltype(types)>;
-            using LeftDataType = typename Types::LeftType;
-            using RightDataType = typename Types::RightType;
-            using SpecialTag = std::decay_t<decltype(tag)>;
-
-            if constexpr (IsDataTypeDecimal<RightDataType>)
-            {
-                if constexpr (std::is_same_v<RightDataType, DataTypeDateTime64>)
-                {
-                    /// Account for optional timezone argument.
- if (arguments.size() != 2 && arguments.size() != 3) - throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} expects 2 or 3 arguments for DataTypeDateTime64.", getName()); - } - else if (arguments.size() != 2) - { - throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} expects 2 arguments for Decimal.", getName()); - } - - const ColumnWithTypeAndName & scale_column = arguments[1]; - UInt32 scale = extractToDecimalScale(scale_column); - - switch (date_time_overflow_behavior) - { - case FormatSettings::DateTimeOverflowBehavior::Throw: - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count, scale); - break; - case FormatSettings::DateTimeOverflowBehavior::Ignore: - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count, scale); - break; - case FormatSettings::DateTimeOverflowBehavior::Saturate: - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count, scale); - break; - } - - } - else if constexpr (IsDataTypeDateOrDateTime && std::is_same_v) - { - const auto * dt64 = assert_cast(arguments[0].type.get()); - switch (date_time_overflow_behavior) - { - case FormatSettings::DateTimeOverflowBehavior::Throw: - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count, dt64->getScale()); - break; - case FormatSettings::DateTimeOverflowBehavior::Ignore: - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count, dt64->getScale()); - break; - case FormatSettings::DateTimeOverflowBehavior::Saturate: - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count, dt64->getScale()); - break; - } - } -#define GENERATE_OVERFLOW_MODE_CASE(OVERFLOW_MODE) \ - case FormatSettings::DateTimeOverflowBehavior::OVERFLOW_MODE: \ - result_column = ConvertImpl::execute( \ - arguments, result_type, input_rows_count); \ - break; - - else if constexpr (IsDataTypeDecimalOrNumber && IsDataTypeDecimalOrNumber) - { - using LeftT = typename LeftDataType::FieldType; - using RightT = typename RightDataType::FieldType; - - static constexpr bool bad_left = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; - static constexpr bool bad_right = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; - - /// Disallow int vs UUID conversion (but support int vs UInt128 conversion) - if constexpr ((bad_left && std::is_same_v) || - (bad_right && std::is_same_v)) - { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Wrong UUID conversion"); - } - else - { - switch (date_time_overflow_behavior) - { - GENERATE_OVERFLOW_MODE_CASE(Throw) - GENERATE_OVERFLOW_MODE_CASE(Ignore) - GENERATE_OVERFLOW_MODE_CASE(Saturate) - } - } - } - else if constexpr ((IsDataTypeNumber || IsDataTypeDateOrDateTime) - && IsDataTypeDateOrDateTime) - { - switch (date_time_overflow_behavior) - { - GENERATE_OVERFLOW_MODE_CASE(Throw) - GENERATE_OVERFLOW_MODE_CASE(Ignore) - GENERATE_OVERFLOW_MODE_CASE(Saturate) - } - } -#undef GENERATE_OVERFLOW_MODE_CASE - else - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count); - - return true; - }; - - if (isDateTime64(arguments)) - { - /// For toDateTime('xxxx-xx-xx xx:xx:xx.00', 2[, 'timezone']) we need to it convert to DateTime64 - const ColumnWithTypeAndName & scale_column = arguments[1]; - UInt32 scale = extractToDecimalScale(scale_column); - - if (to_datetime64 || scale != 0) /// When scale = 0, the data type is DateTime otherwise the data type is DateTime64 - { - if 
(!callOnIndexAndDataType(from_type->getTypeId(), call, ConvertDefaultBehaviorTag{})) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", - arguments[0].type->getName(), getName()); - - return result_column; - } - } - - if constexpr (std::is_same_v) - { - if (from_type->getCustomSerialization()) - return ConvertImplGenericToString::execute(arguments, result_type, input_rows_count); - } - - bool done = false; - if constexpr (to_string_or_fixed_string) - { - done = callOnIndexAndDataType(from_type->getTypeId(), call, ConvertDefaultBehaviorTag{}); - } - else - { - bool cast_ipv4_ipv6_default_on_conversion_error = false; - if constexpr (is_any_of) - if (context && (cast_ipv4_ipv6_default_on_conversion_error = context->getSettingsRef().cast_ipv4_ipv6_default_on_conversion_error)) - done = callOnIndexAndDataType(from_type->getTypeId(), call, ConvertReturnZeroOnErrorTag{}); - - if (!cast_ipv4_ipv6_default_on_conversion_error) - { - /// We should use ConvertFromStringExceptionMode::Null mode when converting from String (or FixedString) - /// to Nullable type, to avoid 'value is too short' error on attempt to parse empty string from NULL values. - if (to_nullable && WhichDataType(from_type).isStringOrFixedString()) - done = callOnIndexAndDataType(from_type->getTypeId(), call, ConvertReturnNullOnErrorTag{}); - else - done = callOnIndexAndDataType(from_type->getTypeId(), call, ConvertDefaultBehaviorTag{}); - } - } - - if (!done) - { - /// Generic conversion of any type to String. - if (std::is_same_v) - { - return ConvertImplGenericToString::execute(arguments, result_type, input_rows_count); - } - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", - arguments[0].type->getName(), getName()); - } - - return result_column; - } -}; - - -/** Function toTOrZero (where T is number of date or datetime type): - * try to convert from String to type T through parsing, - * if cannot parse, return default value instead of throwing exception. - * Function toTOrNull will return Nullable type with NULL when cannot parse. - * NOTE Also need to implement tryToUnixTimestamp with timezone. 
- */ -template -class FunctionConvertFromString : public IFunction -{ -public: - static constexpr auto name = Name::name; - static constexpr bool to_decimal = - std::is_same_v> || - std::is_same_v> || - std::is_same_v> || - std::is_same_v>; - - static constexpr bool to_datetime64 = std::is_same_v; - - static FunctionPtr create(ContextPtr) { return std::make_shared(); } - static FunctionPtr create() { return std::make_shared(); } - - String getName() const override - { - return name; - } - - bool isVariadic() const override { return true; } - bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } - size_t getNumberOfArguments() const override { return 0; } - - bool useDefaultImplementationForConstants() const override { return true; } - bool canBeExecutedOnDefaultArguments() const override { return false; } - - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; } - - DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override - { - DataTypePtr res; - - if (isDateTime64(arguments)) - { - validateFunctionArgumentTypes(*this, arguments, - FunctionArgumentDescriptors{{"string", static_cast(&isStringOrFixedString), nullptr, "String or FixedString"}}, - // optional - FunctionArgumentDescriptors{ - {"precision", static_cast(&isUInt8), isColumnConst, "const UInt8"}, - {"timezone", static_cast(&isStringOrFixedString), isColumnConst, "const String or FixedString"}, - }); - - UInt64 scale = to_datetime64 ? DataTypeDateTime64::default_scale : 0; - if (arguments.size() > 1) - scale = extractToDecimalScale(arguments[1]); - const auto timezone = extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false); - - res = scale == 0 ? res = std::make_shared(timezone) : std::make_shared(scale, timezone); - } - else - { - if ((arguments.size() != 1 && arguments.size() != 2) || (to_decimal && arguments.size() != 2)) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Number of arguments for function {} doesn't match: passed {}, should be 1 or 2. " - "Second argument only make sense for DateTime (time zone, optional) and Decimal (scale).", - getName(), arguments.size()); - - if (!isStringOrFixedString(arguments[0].type)) - { - if (this->getName().find("OrZero") != std::string::npos || - this->getName().find("OrNull") != std::string::npos) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of first argument of function {}. 
" - "Conversion functions with postfix 'OrZero' or 'OrNull' should take String argument", - arguments[0].type->getName(), getName()); - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of first argument of function {}", - arguments[0].type->getName(), getName()); - } - - if (arguments.size() == 2) - { - if constexpr (std::is_same_v) - { - if (!isString(arguments[1].type)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 2nd argument of function {}", - arguments[1].type->getName(), getName()); - } - else if constexpr (to_decimal) - { - if (!isInteger(arguments[1].type)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 2nd argument of function {}", - arguments[1].type->getName(), getName()); - if (!arguments[1].column) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Second argument for function {} must be constant", getName()); - } - else - { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Number of arguments for function {} doesn't match: passed {}, should be 1. " - "Second argument makes sense only for DateTime and Decimal.", - getName(), arguments.size()); - } - } - - if constexpr (std::is_same_v) - res = std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, false)); - else if constexpr (std::is_same_v) - throw Exception(ErrorCodes::LOGICAL_ERROR, "MaterializedMySQL is a bug."); - else if constexpr (to_decimal) - { - UInt64 scale = extractToDecimalScale(arguments[1]); - res = createDecimalMaxPrecision(scale); - if (!res) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Something wrong with toDecimalNNOrZero() or toDecimalNNOrNull()"); - } - else - res = std::make_shared(); - } - - if constexpr (exception_mode == ConvertFromStringExceptionMode::Null) - res = std::make_shared(res); - - return res; - } - - template - ColumnPtr executeInternal(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, UInt32 scale = 0) const - { - const IDataType * from_type = arguments[0].type.get(); - - if (checkAndGetDataType(from_type)) - { - return ConvertThroughParsing::execute( - arguments, result_type, input_rows_count, scale); - } - else if (checkAndGetDataType(from_type)) - { - return ConvertThroughParsing::execute( - arguments, result_type, input_rows_count, scale); - } - - return nullptr; - } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override - { - ColumnPtr result_column; - - if constexpr (to_decimal) - result_column = executeInternal(arguments, result_type, input_rows_count, - assert_cast(*removeNullable(result_type)).getScale()); - else - { - if (isDateTime64(arguments)) - { - UInt64 scale = to_datetime64 ? DataTypeDateTime64::default_scale : 0; - if (arguments.size() > 1) - scale = extractToDecimalScale(arguments[1]); - - if (scale == 0) - result_column = executeInternal(arguments, result_type, input_rows_count); - else - { - result_column = executeInternal(arguments, result_type, input_rows_count, static_cast(scale)); - } - } - else - { - result_column = executeInternal(arguments, result_type, input_rows_count); - } - } - - if (!result_column) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. " - "Only String or FixedString argument is accepted for try-conversion function. 
For other arguments, " - "use function without 'orZero' or 'orNull'.", arguments[0].type->getName(), getName()); - - return result_column; - } -}; - - -/// Monotonicity. - -struct PositiveMonotonicity -{ - static bool has() { return true; } - static IFunction::Monotonicity get(const IDataType &, const Field &, const Field &) - { - return { .is_monotonic = true }; - } -}; - -struct UnknownMonotonicity -{ - static bool has() { return false; } - static IFunction::Monotonicity get(const IDataType &, const Field &, const Field &) - { - return { }; - } -}; - -template -struct ToNumberMonotonicity -{ - static bool has() { return true; } - - static UInt64 divideByRangeOfType(UInt64 x) - { - if constexpr (sizeof(T) < sizeof(UInt64)) - return x >> (sizeof(T) * 8); - else - return 0; - } - - static IFunction::Monotonicity get(const IDataType & type, const Field & left, const Field & right) - { - if (!type.isValueRepresentedByNumber()) - return {}; - - /// If type is same, the conversion is always monotonic. - /// (Enum has separate case, because it is different data type) - if (checkAndGetDataType>(&type) || - checkAndGetDataType>(&type)) - return { .is_monotonic = true, .is_always_monotonic = true }; - - /// Float cases. - - /// When converting to Float, the conversion is always monotonic. - if constexpr (std::is_floating_point_v) - return { .is_monotonic = true, .is_always_monotonic = true }; - - const auto * low_cardinality = typeid_cast(&type); - const IDataType * low_cardinality_dictionary_type = nullptr; - if (low_cardinality) - low_cardinality_dictionary_type = low_cardinality->getDictionaryType().get(); - - WhichDataType which_type(type); - WhichDataType which_inner_type = low_cardinality - ? WhichDataType(low_cardinality_dictionary_type) - : WhichDataType(type); - - /// If converting from Float, for monotonicity, arguments must fit in range of result type. - if (which_inner_type.isFloat()) - { - if (left.isNull() || right.isNull()) - return {}; - - Float64 left_float = left.get(); - Float64 right_float = right.get(); - - if (left_float >= static_cast(std::numeric_limits::min()) - && left_float <= static_cast(std::numeric_limits::max()) - && right_float >= static_cast(std::numeric_limits::min()) - && right_float <= static_cast(std::numeric_limits::max())) - return { .is_monotonic = true }; - - return {}; - } - - /// Integer cases. - - /// Only support types represented by native integers. - /// It can be extended to big integers, decimals and DateTime64 later. - /// By the way, NULLs are representing unbounded ranges. - if (!((left.isNull() || left.getType() == Field::Types::UInt64 || left.getType() == Field::Types::Int64) - && (right.isNull() || right.getType() == Field::Types::UInt64 || right.getType() == Field::Types::Int64))) - return {}; - - const bool from_is_unsigned = type.isValueRepresentedByUnsignedInteger(); - const bool to_is_unsigned = is_unsigned_v; - - const size_t size_of_from = type.getSizeOfValueInMemory(); - const size_t size_of_to = sizeof(T); - - const bool left_in_first_half = left.isNull() - ? from_is_unsigned - : (left.get() >= 0); - - const bool right_in_first_half = right.isNull() - ? !from_is_unsigned - : (right.get() >= 0); - - /// Size of type is the same. - if (size_of_from == size_of_to) - { - if (from_is_unsigned == to_is_unsigned) - return { .is_monotonic = true, .is_always_monotonic = true }; - - if (left_in_first_half == right_in_first_half) - return { .is_monotonic = true }; - - return {}; - } - - /// Size of type is expanded. 
-        if (size_of_from < size_of_to)
-        {
-            if (from_is_unsigned == to_is_unsigned)
-                return { .is_monotonic = true, .is_always_monotonic = true };
-
-            if (!to_is_unsigned)
-                return { .is_monotonic = true, .is_always_monotonic = true };
-
-            /// signed -> unsigned. If arguments from the same half, then function is monotonic.
-            if (left_in_first_half == right_in_first_half)
-                return { .is_monotonic = true };
-
-            return {};
-        }
-
-        /// Size of type is shrunk.
-        if (size_of_from > size_of_to)
-        {
-            /// Function cannot be monotonic on unbounded ranges.
-            if (left.isNull() || right.isNull())
-                return {};
-
-            /// Function cannot be monotonic when left and right are not on the same ranges.
-            if (divideByRangeOfType(left.get<UInt64>()) != divideByRangeOfType(right.get<UInt64>()))
-                return {};
-
-            if (to_is_unsigned)
-                return { .is_monotonic = true };
-            else
-            {
-                // If To is signed, it's possible that the signedness is different after conversion. So we check it explicitly.
-                const bool is_monotonic = (T(left.get<UInt64>()) >= 0) == (T(right.get<UInt64>()) >= 0);
-
-                return { .is_monotonic = is_monotonic };
-            }
-        }
-
-        UNREACHABLE();
-    }
-};
-
-struct ToDateMonotonicity
-{
-    static bool has() { return true; }
-
-    static IFunction::Monotonicity get(const IDataType & type, const Field & left, const Field & right)
-    {
-        auto which = WhichDataType(type);
-        if (which.isDateOrDate32() || which.isDateTime() || which.isDateTime64() || which.isInt8() || which.isInt16() || which.isUInt8()
-            || which.isUInt16())
-        {
-            return {.is_monotonic = true, .is_always_monotonic = true};
-        }
-        else if (
-            ((left.getType() == Field::Types::UInt64 || left.isNull()) && (right.getType() == Field::Types::UInt64 || right.isNull())
-             && ((left.isNull() || left.get<UInt64>() < 0xFFFF) && (right.isNull() || right.get<UInt64>() >= 0xFFFF)))
-            || ((left.getType() == Field::Types::Int64 || left.isNull()) && (right.getType() == Field::Types::Int64 || right.isNull())
-                && ((left.isNull() || left.get<Int64>() < 0xFFFF) && (right.isNull() || right.get<Int64>() >= 0xFFFF)))
-            || ((
-                (left.getType() == Field::Types::Float64 || left.isNull())
-                && (right.getType() == Field::Types::Float64 || right.isNull())
-                && ((left.isNull() || left.get<Float64>() < 0xFFFF) && (right.isNull() || right.get<Float64>() >= 0xFFFF))))
-            || !isNativeNumber(type))
-        {
-            return {};
-        }
-        else
-        {
-            return {.is_monotonic = true, .is_always_monotonic = true};
-        }
-    }
-};
-
-struct ToDateTimeMonotonicity
-{
-    static bool has() { return true; }
-
-    static IFunction::Monotonicity get(const IDataType & type, const Field &, const Field &)
-    {
-        if (type.isValueRepresentedByNumber())
-            return {.is_monotonic = true, .is_always_monotonic = true};
-        else
-            return {};
-    }
-};
-
-/** The monotonicity for the `toString` function is mainly determined for test purposes.
-  * It is doubtful that anyone is looking to optimize queries with conditions `toString(CounterID) = 34`.
-  */
-struct ToStringMonotonicity
-{
-    static bool has() { return true; }
-
-    static IFunction::Monotonicity get(const IDataType & type, const Field & left, const Field & right)
-    {
-        IFunction::Monotonicity positive{ .is_monotonic = true };
-        IFunction::Monotonicity not_monotonic;
-
-        const auto * type_ptr = &type;
-        if (const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(type_ptr))
-            type_ptr = low_cardinality_type->getDictionaryType().get();
-
-        /// Order on enum values (which is the order on integers) is completely arbitrary in respect to the order on strings.
- if (WhichDataType(type).isEnum()) - return not_monotonic; - - /// `toString` function is monotonous if the argument is Date or Date32 or DateTime or String, or non-negative numbers with the same number of symbols. - if (checkDataTypes(type_ptr)) - return positive; - - if (left.isNull() || right.isNull()) - return {}; - - if (left.getType() == Field::Types::UInt64 - && right.getType() == Field::Types::UInt64) - { - return (left.get() == 0 && right.get() == 0) - || (floor(log10(left.get())) == floor(log10(right.get()))) - ? positive : not_monotonic; - } - - if (left.getType() == Field::Types::Int64 - && right.getType() == Field::Types::Int64) - { - return (left.get() == 0 && right.get() == 0) - || (left.get() > 0 && right.get() > 0 && floor(log10(left.get())) == floor(log10(right.get()))) - ? positive : not_monotonic; - } - - return not_monotonic; - } -}; - - -struct NameToUInt8 { static constexpr auto name = "toUInt8"; }; -struct NameToUInt16 { static constexpr auto name = "toUInt16"; }; -struct NameToUInt32 { static constexpr auto name = "toUInt32"; }; -struct NameToUInt64 { static constexpr auto name = "toUInt64"; }; -struct NameToUInt128 { static constexpr auto name = "toUInt128"; }; -struct NameToUInt256 { static constexpr auto name = "toUInt256"; }; -struct NameToInt8 { static constexpr auto name = "toInt8"; }; -struct NameToInt16 { static constexpr auto name = "toInt16"; }; -struct NameToInt32 { static constexpr auto name = "toInt32"; }; -struct NameToInt64 { static constexpr auto name = "toInt64"; }; -struct NameToInt128 { static constexpr auto name = "toInt128"; }; -struct NameToInt256 { static constexpr auto name = "toInt256"; }; -struct NameToFloat32 { static constexpr auto name = "toFloat32"; }; -struct NameToFloat64 { static constexpr auto name = "toFloat64"; }; -struct NameToUUID { static constexpr auto name = "toUUID"; }; -struct NameToIPv4 { static constexpr auto name = "toIPv4"; }; -struct NameToIPv6 { static constexpr auto name = "toIPv6"; }; - -using FunctionToUInt8 = FunctionConvert>; -using FunctionToUInt16 = FunctionConvert>; -using FunctionToUInt32 = FunctionConvert>; -using FunctionToUInt64 = FunctionConvert>; -using FunctionToUInt128 = FunctionConvert>; -using FunctionToUInt256 = FunctionConvert>; -using FunctionToInt8 = FunctionConvert>; -using FunctionToInt16 = FunctionConvert>; -using FunctionToInt32 = FunctionConvert>; -using FunctionToInt64 = FunctionConvert>; -using FunctionToInt128 = FunctionConvert>; -using FunctionToInt256 = FunctionConvert>; -using FunctionToFloat32 = FunctionConvert>; -using FunctionToFloat64 = FunctionConvert>; - -using FunctionToDate = FunctionConvert; - -using FunctionToDate32 = FunctionConvert; - -using FunctionToDateTime = FunctionConvert; - -using FunctionToDateTime32 = FunctionConvert; - -using FunctionToDateTime64 = FunctionConvert; - -using FunctionToUUID = FunctionConvert>; -using FunctionToIPv4 = FunctionConvert>; -using FunctionToIPv6 = FunctionConvert>; -using FunctionToString = FunctionConvert; -using FunctionToUnixTimestamp = FunctionConvert>; -using FunctionToDecimal32 = FunctionConvert, NameToDecimal32, UnknownMonotonicity>; -using FunctionToDecimal64 = FunctionConvert, NameToDecimal64, UnknownMonotonicity>; -using FunctionToDecimal128 = FunctionConvert, NameToDecimal128, UnknownMonotonicity>; -using FunctionToDecimal256 = FunctionConvert, NameToDecimal256, UnknownMonotonicity>; - -template struct FunctionTo; - -template <> struct FunctionTo { using Type = FunctionToUInt8; }; -template <> struct FunctionTo { using Type = 
FunctionToUInt16; }; -template <> struct FunctionTo { using Type = FunctionToUInt32; }; -template <> struct FunctionTo { using Type = FunctionToUInt64; }; -template <> struct FunctionTo { using Type = FunctionToUInt128; }; -template <> struct FunctionTo { using Type = FunctionToUInt256; }; -template <> struct FunctionTo { using Type = FunctionToInt8; }; -template <> struct FunctionTo { using Type = FunctionToInt16; }; -template <> struct FunctionTo { using Type = FunctionToInt32; }; -template <> struct FunctionTo { using Type = FunctionToInt64; }; -template <> struct FunctionTo { using Type = FunctionToInt128; }; -template <> struct FunctionTo { using Type = FunctionToInt256; }; -template <> struct FunctionTo { using Type = FunctionToFloat32; }; -template <> struct FunctionTo { using Type = FunctionToFloat64; }; - -template -struct FunctionTo { using Type = FunctionToDate; }; - -template -struct FunctionTo { using Type = FunctionToDate32; }; - -template -struct FunctionTo { using Type = FunctionToDateTime; }; - -template -struct FunctionTo { using Type = FunctionToDateTime64; }; - -template <> struct FunctionTo { using Type = FunctionToUUID; }; -template <> struct FunctionTo { using Type = FunctionToIPv4; }; -template <> struct FunctionTo { using Type = FunctionToIPv6; }; -template <> struct FunctionTo { using Type = FunctionToString; }; -template <> struct FunctionTo { using Type = FunctionToFixedString; }; -template <> struct FunctionTo> { using Type = FunctionToDecimal32; }; -template <> struct FunctionTo> { using Type = FunctionToDecimal64; }; -template <> struct FunctionTo> { using Type = FunctionToDecimal128; }; -template <> struct FunctionTo> { using Type = FunctionToDecimal256; }; - -template struct FunctionTo> - : FunctionTo> -{ -}; - -struct NameToUInt8OrZero { static constexpr auto name = "toUInt8OrZero"; }; -struct NameToUInt16OrZero { static constexpr auto name = "toUInt16OrZero"; }; -struct NameToUInt32OrZero { static constexpr auto name = "toUInt32OrZero"; }; -struct NameToUInt64OrZero { static constexpr auto name = "toUInt64OrZero"; }; -struct NameToUInt128OrZero { static constexpr auto name = "toUInt128OrZero"; }; -struct NameToUInt256OrZero { static constexpr auto name = "toUInt256OrZero"; }; -struct NameToInt8OrZero { static constexpr auto name = "toInt8OrZero"; }; -struct NameToInt16OrZero { static constexpr auto name = "toInt16OrZero"; }; -struct NameToInt32OrZero { static constexpr auto name = "toInt32OrZero"; }; -struct NameToInt64OrZero { static constexpr auto name = "toInt64OrZero"; }; -struct NameToInt128OrZero { static constexpr auto name = "toInt128OrZero"; }; -struct NameToInt256OrZero { static constexpr auto name = "toInt256OrZero"; }; -struct NameToFloat32OrZero { static constexpr auto name = "toFloat32OrZero"; }; -struct NameToFloat64OrZero { static constexpr auto name = "toFloat64OrZero"; }; -struct NameToDateOrZero { static constexpr auto name = "toDateOrZero"; }; -struct NameToDate32OrZero { static constexpr auto name = "toDate32OrZero"; }; -struct NameToDateTimeOrZero { static constexpr auto name = "toDateTimeOrZero"; }; -struct NameToDateTime64OrZero { static constexpr auto name = "toDateTime64OrZero"; }; -struct NameToDecimal32OrZero { static constexpr auto name = "toDecimal32OrZero"; }; -struct NameToDecimal64OrZero { static constexpr auto name = "toDecimal64OrZero"; }; -struct NameToDecimal128OrZero { static constexpr auto name = "toDecimal128OrZero"; }; -struct NameToDecimal256OrZero { static constexpr auto name = "toDecimal256OrZero"; }; -struct 
NameToUUIDOrZero { static constexpr auto name = "toUUIDOrZero"; }; -struct NameToIPv4OrZero { static constexpr auto name = "toIPv4OrZero"; }; -struct NameToIPv6OrZero { static constexpr auto name = "toIPv6OrZero"; }; - -using FunctionToUInt8OrZero = FunctionConvertFromString; -using FunctionToUInt16OrZero = FunctionConvertFromString; -using FunctionToUInt32OrZero = FunctionConvertFromString; -using FunctionToUInt64OrZero = FunctionConvertFromString; -using FunctionToUInt128OrZero = FunctionConvertFromString; -using FunctionToUInt256OrZero = FunctionConvertFromString; -using FunctionToInt8OrZero = FunctionConvertFromString; -using FunctionToInt16OrZero = FunctionConvertFromString; -using FunctionToInt32OrZero = FunctionConvertFromString; -using FunctionToInt64OrZero = FunctionConvertFromString; -using FunctionToInt128OrZero = FunctionConvertFromString; -using FunctionToInt256OrZero = FunctionConvertFromString; -using FunctionToFloat32OrZero = FunctionConvertFromString; -using FunctionToFloat64OrZero = FunctionConvertFromString; -using FunctionToDateOrZero = FunctionConvertFromString; -using FunctionToDate32OrZero = FunctionConvertFromString; -using FunctionToDateTimeOrZero = FunctionConvertFromString; -using FunctionToDateTime64OrZero = FunctionConvertFromString; -using FunctionToDecimal32OrZero = FunctionConvertFromString, NameToDecimal32OrZero, ConvertFromStringExceptionMode::Zero>; -using FunctionToDecimal64OrZero = FunctionConvertFromString, NameToDecimal64OrZero, ConvertFromStringExceptionMode::Zero>; -using FunctionToDecimal128OrZero = FunctionConvertFromString, NameToDecimal128OrZero, ConvertFromStringExceptionMode::Zero>; -using FunctionToDecimal256OrZero = FunctionConvertFromString, NameToDecimal256OrZero, ConvertFromStringExceptionMode::Zero>; -using FunctionToUUIDOrZero = FunctionConvertFromString; -using FunctionToIPv4OrZero = FunctionConvertFromString; -using FunctionToIPv6OrZero = FunctionConvertFromString; - -struct NameToUInt8OrNull { static constexpr auto name = "toUInt8OrNull"; }; -struct NameToUInt16OrNull { static constexpr auto name = "toUInt16OrNull"; }; -struct NameToUInt32OrNull { static constexpr auto name = "toUInt32OrNull"; }; -struct NameToUInt64OrNull { static constexpr auto name = "toUInt64OrNull"; }; -struct NameToUInt128OrNull { static constexpr auto name = "toUInt128OrNull"; }; -struct NameToUInt256OrNull { static constexpr auto name = "toUInt256OrNull"; }; -struct NameToInt8OrNull { static constexpr auto name = "toInt8OrNull"; }; -struct NameToInt16OrNull { static constexpr auto name = "toInt16OrNull"; }; -struct NameToInt32OrNull { static constexpr auto name = "toInt32OrNull"; }; -struct NameToInt64OrNull { static constexpr auto name = "toInt64OrNull"; }; -struct NameToInt128OrNull { static constexpr auto name = "toInt128OrNull"; }; -struct NameToInt256OrNull { static constexpr auto name = "toInt256OrNull"; }; -struct NameToFloat32OrNull { static constexpr auto name = "toFloat32OrNull"; }; -struct NameToFloat64OrNull { static constexpr auto name = "toFloat64OrNull"; }; -struct NameToDateOrNull { static constexpr auto name = "toDateOrNull"; }; -struct NameToDate32OrNull { static constexpr auto name = "toDate32OrNull"; }; -struct NameToDateTimeOrNull { static constexpr auto name = "toDateTimeOrNull"; }; -struct NameToDateTime64OrNull { static constexpr auto name = "toDateTime64OrNull"; }; -struct NameToDecimal32OrNull { static constexpr auto name = "toDecimal32OrNull"; }; -struct NameToDecimal64OrNull { static constexpr auto name = "toDecimal64OrNull"; }; 
-struct NameToDecimal128OrNull { static constexpr auto name = "toDecimal128OrNull"; }; -struct NameToDecimal256OrNull { static constexpr auto name = "toDecimal256OrNull"; }; -struct NameToUUIDOrNull { static constexpr auto name = "toUUIDOrNull"; }; -struct NameToIPv4OrNull { static constexpr auto name = "toIPv4OrNull"; }; -struct NameToIPv6OrNull { static constexpr auto name = "toIPv6OrNull"; }; - -using FunctionToUInt8OrNull = FunctionConvertFromString; -using FunctionToUInt16OrNull = FunctionConvertFromString; -using FunctionToUInt32OrNull = FunctionConvertFromString; -using FunctionToUInt64OrNull = FunctionConvertFromString; -using FunctionToUInt128OrNull = FunctionConvertFromString; -using FunctionToUInt256OrNull = FunctionConvertFromString; -using FunctionToInt8OrNull = FunctionConvertFromString; -using FunctionToInt16OrNull = FunctionConvertFromString; -using FunctionToInt32OrNull = FunctionConvertFromString; -using FunctionToInt64OrNull = FunctionConvertFromString; -using FunctionToInt128OrNull = FunctionConvertFromString; -using FunctionToInt256OrNull = FunctionConvertFromString; -using FunctionToFloat32OrNull = FunctionConvertFromString; -using FunctionToFloat64OrNull = FunctionConvertFromString; -using FunctionToDateOrNull = FunctionConvertFromString; -using FunctionToDate32OrNull = FunctionConvertFromString; -using FunctionToDateTimeOrNull = FunctionConvertFromString; -using FunctionToDateTime64OrNull = FunctionConvertFromString; -using FunctionToDecimal32OrNull = FunctionConvertFromString, NameToDecimal32OrNull, ConvertFromStringExceptionMode::Null>; -using FunctionToDecimal64OrNull = FunctionConvertFromString, NameToDecimal64OrNull, ConvertFromStringExceptionMode::Null>; -using FunctionToDecimal128OrNull = FunctionConvertFromString, NameToDecimal128OrNull, ConvertFromStringExceptionMode::Null>; -using FunctionToDecimal256OrNull = FunctionConvertFromString, NameToDecimal256OrNull, ConvertFromStringExceptionMode::Null>; -using FunctionToUUIDOrNull = FunctionConvertFromString; -using FunctionToIPv4OrNull = FunctionConvertFromString; -using FunctionToIPv6OrNull = FunctionConvertFromString; - -struct NameParseDateTimeBestEffort { static constexpr auto name = "parseDateTimeBestEffort"; }; -struct NameParseDateTimeBestEffortOrZero { static constexpr auto name = "parseDateTimeBestEffortOrZero"; }; -struct NameParseDateTimeBestEffortOrNull { static constexpr auto name = "parseDateTimeBestEffortOrNull"; }; -struct NameParseDateTimeBestEffortUS { static constexpr auto name = "parseDateTimeBestEffortUS"; }; -struct NameParseDateTimeBestEffortUSOrZero { static constexpr auto name = "parseDateTimeBestEffortUSOrZero"; }; -struct NameParseDateTimeBestEffortUSOrNull { static constexpr auto name = "parseDateTimeBestEffortUSOrNull"; }; -struct NameParseDateTime32BestEffort { static constexpr auto name = "parseDateTime32BestEffort"; }; -struct NameParseDateTime32BestEffortOrZero { static constexpr auto name = "parseDateTime32BestEffortOrZero"; }; -struct NameParseDateTime32BestEffortOrNull { static constexpr auto name = "parseDateTime32BestEffortOrNull"; }; -struct NameParseDateTime64BestEffort { static constexpr auto name = "parseDateTime64BestEffort"; }; -struct NameParseDateTime64BestEffortOrZero { static constexpr auto name = "parseDateTime64BestEffortOrZero"; }; -struct NameParseDateTime64BestEffortOrNull { static constexpr auto name = "parseDateTime64BestEffortOrNull"; }; -struct NameParseDateTime64BestEffortUS { static constexpr auto name = "parseDateTime64BestEffortUS"; }; -struct 
NameParseDateTime64BestEffortUSOrZero { static constexpr auto name = "parseDateTime64BestEffortUSOrZero"; }; -struct NameParseDateTime64BestEffortUSOrNull { static constexpr auto name = "parseDateTime64BestEffortUSOrNull"; }; - - -using FunctionParseDateTimeBestEffort = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTimeBestEffort, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffort>; -using FunctionParseDateTimeBestEffortOrZero = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTimeBestEffortOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffort>; -using FunctionParseDateTimeBestEffortOrNull = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTimeBestEffortOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffort>; - -using FunctionParseDateTimeBestEffortUS = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTimeBestEffortUS, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffortUS>; -using FunctionParseDateTimeBestEffortUSOrZero = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTimeBestEffortUSOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffortUS>; -using FunctionParseDateTimeBestEffortUSOrNull = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTimeBestEffortUSOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffortUS>; - -using FunctionParseDateTime32BestEffort = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTime32BestEffort, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffort>; -using FunctionParseDateTime32BestEffortOrZero = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTime32BestEffortOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffort>; -using FunctionParseDateTime32BestEffortOrNull = FunctionConvertFromString< - DataTypeDateTime, NameParseDateTime32BestEffortOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffort>; - -using FunctionParseDateTime64BestEffort = FunctionConvertFromString< - DataTypeDateTime64, NameParseDateTime64BestEffort, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffort>; -using FunctionParseDateTime64BestEffortOrZero = FunctionConvertFromString< - DataTypeDateTime64, NameParseDateTime64BestEffortOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffort>; -using FunctionParseDateTime64BestEffortOrNull = FunctionConvertFromString< - DataTypeDateTime64, NameParseDateTime64BestEffortOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffort>; - -using FunctionParseDateTime64BestEffortUS = FunctionConvertFromString< - DataTypeDateTime64, NameParseDateTime64BestEffortUS, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffortUS>; -using FunctionParseDateTime64BestEffortUSOrZero = FunctionConvertFromString< - DataTypeDateTime64, NameParseDateTime64BestEffortUSOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffortUS>; -using FunctionParseDateTime64BestEffortUSOrNull = FunctionConvertFromString< - DataTypeDateTime64, NameParseDateTime64BestEffortUSOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffortUS>; - - -class ExecutableFunctionCast : public IExecutableFunction -{ -public: - using WrapperType = std::function; - - explicit 
ExecutableFunctionCast( - WrapperType && wrapper_function_, const char * name_, std::optional diagnostic_) - : wrapper_function(std::move(wrapper_function_)), name(name_), diagnostic(std::move(diagnostic_)) {} - - String getName() const override { return name; } - -protected: - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override - { - /// drop second argument, pass others - ColumnsWithTypeAndName new_arguments{arguments.front()}; - if (arguments.size() > 2) - new_arguments.insert(std::end(new_arguments), std::next(std::begin(arguments), 2), std::end(arguments)); - - try - { - return wrapper_function(new_arguments, result_type, nullptr, input_rows_count); - } - catch (Exception & e) - { - if (diagnostic) - e.addMessage("while converting source column " + backQuoteIfNeed(diagnostic->column_from) + - " to destination column " + backQuoteIfNeed(diagnostic->column_to)); - throw; - } - } - - bool useDefaultImplementationForNulls() const override { return false; } - /// CAST(Nothing, T) -> T - bool useDefaultImplementationForNothing() const override { return false; } - bool useDefaultImplementationForConstants() const override { return true; } - bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; } - -private: - WrapperType wrapper_function; - const char * name; - std::optional diagnostic; -}; - -struct CastName { static constexpr auto name = "CAST"; }; -struct CastInternalName { static constexpr auto name = "_CAST"; }; - -class FunctionCastBase : public IFunctionBase -{ -public: - using MonotonicityForRange = std::function; -}; - -template -class FunctionCast final : public FunctionCastBase -{ -public: - using WrapperType = std::function; - - FunctionCast(ContextPtr context_ - , const char * cast_name_ - , MonotonicityForRange && monotonicity_for_range_ - , const DataTypes & argument_types_ - , const DataTypePtr & return_type_ - , std::optional diagnostic_ - , CastType cast_type_) - : cast_name(cast_name_), monotonicity_for_range(std::move(monotonicity_for_range_)) - , argument_types(argument_types_), return_type(return_type_), diagnostic(std::move(diagnostic_)) - , cast_type(cast_type_) - , context(context_) - { - } - - const DataTypes & getArgumentTypes() const override { return argument_types; } - const DataTypePtr & getResultType() const override { return return_type; } - - ExecutableFunctionPtr prepare(const ColumnsWithTypeAndName & /*sample_columns*/) const override - { - try - { - return std::make_unique( - prepareUnpackDictionaries(getArgumentTypes()[0], getResultType()), cast_name, diagnostic); - } - catch (Exception & e) - { - if (diagnostic) - e.addMessage("while converting source column " + backQuoteIfNeed(diagnostic->column_from) + - " to destination column " + backQuoteIfNeed(diagnostic->column_to)); - throw; - } - } - - String getName() const override { return cast_name; } - - bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } - - bool hasInformationAboutMonotonicity() const override - { - return static_cast(monotonicity_for_range); - } - - Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override - { - return monotonicity_for_range(type, left, right); - } - -private: - - const char * cast_name; - MonotonicityForRange monotonicity_for_range; - - 
DataTypes argument_types; - DataTypePtr return_type; - - std::optional diagnostic; - CastType cast_type; - ContextPtr context; - - static WrapperType createFunctionAdaptor(FunctionPtr function, const DataTypePtr & from_type) - { - auto function_adaptor = std::make_unique(function)->build({ColumnWithTypeAndName{nullptr, from_type, ""}}); - - return [function_adaptor] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) - { - return function_adaptor->execute(arguments, result_type, input_rows_count); - }; - } - - static WrapperType createToNullableColumnWrapper() - { - return [] (ColumnsWithTypeAndName &, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) - { - ColumnPtr res = result_type->createColumn(); - ColumnUInt8::Ptr col_null_map_to = ColumnUInt8::create(input_rows_count, true); - return ColumnNullable::create(res->cloneResized(input_rows_count), std::move(col_null_map_to)); - }; - } - - template - WrapperType createWrapper(const DataTypePtr & from_type, const ToDataType * const to_type, bool requested_result_is_nullable) const - { - TypeIndex from_type_index = from_type->getTypeId(); - WhichDataType which(from_type_index); - bool can_apply_accurate_cast = (cast_type == CastType::accurate || cast_type == CastType::accurateOrNull) - && (which.isInt() || which.isUInt() || which.isFloat()); - - FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior = default_date_time_overflow_behavior; - if (context) - date_time_overflow_behavior = context->getSettingsRef().date_time_overflow_behavior; - - if (requested_result_is_nullable && checkAndGetDataType(from_type.get())) - { - /// In case when converting to Nullable type, we apply different parsing rule, - /// that will not throw an exception but return NULL in case of malformed input. 
- FunctionPtr function = FunctionConvertFromString::create(); - return createFunctionAdaptor(function, from_type); - } - else if (!can_apply_accurate_cast) - { - FunctionPtr function = FunctionTo::Type::create(context); - return createFunctionAdaptor(function, from_type); - } - - auto wrapper_cast_type = cast_type; - - return [wrapper_cast_type, from_type_index, to_type, date_time_overflow_behavior] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *column_nullable, size_t input_rows_count) - { - ColumnPtr result_column; - auto res = callOnIndexAndDataType(from_type_index, [&](const auto & types) -> bool { - using Types = std::decay_t; - using LeftDataType = typename Types::LeftType; - using RightDataType = typename Types::RightType; - - if constexpr (IsDataTypeNumber) - { - if constexpr (IsDataTypeNumber) - { -#define GENERATE_OVERFLOW_MODE_CASE(OVERFLOW_MODE, ADDITIONS) \ - case FormatSettings::DateTimeOverflowBehavior::OVERFLOW_MODE: \ - result_column = ConvertImpl::execute( \ - arguments, result_type, input_rows_count, ADDITIONS()); \ - break; - if (wrapper_cast_type == CastType::accurate) - { - switch (date_time_overflow_behavior) - { - GENERATE_OVERFLOW_MODE_CASE(Throw, AccurateConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Ignore, AccurateConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Saturate, AccurateConvertStrategyAdditions) - } - } - else - { - switch (date_time_overflow_behavior) - { - GENERATE_OVERFLOW_MODE_CASE(Throw, AccurateOrNullConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Ignore, AccurateOrNullConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Saturate, AccurateOrNullConvertStrategyAdditions) - } - } -#undef GENERATE_OVERFLOW_MODE_CASE - - return true; - } - - if constexpr (std::is_same_v || std::is_same_v) - { -#define GENERATE_OVERFLOW_MODE_CASE(OVERFLOW_MODE, ADDITIONS) \ - case FormatSettings::DateTimeOverflowBehavior::OVERFLOW_MODE: \ - result_column = ConvertImpl::template execute( \ -arguments, result_type, input_rows_count); \ - break; - if (wrapper_cast_type == CastType::accurate) - { - switch (date_time_overflow_behavior) - { - GENERATE_OVERFLOW_MODE_CASE(Throw, DateTimeAccurateConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Ignore, DateTimeAccurateConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Saturate, DateTimeAccurateConvertStrategyAdditions) - } - } - else - { - switch (date_time_overflow_behavior) - { - GENERATE_OVERFLOW_MODE_CASE(Throw, DateTimeAccurateOrNullConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Ignore, DateTimeAccurateOrNullConvertStrategyAdditions) - GENERATE_OVERFLOW_MODE_CASE(Saturate, DateTimeAccurateOrNullConvertStrategyAdditions) - } - } -#undef GENERATE_OVERFLOW_MODE_CASE - return true; - } - } - - return false; - }); - - /// Additionally check if callOnIndexAndDataType wasn't called at all. 
- if (!res) - { - if (wrapper_cast_type == CastType::accurateOrNull) - { - auto nullable_column_wrapper = FunctionCast::createToNullableColumnWrapper(); - return nullable_column_wrapper(arguments, result_type, column_nullable, input_rows_count); - } - else - { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, - "Conversion from {} to {} is not supported", - from_type_index, to_type->getName()); - } - } - - return result_column; - }; - } - - template - WrapperType createBoolWrapper(const DataTypePtr & from_type, const ToDataType * const to_type, bool requested_result_is_nullable) const - { - if (checkAndGetDataType(from_type.get())) - { - return &ConvertImplGenericFromString::execute; - } - - return createWrapper(from_type, to_type, requested_result_is_nullable); - } - - WrapperType createUInt8ToBoolWrapper(const DataTypePtr from_type, const DataTypePtr to_type) const - { - return [from_type, to_type] (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t /*input_rows_count*/) -> ColumnPtr - { - /// Special case when we convert UInt8 column to Bool column. - /// both columns have type UInt8, but we shouldn't use identity wrapper, - /// because Bool column can contain only 0 and 1. - auto res_column = to_type->createColumn(); - const auto & data_from = checkAndGetColumn(arguments[0].column.get())->getData(); - auto & data_to = assert_cast(res_column.get())->getData(); - data_to.resize(data_from.size()); - for (size_t i = 0; i != data_from.size(); ++i) - data_to[i] = static_cast(data_from[i]); - return res_column; - }; - } - - static WrapperType createStringWrapper(const DataTypePtr & from_type) - { - FunctionPtr function = FunctionToString::create(); - return createFunctionAdaptor(function, from_type); - } - - WrapperType createFixedStringWrapper(const DataTypePtr & from_type, const size_t N) const - { - if (!isStringOrFixedString(from_type)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "CAST AS FixedString is only implemented for types String and FixedString"); - - bool exception_mode_null = cast_type == CastType::accurateOrNull; - return [exception_mode_null, N] (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t /*input_rows_count*/) - { - if (exception_mode_null) - return FunctionToFixedString::executeForN(arguments, N); - else - return FunctionToFixedString::executeForN(arguments, N); - }; - } - -#define GENERATE_INTERVAL_CASE(INTERVAL_KIND) \ - case IntervalKind::Kind::INTERVAL_KIND: \ - return createFunctionAdaptor(FunctionConvert::create(), from_type); - - static WrapperType createIntervalWrapper(const DataTypePtr & from_type, IntervalKind kind) - { - switch (kind) - { - GENERATE_INTERVAL_CASE(Nanosecond) - GENERATE_INTERVAL_CASE(Microsecond) - GENERATE_INTERVAL_CASE(Millisecond) - GENERATE_INTERVAL_CASE(Second) - GENERATE_INTERVAL_CASE(Minute) - GENERATE_INTERVAL_CASE(Hour) - GENERATE_INTERVAL_CASE(Day) - GENERATE_INTERVAL_CASE(Week) - GENERATE_INTERVAL_CASE(Month) - GENERATE_INTERVAL_CASE(Quarter) - GENERATE_INTERVAL_CASE(Year) - } - throw Exception{ErrorCodes::CANNOT_CONVERT_TYPE, "Conversion to unexpected IntervalKind: {}", kind.toString()}; - } - -#undef GENERATE_INTERVAL_CASE - - template - requires IsDataTypeDecimal - WrapperType createDecimalWrapper(const DataTypePtr & from_type, const ToDataType * to_type, bool requested_result_is_nullable) const - { - TypeIndex type_index = from_type->getTypeId(); - UInt32 scale = to_type->getScale(); - - WhichDataType which(type_index); - bool ok = 
which.isNativeInt() || which.isNativeUInt() || which.isDecimal() || which.isFloat() || which.isDateOrDate32() || which.isDateTime() || which.isDateTime64() - || which.isStringOrFixedString(); - if (!ok) - { - if (cast_type == CastType::accurateOrNull) - return createToNullableColumnWrapper(); - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Conversion from {} to {} is not supported", - from_type->getName(), to_type->getName()); - } - - auto wrapper_cast_type = cast_type; - - return [wrapper_cast_type, type_index, scale, to_type, requested_result_is_nullable] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *column_nullable, size_t input_rows_count) - { - ColumnPtr result_column; - auto res = callOnIndexAndDataType(type_index, [&](const auto & types) -> bool - { - using Types = std::decay_t; - using LeftDataType = typename Types::LeftType; - using RightDataType = typename Types::RightType; - - if constexpr (IsDataTypeDecimalOrNumber && IsDataTypeDecimalOrNumber && !std::is_same_v) - { - if (wrapper_cast_type == CastType::accurate) - { - AccurateConvertStrategyAdditions additions; - additions.scale = scale; - result_column = ConvertImpl::execute( - arguments, result_type, input_rows_count, additions); - - return true; - } - else if (wrapper_cast_type == CastType::accurateOrNull) - { - AccurateOrNullConvertStrategyAdditions additions; - additions.scale = scale; - result_column = ConvertImpl::execute( - arguments, result_type, input_rows_count, additions); - - return true; - } - } - else if constexpr (std::is_same_v) - { - if (requested_result_is_nullable) - { - /// Consistent with CAST(Nullable(String) AS Nullable(Numbers)) - /// In case when converting to Nullable type, we apply different parsing rule, - /// that will not throw an exception but return NULL in case of malformed input. - result_column = ConvertImpl::execute( - arguments, result_type, input_rows_count, scale); - - return true; - } - } - - result_column = ConvertImpl::execute(arguments, result_type, input_rows_count, scale); - - return true; - }); - - /// Additionally check if callOnIndexAndDataType wasn't called at all. - if (!res) - { - if (wrapper_cast_type == CastType::accurateOrNull) - { - auto nullable_column_wrapper = FunctionCast::createToNullableColumnWrapper(); - return nullable_column_wrapper(arguments, result_type, column_nullable, input_rows_count); - } - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, - "Conversion from {} to {} is not supported", - type_index, to_type->getName()); - } - - return result_column; - }; - } - - WrapperType createAggregateFunctionWrapper(const DataTypePtr & from_type_untyped, const DataTypeAggregateFunction * to_type) const - { - /// Conversion from String through parsing. 
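The decimal wrapper above always threads the target scale through the conversion (and uses the NULL-on-error parsing path when a Nullable result is requested); a sketch of what that looks like from SQL, assuming default settings:

``` sql
SELECT CAST('1.5' AS Decimal(9, 4));                     -- parsed with the target scale of 4
SELECT CAST(toDecimal64(3.141592, 6) AS Decimal(9, 2));  -- 3.14, rescaled to the target scale
```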
- if (checkAndGetDataType(from_type_untyped.get())) - { - return &ConvertImplGenericFromString::execute; - } - else if (const auto * agg_type = checkAndGetDataType(from_type_untyped.get())) - { - if (agg_type->getFunction()->haveSameStateRepresentation(*to_type->getFunction())) - { - return [function = to_type->getFunction()]( - ColumnsWithTypeAndName & arguments, - const DataTypePtr & /* result_type */, - const ColumnNullable * /* nullable_source */, - size_t /*input_rows_count*/) -> ColumnPtr - { - const auto & argument_column = arguments.front(); - const auto * col_agg = checkAndGetColumn(argument_column.column.get()); - if (col_agg) - { - auto new_col_agg = ColumnAggregateFunction::create(*col_agg); - new_col_agg->set(function); - return new_col_agg; - } - else - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Illegal column {} for function CAST AS AggregateFunction", - argument_column.column->getName()); - } - }; - } - } - - if (cast_type == CastType::accurateOrNull) - return createToNullableColumnWrapper(); - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Conversion from {} to {} is not supported", - from_type_untyped->getName(), to_type->getName()); - } - - WrapperType createArrayWrapper(const DataTypePtr & from_type_untyped, const DataTypeArray & to_type) const - { - /// Conversion from String through parsing. - if (checkAndGetDataType(from_type_untyped.get())) - { - return &ConvertImplGenericFromString::execute; - } - - DataTypePtr from_type_holder; - const auto * from_type = checkAndGetDataType(from_type_untyped.get()); - const auto * from_type_map = checkAndGetDataType(from_type_untyped.get()); - - /// Convert from Map - if (from_type_map) - { - /// Recreate array of unnamed tuples because otherwise it may work - /// unexpectedly while converting to array of named tuples. 
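Per the branches above, the Array wrapper accepts String sources (parsed) and Map sources (unwrapped into the nested array of tuples) in addition to plain Arrays; for example, roughly:

``` sql
SELECT CAST('[1,2,3]' AS Array(UInt8));                           -- parsed from the string representation
SELECT CAST(map('a', 1, 'b', 2) AS Array(Tuple(String, UInt8)));  -- the Map is unwrapped into its nested array
```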
- from_type_holder = from_type_map->getNestedTypeWithUnnamedTuple(); - from_type = assert_cast(from_type_holder.get()); - } - - if (!from_type) - { - throw Exception(ErrorCodes::TYPE_MISMATCH, - "CAST AS Array can only be performed between same-dimensional Array, Map or String types"); - } - - DataTypePtr from_nested_type = from_type->getNestedType(); - - /// In query SELECT CAST([] AS Array(Array(String))) from type is Array(Nothing) - bool from_empty_array = isNothing(from_nested_type); - - if (from_type->getNumberOfDimensions() != to_type.getNumberOfDimensions() && !from_empty_array) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "CAST AS Array can only be performed between same-dimensional array types"); - - const DataTypePtr & to_nested_type = to_type.getNestedType(); - - /// Prepare nested type conversion - const auto nested_function = prepareUnpackDictionaries(from_nested_type, to_nested_type); - - return [nested_function, from_nested_type, to_nested_type]( - ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t /*input_rows_count*/) -> ColumnPtr - { - const auto & argument_column = arguments.front(); - - const ColumnArray * col_array = nullptr; - - if (const ColumnMap * col_map = checkAndGetColumn(argument_column.column.get())) - col_array = &col_map->getNestedColumn(); - else - col_array = checkAndGetColumn(argument_column.column.get()); - - if (col_array) - { - /// create columns for converting nested column containing original and result columns - ColumnsWithTypeAndName nested_columns{{ col_array->getDataPtr(), from_nested_type, "" }}; - - /// convert nested column - auto result_column = nested_function(nested_columns, to_nested_type, nullable_source, nested_columns.front().column->size()); - - /// set converted nested column to result - return ColumnArray::create(result_column, col_array->getOffsetsPtr()); - } - else - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Illegal column {} for function CAST AS Array", - argument_column.column->getName()); - } - }; - } - - using ElementWrappers = std::vector; - - ElementWrappers getElementWrappers(const DataTypes & from_element_types, const DataTypes & to_element_types) const - { - ElementWrappers element_wrappers; - element_wrappers.reserve(from_element_types.size()); - - /// Create conversion wrapper for each element in tuple - for (size_t i = 0; i < from_element_types.size(); ++i) - { - const DataTypePtr & from_element_type = from_element_types[i]; - const DataTypePtr & to_element_type = to_element_types[i]; - element_wrappers.push_back(prepareUnpackDictionaries(from_element_type, to_element_type)); - } - - return element_wrappers; - } - - WrapperType createTupleWrapper(const DataTypePtr & from_type_untyped, const DataTypeTuple * to_type) const - { - /// Conversion from String through parsing. - if (checkAndGetDataType(from_type_untyped.get())) - { - return &ConvertImplGenericFromString::execute; - } - - const auto * from_type = checkAndGetDataType(from_type_untyped.get()); - if (!from_type) - throw Exception(ErrorCodes::TYPE_MISMATCH, "CAST AS Tuple can only be performed between tuple types or from String.\n" - "Left type: {}, right type: {}", from_type_untyped->getName(), to_type->getName()); - - const auto & from_element_types = from_type->getElements(); - const auto & to_element_types = to_type->getElements(); - - std::vector element_wrappers; - std::vector> to_reverse_index; - - /// For named tuples allow conversions for tuples with - /// different sets of elements. 
If element exists in @to_type - /// and doesn't exist in @to_type it will be filled by default values. - if (from_type->haveExplicitNames() && to_type->haveExplicitNames()) - { - const auto & from_names = from_type->getElementNames(); - std::unordered_map from_positions; - from_positions.reserve(from_names.size()); - for (size_t i = 0; i < from_names.size(); ++i) - from_positions[from_names[i]] = i; - - const auto & to_names = to_type->getElementNames(); - element_wrappers.reserve(to_names.size()); - to_reverse_index.reserve(from_names.size()); - - for (size_t i = 0; i < to_names.size(); ++i) - { - auto it = from_positions.find(to_names[i]); - if (it != from_positions.end()) - { - element_wrappers.emplace_back(prepareUnpackDictionaries(from_element_types[it->second], to_element_types[i])); - to_reverse_index.emplace_back(it->second); - } - else - { - element_wrappers.emplace_back(); - to_reverse_index.emplace_back(); - } - } - } - else - { - if (from_element_types.size() != to_element_types.size()) - throw Exception(ErrorCodes::TYPE_MISMATCH, "CAST AS Tuple can only be performed between tuple types " - "with the same number of elements or from String.\nLeft type: {}, right type: {}", - from_type->getName(), to_type->getName()); - - element_wrappers = getElementWrappers(from_element_types, to_element_types); - to_reverse_index.reserve(to_element_types.size()); - for (size_t i = 0; i < to_element_types.size(); ++i) - to_reverse_index.emplace_back(i); - } - - return [element_wrappers, from_element_types, to_element_types, to_reverse_index] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t input_rows_count) -> ColumnPtr - { - const auto * col = arguments.front().column.get(); - - size_t tuple_size = to_element_types.size(); - const ColumnTuple & column_tuple = typeid_cast(*col); - - Columns converted_columns(tuple_size); - - /// invoke conversion for each element - for (size_t i = 0; i < tuple_size; ++i) - { - if (to_reverse_index[i]) - { - size_t from_idx = *to_reverse_index[i]; - ColumnsWithTypeAndName element = {{column_tuple.getColumns()[from_idx], from_element_types[from_idx], "" }}; - converted_columns[i] = element_wrappers[i](element, to_element_types[i], nullable_source, input_rows_count); - } - else - { - converted_columns[i] = to_element_types[i]->createColumn()->cloneResized(input_rows_count); - } - } - - return ColumnTuple::create(converted_columns); - }; - } - - /// The case of: tuple([key1, key2, ..., key_n], [value1, value2, ..., value_n]) - WrapperType createTupleToMapWrapper(const DataTypes & from_kv_types, const DataTypes & to_kv_types) const - { - return [element_wrappers = getElementWrappers(from_kv_types, to_kv_types), from_kv_types, to_kv_types] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t /*input_rows_count*/) -> ColumnPtr - { - const auto * col = arguments.front().column.get(); - const auto & column_tuple = assert_cast(*col); - - Columns offsets(2); - Columns converted_columns(2); - for (size_t i = 0; i < 2; ++i) - { - const auto & column_array = assert_cast(column_tuple.getColumn(i)); - ColumnsWithTypeAndName element = {{column_array.getDataPtr(), from_kv_types[i], ""}}; - converted_columns[i] = element_wrappers[i](element, to_kv_types[i], nullable_source, (element[0].column)->size()); - offsets[i] = column_array.getOffsetsPtr(); - } - - const auto & keys_offsets = assert_cast(*offsets[0]).getData(); - const auto & values_offsets = 
assert_cast(*offsets[1]).getData(); - if (keys_offsets != values_offsets) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "CAST AS Map can only be performed from tuple of arrays with equal sizes."); - - return ColumnMap::create(converted_columns[0], converted_columns[1], offsets[0]); - }; - } - - WrapperType createMapToMapWrapper(const DataTypes & from_kv_types, const DataTypes & to_kv_types) const - { - return [element_wrappers = getElementWrappers(from_kv_types, to_kv_types), from_kv_types, to_kv_types] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t /*input_rows_count*/) -> ColumnPtr - { - const auto * col = arguments.front().column.get(); - const auto & column_map = typeid_cast(*col); - const auto & nested_data = column_map.getNestedData(); - - Columns converted_columns(2); - for (size_t i = 0; i < 2; ++i) - { - ColumnsWithTypeAndName element = {{nested_data.getColumnPtr(i), from_kv_types[i], ""}}; - converted_columns[i] = element_wrappers[i](element, to_kv_types[i], nullable_source, (element[0].column)->size()); - } - - return ColumnMap::create(converted_columns[0], converted_columns[1], column_map.getNestedColumn().getOffsetsPtr()); - }; - } - - /// The case of: [(key1, value1), (key2, value2), ...] - WrapperType createArrayToMapWrapper(const DataTypes & from_kv_types, const DataTypes & to_kv_types) const - { - return [element_wrappers = getElementWrappers(from_kv_types, to_kv_types), from_kv_types, to_kv_types] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t /*input_rows_count*/) -> ColumnPtr - { - const auto * col = arguments.front().column.get(); - const auto & column_array = typeid_cast(*col); - const auto & nested_data = typeid_cast(column_array.getData()); - - Columns converted_columns(2); - for (size_t i = 0; i < 2; ++i) - { - ColumnsWithTypeAndName element = {{nested_data.getColumnPtr(i), from_kv_types[i], ""}}; - converted_columns[i] = element_wrappers[i](element, to_kv_types[i], nullable_source, (element[0].column)->size()); - } - - return ColumnMap::create(converted_columns[0], converted_columns[1], column_array.getOffsetsPtr()); - }; - } - - - WrapperType createMapWrapper(const DataTypePtr & from_type_untyped, const DataTypeMap * to_type) const - { - if (const auto * from_tuple = checkAndGetDataType(from_type_untyped.get())) - { - if (from_tuple->getElements().size() != 2) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "CAST AS Map from tuple requires 2 elements. " - "Left type: {}, right type: {}", - from_tuple->getName(), - to_type->getName()); - - DataTypes from_kv_types; - const auto & to_kv_types = to_type->getKeyValueTypes(); - - for (const auto & elem : from_tuple->getElements()) - { - const auto * type_array = checkAndGetDataType(elem.get()); - if (!type_array) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "CAST AS Map can only be performed from tuples of array. Got: {}", from_tuple->getName()); - - from_kv_types.push_back(type_array->getNestedType()); - } - - return createTupleToMapWrapper(from_kv_types, to_kv_types); - } - else if (const auto * from_array = typeid_cast(from_type_untyped.get())) - { - const auto * nested_tuple = typeid_cast(from_array->getNestedType().get()); - if (!nested_tuple || nested_tuple->getElements().size() != 2) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "CAST AS Map from array requires nested tuple of 2 elements. 
" - "Left type: {}, right type: {}", - from_array->getName(), - to_type->getName()); - - return createArrayToMapWrapper(nested_tuple->getElements(), to_type->getKeyValueTypes()); - } - else if (const auto * from_type = checkAndGetDataType(from_type_untyped.get())) - { - return createMapToMapWrapper(from_type->getKeyValueTypes(), to_type->getKeyValueTypes()); - } - else - { - throw Exception(ErrorCodes::TYPE_MISMATCH, "Unsupported types to CAST AS Map. " - "Left type: {}, right type: {}", from_type_untyped->getName(), to_type->getName()); - } - } - - WrapperType createTupleToObjectWrapper(const DataTypeTuple & from_tuple, bool has_nullable_subcolumns) const - { - if (!from_tuple.haveExplicitNames()) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Cast to Object can be performed only from flatten Named Tuple. Got: {}", from_tuple.getName()); - - PathsInData paths; - DataTypes from_types; - - std::tie(paths, from_types) = flattenTuple(from_tuple.getPtr()); - auto to_types = from_types; - - for (auto & type : to_types) - { - if (isTuple(type) || isNested(type)) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Cast to Object can be performed only from flatten Named Tuple. Got: {}", - from_tuple.getName()); - - type = recursiveRemoveLowCardinality(type); - } - - return [element_wrappers = getElementWrappers(from_types, to_types), - has_nullable_subcolumns, from_types, to_types, paths] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t input_rows_count) - { - size_t tuple_size = to_types.size(); - auto flattened_column = flattenTuple(arguments.front().column); - const auto & column_tuple = assert_cast(*flattened_column); - - if (tuple_size != column_tuple.getColumns().size()) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Expected tuple with {} subcolumn, but got {} subcolumns", - tuple_size, column_tuple.getColumns().size()); - - auto res = ColumnObject::create(has_nullable_subcolumns); - for (size_t i = 0; i < tuple_size; ++i) - { - ColumnsWithTypeAndName element = {{column_tuple.getColumns()[i], from_types[i], "" }}; - auto converted_column = element_wrappers[i](element, to_types[i], nullable_source, input_rows_count); - res->addSubcolumn(paths[i], converted_column->assumeMutable()); - } - - return res; - }; - } - - WrapperType createMapToObjectWrapper(const DataTypeMap & from_map, bool has_nullable_subcolumns) const - { - auto key_value_types = from_map.getKeyValueTypes(); - - if (!isStringOrFixedString(key_value_types[0])) - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Cast to Object from Map can be performed only from Map " - "with String or FixedString key. 
Got: {}", from_map.getName()); - - const auto & value_type = key_value_types[1]; - auto to_value_type = value_type; - - if (!has_nullable_subcolumns && value_type->isNullable()) - to_value_type = removeNullable(value_type); - - if (has_nullable_subcolumns && !value_type->isNullable()) - to_value_type = makeNullable(value_type); - - DataTypes to_key_value_types{std::make_shared(), std::move(to_value_type)}; - auto element_wrappers = getElementWrappers(key_value_types, to_key_value_types); - - return [has_nullable_subcolumns, element_wrappers, key_value_types, to_key_value_types] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t) -> ColumnPtr - { - const auto & column_map = assert_cast(*arguments.front().column); - const auto & offsets = column_map.getNestedColumn().getOffsets(); - auto key_value_columns = column_map.getNestedData().getColumnsCopy(); - - for (size_t i = 0; i < 2; ++i) - { - ColumnsWithTypeAndName element{{key_value_columns[i], key_value_types[i], ""}}; - key_value_columns[i] = element_wrappers[i](element, to_key_value_types[i], nullable_source, key_value_columns[i]->size()); - } - - const auto & key_column_str = assert_cast(*key_value_columns[0]); - const auto & value_column = *key_value_columns[1]; - - using SubcolumnsMap = HashMap; - SubcolumnsMap subcolumns; - - for (size_t row = 0; row < offsets.size(); ++row) - { - for (size_t i = offsets[static_cast(row) - 1]; i < offsets[row]; ++i) - { - auto ref = key_column_str.getDataAt(i); - - bool inserted; - SubcolumnsMap::LookupResult it; - subcolumns.emplace(ref, it, inserted); - auto & subcolumn = it->getMapped(); - - if (inserted) - subcolumn = value_column.cloneEmpty()->cloneResized(row); - - /// Map can have duplicated keys. We insert only first one. - if (subcolumn->size() == row) - subcolumn->insertFrom(value_column, i); - } - - /// Insert default values for keys missed in current row. 
- for (const auto & [_, subcolumn] : subcolumns) - if (subcolumn->size() == row) - subcolumn->insertDefault(); - } - - auto column_object = ColumnObject::create(has_nullable_subcolumns); - for (auto && [key, subcolumn] : subcolumns) - { - PathInData path(key.toView()); - column_object->addSubcolumn(path, std::move(subcolumn)); - } - - return column_object; - }; - } - - WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_type) const - { - if (const auto * from_tuple = checkAndGetDataType(from_type.get())) - { - return createTupleToObjectWrapper(*from_tuple, to_type->hasNullableSubcolumns()); - } - else if (const auto * from_map = checkAndGetDataType(from_type.get())) - { - return createMapToObjectWrapper(*from_map, to_type->hasNullableSubcolumns()); - } - else if (checkAndGetDataType(from_type.get())) - { - return [] (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) - { - auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count)->assumeMutable(); - res->finalize(); - return res; - }; - } - else if (checkAndGetDataType(from_type.get())) - { - return [is_nullable = to_type->hasNullableSubcolumns()] (ColumnsWithTypeAndName & arguments, const DataTypePtr & , const ColumnNullable * , size_t) -> ColumnPtr - { - auto & column_object = assert_cast(*arguments.front().column); - auto res = ColumnObject::create(is_nullable); - for (size_t i = 0; i < column_object.size(); i++) - res->insert(column_object[i]); - - res->finalize(); - return res; - }; - } - - throw Exception(ErrorCodes::TYPE_MISMATCH, - "Cast to Object can be performed only from flatten named Tuple, Map or String. Got: {}", from_type->getName()); - } - - WrapperType createVariantToVariantWrapper(const DataTypeVariant & from_variant, const DataTypeVariant & to_variant) const - { - /// We support only extension of variant type, so, only new types can be added. - /// For example: Variant(T1, T2) -> Variant(T1, T2, T3) is supported, but Variant(T1, T2) -> Variant(T1, T3) is not supported. - /// We want to extend Variant type for free without rewriting the data, but we sort data types inside Variant during type creation - /// (we do it because we want Variant(T1, T2) to be the same as Variant(T2, T1)), but after extension the order of variant types - /// (and so their discriminators) can be different. For example: Variant(T1, T3) -> Variant(T1, T2, T3). - /// To avoid full rewrite of discriminators column, ColumnVariant supports it's local order of variant columns (and so local - /// discriminators) and stores mapping global order -> local order. - /// So, to extend Variant with new types for free, we should keep old local order for old variants, append new variants and change - /// mapping global order -> local order according to the new global order. - - /// Create map (new variant type) -> (it's global discriminator in new order). - const auto & new_variants = to_variant.getVariants(); - std::unordered_map new_variant_types_to_new_global_discriminator; - new_variant_types_to_new_global_discriminator.reserve(new_variants.size()); - for (size_t i = 0; i != new_variants.size(); ++i) - new_variant_types_to_new_global_discriminator[new_variants[i]->getName()] = i; - - /// Create set of old variant types. 
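The restriction described in the comment above — a Variant can only be cast to an extension of itself — is visible directly from SQL; a sketch, assuming the experimental Variant type is enabled via `allow_experimental_variant_type`:

``` sql
SET allow_experimental_variant_type = 1;
-- Allowed: Variant(String, UInt64) is extended with Array(UInt64)
SELECT CAST(CAST(42 AS Variant(String, UInt64)) AS Variant(String, UInt64, Array(UInt64)));
-- Expected to throw: Variant(String, UInt64) is not a subset of Variant(String, Array(UInt64))
-- SELECT CAST(CAST(42 AS Variant(String, UInt64)) AS Variant(String, Array(UInt64)));
```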
- const auto & old_variants = from_variant.getVariants(); - std::unordered_map old_variant_types_to_old_global_discriminator; - old_variant_types_to_old_global_discriminator.reserve(old_variants.size()); - for (size_t i = 0; i != old_variants.size(); ++i) - old_variant_types_to_old_global_discriminator[old_variants[i]->getName()] = i; - - /// Check that the set of old variants types is a subset of new variant types and collect new global discriminator for each old global discriminator. - std::unordered_map old_global_discriminator_to_new; - old_global_discriminator_to_new.reserve(old_variants.size()); - for (const auto & [old_variant_type, old_discriminator] : old_variant_types_to_old_global_discriminator) - { - auto it = new_variant_types_to_new_global_discriminator.find(old_variant_type); - if (it == new_variant_types_to_new_global_discriminator.end()) - throw Exception( - ErrorCodes::CANNOT_CONVERT_TYPE, - "Cannot convert type {} to {}. Conversion between Variant types is allowed only when new Variant type is an extension " - "of an initial one", from_variant.getName(), to_variant.getName()); - old_global_discriminator_to_new[old_discriminator] = it->second; - } - - /// Collect variant types and their global discriminators that should be added to the old Variant to get the new Variant. - std::vector> variant_types_and_discriminators_to_add; - variant_types_and_discriminators_to_add.reserve(new_variants.size() - old_variants.size()); - for (size_t i = 0; i != new_variants.size(); ++i) - { - if (!old_variant_types_to_old_global_discriminator.contains(new_variants[i]->getName())) - variant_types_and_discriminators_to_add.emplace_back(new_variants[i], i); - } - - return [old_global_discriminator_to_new, variant_types_and_discriminators_to_add] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr - { - const auto & column_variant = assert_cast(*arguments.front().column.get()); - size_t num_old_variants = column_variant.getNumVariants(); - Columns new_variant_columns; - new_variant_columns.reserve(num_old_variants + variant_types_and_discriminators_to_add.size()); - std::vector new_local_to_global_discriminators; - new_local_to_global_discriminators.reserve(num_old_variants + variant_types_and_discriminators_to_add.size()); - for (size_t i = 0; i != num_old_variants; ++i) - { - new_variant_columns.push_back(column_variant.getVariantPtrByLocalDiscriminator(i)); - new_local_to_global_discriminators.push_back(old_global_discriminator_to_new.at(column_variant.globalDiscriminatorByLocal(i))); - } - - for (const auto & [new_variant_type, new_global_discriminator] : variant_types_and_discriminators_to_add) - { - new_variant_columns.push_back(new_variant_type->createColumn()); - new_local_to_global_discriminators.push_back(new_global_discriminator); - } - - return ColumnVariant::create(column_variant.getLocalDiscriminatorsPtr(), column_variant.getOffsetsPtr(), new_variant_columns, new_local_to_global_discriminators); - }; - } - - WrapperType createVariantToColumnWrapper(const DataTypeVariant & from_variant, const DataTypePtr & to_type) const - { - const auto & variant_types = from_variant.getVariants(); - std::vector variant_wrappers; - variant_wrappers.reserve(variant_types.size()); - - /// Create conversion wrapper for each variant. 
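The wrapper prepared below casts every stored variant to the requested type and then reassembles rows by discriminator; in SQL terms this is, roughly, what allows a Variant column to be cast to an ordinary type (again assuming the experimental setting):

``` sql
SET allow_experimental_variant_type = 1;
SELECT CAST(CAST(42 AS Variant(String, UInt64)) AS String);  -- expected: '42', each variant converted to String
```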
- for (const auto & variant_type : variant_types) - variant_wrappers.push_back(prepareUnpackDictionaries(variant_type, to_type)); - - return [variant_wrappers, variant_types, to_type] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr - { - const auto & column_variant = assert_cast(*arguments.front().column.get()); - - /// First, cast each variant to the result type. - std::vector casted_variant_columns; - casted_variant_columns.reserve(variant_types.size()); - for (size_t i = 0; i != variant_types.size(); ++i) - { - auto variant_col = column_variant.getVariantPtrByLocalDiscriminator(i); - ColumnsWithTypeAndName variant = {{variant_col, variant_types[i], "" }}; - const auto & variant_wrapper = variant_wrappers[column_variant.globalDiscriminatorByLocal(i)]; - casted_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_col->size())); - } - - /// Second, construct resulting column from casted variant columns according to discriminators. - const auto & local_discriminators = column_variant.getLocalDiscriminators(); - auto res = result_type->createColumn(); - res->reserve(input_rows_count); - for (size_t i = 0; i != input_rows_count; ++i) - { - auto local_discr = local_discriminators[i]; - if (local_discr == ColumnVariant::NULL_DISCRIMINATOR) - res->insertDefault(); - else - res->insertFrom(*casted_variant_columns[local_discr], column_variant.offsetAt(i)); - } - - return res; - }; - } - - static ColumnPtr createVariantFromDescriptorsAndOneNonEmptyVariant(const DataTypes & variant_types, const ColumnPtr & discriminators, const ColumnPtr & variant, ColumnVariant::Discriminator variant_discr) - { - Columns variants; - variants.reserve(variant_types.size()); - for (size_t i = 0; i != variant_types.size(); ++i) - { - if (i == variant_discr) - variants.emplace_back(variant); - else - variants.push_back(variant_types[i]->createColumn()); - } - - return ColumnVariant::create(discriminators, variants); - } - - WrapperType createColumnToVariantWrapper(const DataTypePtr & from_type, const DataTypeVariant & to_variant) const - { - /// We allow converting NULL to Variant(...) as Variant can store NULLs. - if (from_type->onlyNull()) - { - return [](ColumnsWithTypeAndName &, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr - { - auto result_column = result_type->createColumn(); - result_column->insertManyDefaults(input_rows_count); - return result_column; - }; - } - - auto variant_discr_opt = to_variant.tryGetVariantDiscriminator(*removeNullableOrLowCardinalityNullable(from_type)); - if (!variant_discr_opt) - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Cannot convert type {} to {}. 
Conversion to Variant allowed only for types from this Variant", from_type->getName(), to_variant.getName()); - - return [variant_discr = *variant_discr_opt] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t) -> ColumnPtr - { - const auto & result_variant_type = assert_cast(*result_type); - const auto & variant_types = result_variant_type.getVariants(); - if (const ColumnNullable * col_nullable = typeid_cast(arguments.front().column.get())) - { - const auto & column = col_nullable->getNestedColumnPtr(); - const auto & null_map = col_nullable->getNullMapData(); - IColumn::Filter filter; - filter.reserve(column->size()); - auto discriminators = ColumnVariant::ColumnDiscriminators::create(); - auto & discriminators_data = discriminators->getData(); - discriminators_data.reserve(column->size()); - size_t variant_size_hint = 0; - for (size_t i = 0; i != column->size(); ++i) - { - if (null_map[i]) - { - discriminators_data.push_back(ColumnVariant::NULL_DISCRIMINATOR); - filter.push_back(0); - } - else - { - discriminators_data.push_back(variant_discr); - filter.push_back(1); - ++variant_size_hint; - } - } - - ColumnPtr variant_column; - /// If there were no NULLs, just use the column. - if (variant_size_hint == column->size()) - variant_column = column; - /// Otherwise we should use filtered column. - else - variant_column = column->filter(filter, variant_size_hint); - return createVariantFromDescriptorsAndOneNonEmptyVariant(variant_types, std::move(discriminators), variant_column, variant_discr); - } - else if (isColumnLowCardinalityNullable(*arguments.front().column)) - { - const auto & column = arguments.front().column; - - /// Variant column cannot have LowCardinality(Nullable(...)) variant, as Variant column stores NULLs itself. - /// We should create a null-map, insert NULL_DISCRIMINATOR on NULL values and filter initial column. - const auto & col_lc = assert_cast(*column); - const auto & indexes = col_lc.getIndexes(); - auto null_index = col_lc.getDictionary().getNullValueIndex(); - IColumn::Filter filter; - filter.reserve(col_lc.size()); - auto discriminators = ColumnVariant::ColumnDiscriminators::create(); - auto & discriminators_data = discriminators->getData(); - discriminators_data.reserve(col_lc.size()); - size_t variant_size_hint = 0; - for (size_t i = 0; i != col_lc.size(); ++i) - { - if (indexes.getUInt(i) == null_index) - { - discriminators_data.push_back(ColumnVariant::NULL_DISCRIMINATOR); - filter.push_back(0); - } - else - { - discriminators_data.push_back(variant_discr); - filter.push_back(1); - ++variant_size_hint; - } - } - - MutableColumnPtr variant_column; - /// If there were no NULLs, we can just clone the column. - if (variant_size_hint == col_lc.size()) - variant_column = IColumn::mutate(column); - /// Otherwise we should filter column. 
- else - variant_column = column->filter(filter, variant_size_hint)->assumeMutable(); - - assert_cast(*variant_column).nestedRemoveNullable(); - return createVariantFromDescriptorsAndOneNonEmptyVariant(variant_types, std::move(discriminators), std::move(variant_column), variant_discr); - } - else - { - const auto & column = arguments.front().column; - auto discriminators = ColumnVariant::ColumnDiscriminators::create(); - discriminators->getData().resize_fill(column->size(), variant_discr); - return createVariantFromDescriptorsAndOneNonEmptyVariant(variant_types, std::move(discriminators), column, variant_discr); - } - }; - } - - /// Wrapper for conversion to/from Variant type - WrapperType createVariantWrapper(const DataTypePtr & from_type, const DataTypePtr & to_type) const - { - if (const auto * from_variant = checkAndGetDataType(from_type.get())) - { - if (const auto * to_variant = checkAndGetDataType(to_type.get())) - return createVariantToVariantWrapper(*from_variant, *to_variant); - - return createVariantToColumnWrapper(*from_variant, to_type); - } - - return createColumnToVariantWrapper(from_type, assert_cast(*to_type)); - } - - template - WrapperType createEnumWrapper(const DataTypePtr & from_type, const DataTypeEnum * to_type) const - { - using EnumType = DataTypeEnum; - using Function = typename FunctionTo::Type; - - if (const auto * from_enum8 = checkAndGetDataType(from_type.get())) - checkEnumToEnumConversion(from_enum8, to_type); - else if (const auto * from_enum16 = checkAndGetDataType(from_type.get())) - checkEnumToEnumConversion(from_enum16, to_type); - - if (checkAndGetDataType(from_type.get())) - return createStringToEnumWrapper(); - else if (checkAndGetDataType(from_type.get())) - return createStringToEnumWrapper(); - else if (isNativeNumber(from_type) || isEnum(from_type)) - { - auto function = Function::create(); - return createFunctionAdaptor(function, from_type); - } - else - { - if (cast_type == CastType::accurateOrNull) - return createToNullableColumnWrapper(); - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Conversion from {} to {} is not supported", - from_type->getName(), to_type->getName()); - } - } - - template - void checkEnumToEnumConversion(const EnumTypeFrom * from_type, const EnumTypeTo * to_type) const - { - const auto & from_values = from_type->getValues(); - const auto & to_values = to_type->getValues(); - - using ValueType = std::common_type_t; - using NameValuePair = std::pair; - using EnumValues = std::vector; - - EnumValues name_intersection; - std::set_intersection(std::begin(from_values), std::end(from_values), - std::begin(to_values), std::end(to_values), std::back_inserter(name_intersection), - [] (auto && from, auto && to) { return from.first < to.first; }); - - for (const auto & name_value : name_intersection) - { - const auto & old_value = name_value.second; - const auto & new_value = to_type->getValue(name_value.first); - if (old_value != new_value) - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Enum conversion changes value for element '{}' from {} to {}", - name_value.first, toString(old_value), toString(new_value)); - } - } - - template - WrapperType createStringToEnumWrapper() const - { - const char * function_name = cast_name; - return [function_name] ( - ColumnsWithTypeAndName & arguments, const DataTypePtr & res_type, const ColumnNullable * nullable_col, size_t /*input_rows_count*/) - { - const auto & first_col = arguments.front().column.get(); - const auto & result_type = typeid_cast(*res_type); - - const 
ColumnStringType * col = typeid_cast(first_col); - - if (col && nullable_col && nullable_col->size() != col->size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "ColumnNullable is not compatible with original"); - - if (col) - { - const auto size = col->size(); - - auto res = result_type.createColumn(); - auto & out_data = static_cast(*res).getData(); - out_data.resize(size); - - auto default_enum_value = result_type.getValues().front().second; - - if (nullable_col) - { - for (size_t i = 0; i < size; ++i) - { - if (!nullable_col->isNullAt(i)) - out_data[i] = result_type.getValue(col->getDataAt(i)); - else - out_data[i] = default_enum_value; - } - } - else - { - for (size_t i = 0; i < size; ++i) - out_data[i] = result_type.getValue(col->getDataAt(i)); - } - - return res; - } - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected column {} as first argument of function {}", - first_col->getName(), function_name); - }; - } - - template - WrapperType createEnumToStringWrapper() const - { - const char * function_name = cast_name; - return [function_name] ( - ColumnsWithTypeAndName & arguments, const DataTypePtr & res_type, const ColumnNullable * nullable_col, size_t /*input_rows_count*/) - { - using ColumnEnumType = EnumType::ColumnType; - - const auto & first_col = arguments.front().column.get(); - const auto & first_type = arguments.front().type.get(); - - const ColumnEnumType * enum_col = typeid_cast(first_col); - const EnumType * enum_type = typeid_cast(first_type); - - if (enum_col && nullable_col && nullable_col->size() != enum_col->size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "ColumnNullable is not compatible with original"); - - if (enum_col && enum_type) - { - const auto size = enum_col->size(); - const auto & enum_data = enum_col->getData(); - - auto res = res_type->createColumn(); - - if (nullable_col) - { - for (size_t i = 0; i < size; ++i) - { - if (!nullable_col->isNullAt(i)) - { - const auto & value = enum_type->getNameForValue(enum_data[i]); - res->insertData(value.data, value.size); - } - else - res->insertDefault(); - } - } - else - { - for (size_t i = 0; i < size; ++i) - { - const auto & value = enum_type->getNameForValue(enum_data[i]); - res->insertData(value.data, value.size); - } - } - - return res; - } - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected column {} as first argument of function {}", - first_col->getName(), function_name); - }; - } - - static WrapperType createIdentityWrapper(const DataTypePtr &) - { - return [] (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t /*input_rows_count*/) - { - return arguments.front().column; - }; - } - - static WrapperType createNothingWrapper(const IDataType * to_type) - { - ColumnPtr res = to_type->createColumnConstWithDefaultValue(1); - return [res] (ColumnsWithTypeAndName &, const DataTypePtr &, const ColumnNullable *, size_t input_rows_count) - { - /// Column of Nothing type is trivially convertible to any other column - return res->cloneResized(input_rows_count)->convertToFullColumnIfConst(); - }; - } - - WrapperType prepareUnpackDictionaries(const DataTypePtr & from_type, const DataTypePtr & to_type) const - { - /// Conversion from/to Variant data type is processed in a special way. - /// We don't need to remove LowCardinality/Nullable. 
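The Enum wrappers above translate between element names and their underlying values (and the Enum-to-Enum check rejects conversions that would silently change a value for an existing name); for instance:

``` sql
SELECT CAST('hello' AS Enum('hello' = 1, 'world' = 2));                  -- 'hello', stored as 1
SELECT CAST(CAST('hello' AS Enum('hello' = 1, 'world' = 2)) AS String);  -- 'hello'
SELECT CAST(2 AS Enum('hello' = 1, 'world' = 2));                        -- 'world': numeric input maps by value
```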
- if (isVariant(to_type) || isVariant(from_type)) - return createVariantWrapper(from_type, to_type); - - const auto * from_low_cardinality = typeid_cast(from_type.get()); - const auto * to_low_cardinality = typeid_cast(to_type.get()); - const auto & from_nested = from_low_cardinality ? from_low_cardinality->getDictionaryType() : from_type; - const auto & to_nested = to_low_cardinality ? to_low_cardinality->getDictionaryType() : to_type; - - if (from_type->onlyNull()) - { - if (!to_nested->isNullable() && !isVariant(to_type)) - { - if (cast_type == CastType::accurateOrNull) - { - return createToNullableColumnWrapper(); - } - else - { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Cannot convert NULL to a non-nullable type"); - } - } - - return [](ColumnsWithTypeAndName &, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) - { - return result_type->createColumnConstWithDefaultValue(input_rows_count)->convertToFullColumnIfConst(); - }; - } - - bool skip_not_null_check = false; - - if (from_low_cardinality && from_nested->isNullable() && !to_nested->isNullable()) - /// Disable check for dictionary. Will check that column doesn't contain NULL in wrapper below. - skip_not_null_check = true; - - auto wrapper = prepareRemoveNullable(from_nested, to_nested, skip_not_null_check); - if (!from_low_cardinality && !to_low_cardinality) - return wrapper; - - return [wrapper, from_low_cardinality, to_low_cardinality, skip_not_null_check] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) -> ColumnPtr - { - ColumnsWithTypeAndName args = {arguments[0]}; - auto & arg = args.front(); - auto res_type = result_type; - - ColumnPtr converted_column; - - ColumnPtr res_indexes; - /// For some types default can't be casted (for example, String to Int). In that case convert column to full. - bool src_converted_to_full_column = false; - - { - auto tmp_rows_count = input_rows_count; - - if (to_low_cardinality) - res_type = to_low_cardinality->getDictionaryType(); - - if (from_low_cardinality) - { - const auto * col_low_cardinality = assert_cast(arguments[0].column.get()); - - if (skip_not_null_check && col_low_cardinality->containsNull()) - throw Exception(ErrorCodes::CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN, "Cannot convert NULL value to non-Nullable type"); - - arg.column = col_low_cardinality->getDictionary().getNestedColumn(); - arg.type = from_low_cardinality->getDictionaryType(); - - /// TODO: Make map with defaults conversion. - src_converted_to_full_column = !removeNullable(arg.type)->equals(*removeNullable(res_type)); - if (src_converted_to_full_column) - arg.column = arg.column->index(col_low_cardinality->getIndexes(), 0); - else - res_indexes = col_low_cardinality->getIndexesPtr(); - - tmp_rows_count = arg.column->size(); - } - - /// Perform the requested conversion. 
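The only-NULL handling above corresponds to the following observable behaviour, roughly, with default settings:

``` sql
SELECT CAST(NULL AS Nullable(UInt32));      -- NULL
SELECT CAST(NULL AS UInt32);                -- expected to throw: cannot convert NULL to a non-nullable type
SELECT accurateCastOrNull(NULL, 'UInt32');  -- NULL: the accurateOrNull mode falls back to a Nullable result
```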
- converted_column = wrapper(args, res_type, nullable_source, tmp_rows_count); - } - - if (to_low_cardinality) - { - auto res_column = to_low_cardinality->createColumn(); - auto * col_low_cardinality = assert_cast(res_column.get()); - - if (from_low_cardinality && !src_converted_to_full_column) - { - col_low_cardinality->insertRangeFromDictionaryEncodedColumn(*converted_column, *res_indexes); - } - else - col_low_cardinality->insertRangeFromFullColumn(*converted_column, 0, converted_column->size()); - - return res_column; - } - else if (!src_converted_to_full_column) - return converted_column->index(*res_indexes, 0); - else - return converted_column; - }; - } - - WrapperType prepareRemoveNullable(const DataTypePtr & from_type, const DataTypePtr & to_type, bool skip_not_null_check) const - { - /// Determine whether pre-processing and/or post-processing must take place during conversion. - - bool source_is_nullable = from_type->isNullable(); - bool result_is_nullable = to_type->isNullable(); - - auto wrapper = prepareImpl(removeNullable(from_type), removeNullable(to_type), result_is_nullable); - - if (result_is_nullable) - { - return [wrapper, source_is_nullable] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr - { - /// Create a temporary columns on which to perform the operation. - const auto & nullable_type = static_cast(*result_type); - const auto & nested_type = nullable_type.getNestedType(); - - ColumnsWithTypeAndName tmp_args; - if (source_is_nullable) - tmp_args = createBlockWithNestedColumns(arguments); - else - tmp_args = arguments; - - const ColumnNullable * nullable_source = nullptr; - - /// Add original ColumnNullable for createStringToEnumWrapper() - if (source_is_nullable) - { - if (arguments.size() != 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid number of arguments"); - nullable_source = typeid_cast(arguments.front().column.get()); - } - - /// Perform the requested conversion. - auto tmp_res = wrapper(tmp_args, nested_type, nullable_source, input_rows_count); - - /// May happen in fuzzy tests. For debug purpose. - if (!tmp_res) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Couldn't convert {} to {} in prepareRemoveNullable wrapper.", - arguments[0].type->getName(), nested_type->getName()); - - return wrapInNullable(tmp_res, arguments, nested_type, input_rows_count); - }; - } - else if (source_is_nullable) - { - /// Conversion from Nullable to non-Nullable. - - return [wrapper, skip_not_null_check] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr - { - auto tmp_args = createBlockWithNestedColumns(arguments); - auto nested_type = removeNullable(result_type); - - /// Check that all values are not-NULL. - /// Check can be skipped in case if LowCardinality dictionary is transformed. - /// In that case, correctness will be checked beforehand. 
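The Nullable-to-non-Nullable path below only succeeds when the null map contains no NULLs; for example:

``` sql
SELECT CAST(CAST(1 AS Nullable(UInt8)) AS UInt32);     -- 1: no NULL values present
SELECT CAST(CAST(NULL AS Nullable(UInt8)) AS UInt32);  -- throws: cannot convert NULL value to non-Nullable type
```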
- if (!skip_not_null_check) - { - const auto & col = arguments[0].column; - const auto & nullable_col = assert_cast(*col); - const auto & null_map = nullable_col.getNullMapData(); - - if (!memoryIsZero(null_map.data(), 0, null_map.size())) - throw Exception(ErrorCodes::CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN, "Cannot convert NULL value to non-Nullable type"); - } - const ColumnNullable * nullable_source = typeid_cast(arguments.front().column.get()); - return wrapper(tmp_args, nested_type, nullable_source, input_rows_count); - }; - } - else - return wrapper; - } - - /// 'from_type' and 'to_type' are nested types in case of Nullable. - /// 'requested_result_is_nullable' is true if CAST to Nullable type is requested. - WrapperType prepareImpl(const DataTypePtr & from_type, const DataTypePtr & to_type, bool requested_result_is_nullable) const - { - if (isUInt8(from_type) && isBool(to_type)) - return createUInt8ToBoolWrapper(from_type, to_type); - - /// We can cast IPv6 into IPv6, IPv4 into IPv4, but we should not allow to cast FixedString(16) into IPv6 as part of identity cast - bool safe_convert_custom_types = true; - - if (const auto * to_type_custom_name = to_type->getCustomName()) - safe_convert_custom_types = from_type->getCustomName() && from_type->getCustomName()->getName() == to_type_custom_name->getName(); - else if (const auto * from_type_custom_name = from_type->getCustomName()) - safe_convert_custom_types = to_type->getCustomName() && from_type_custom_name->getName() == to_type->getCustomName()->getName(); - - if (from_type->equals(*to_type) && safe_convert_custom_types) - { - /// We can only use identity conversion for DataTypeAggregateFunction when they are strictly equivalent. - if (typeid_cast(from_type.get())) - { - if (DataTypeAggregateFunction::strictEquals(from_type, to_type)) - return createIdentityWrapper(from_type); - } - else - return createIdentityWrapper(from_type); - } - else if (WhichDataType(from_type).isNothing()) - return createNothingWrapper(to_type.get()); - - WrapperType ret; - - auto make_default_wrapper = [&](const auto & types) -> bool - { - using Types = std::decay_t; - using ToDataType = typename Types::LeftType; - - if constexpr ( - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v) - { - ret = createWrapper(from_type, checkAndGetDataType(to_type.get()), requested_result_is_nullable); - return true; - } - if constexpr (std::is_same_v) - { - if (isBool(to_type)) - ret = createBoolWrapper(from_type, checkAndGetDataType(to_type.get()), requested_result_is_nullable); - else - ret = createWrapper(from_type, checkAndGetDataType(to_type.get()), requested_result_is_nullable); - return true; - } - if constexpr ( - std::is_same_v || - std::is_same_v) - { - ret = createEnumWrapper(from_type, checkAndGetDataType(to_type.get())); - return true; - } - if constexpr ( - std::is_same_v> || - std::is_same_v> || - std::is_same_v> || - std::is_same_v> || - std::is_same_v) - { - ret = createDecimalWrapper(from_type, checkAndGetDataType(to_type.get()), requested_result_is_nullable); - return true; - } - - return false; - }; - - bool cast_ipv4_ipv6_default_on_conversion_error_value = context && 
context->getSettingsRef().cast_ipv4_ipv6_default_on_conversion_error; - bool input_format_ipv4_default_on_conversion_error_value = context && context->getSettingsRef().input_format_ipv4_default_on_conversion_error; - bool input_format_ipv6_default_on_conversion_error_value = context && context->getSettingsRef().input_format_ipv6_default_on_conversion_error; - - auto make_custom_serialization_wrapper = [&, cast_ipv4_ipv6_default_on_conversion_error_value, input_format_ipv4_default_on_conversion_error_value, input_format_ipv6_default_on_conversion_error_value](const auto & types) -> bool - { - using Types = std::decay_t; - using ToDataType = typename Types::RightType; - using FromDataType = typename Types::LeftType; - - if constexpr (WhichDataType(FromDataType::type_id).isStringOrFixedString()) - { - if constexpr (std::is_same_v) - { - ret = [cast_ipv4_ipv6_default_on_conversion_error_value, - input_format_ipv4_default_on_conversion_error_value, - requested_result_is_nullable]( - ColumnsWithTypeAndName & arguments, - const DataTypePtr & result_type, - const ColumnNullable * column_nullable, - size_t) -> ColumnPtr - { - if (!WhichDataType(result_type).isIPv4()) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Wrong result type {}. Expected IPv4", result_type->getName()); - - const auto * null_map = column_nullable ? &column_nullable->getNullMapData() : nullptr; - if (requested_result_is_nullable) - return convertToIPv4(arguments[0].column, null_map); - else if (cast_ipv4_ipv6_default_on_conversion_error_value || input_format_ipv4_default_on_conversion_error_value) - return convertToIPv4(arguments[0].column, null_map); - else - return convertToIPv4(arguments[0].column, null_map); - }; - - return true; - } - - if constexpr (std::is_same_v) - { - ret = [cast_ipv4_ipv6_default_on_conversion_error_value, - input_format_ipv6_default_on_conversion_error_value, - requested_result_is_nullable]( - ColumnsWithTypeAndName & arguments, - const DataTypePtr & result_type, - const ColumnNullable * column_nullable, - size_t) -> ColumnPtr - { - if (!WhichDataType(result_type).isIPv6()) - throw Exception( - ErrorCodes::TYPE_MISMATCH, "Wrong result type {}. Expected IPv6", result_type->getName()); - - const auto * null_map = column_nullable ? 
&column_nullable->getNullMapData() : nullptr; - if (requested_result_is_nullable) - return convertToIPv6(arguments[0].column, null_map); - else if (cast_ipv4_ipv6_default_on_conversion_error_value || input_format_ipv6_default_on_conversion_error_value) - return convertToIPv6(arguments[0].column, null_map); - else - return convertToIPv6(arguments[0].column, null_map); - }; - - return true; - } - - if (to_type->getCustomSerialization() && to_type->getCustomName()) - { - ret = [requested_result_is_nullable]( - ColumnsWithTypeAndName & arguments, - const DataTypePtr & result_type, - const ColumnNullable * column_nullable, - size_t input_rows_count) -> ColumnPtr - { - auto wrapped_result_type = result_type; - if (requested_result_is_nullable) - wrapped_result_type = makeNullable(result_type); - return ConvertImplGenericFromString::execute( - arguments, wrapped_result_type, column_nullable, input_rows_count); - }; - return true; - } - } - else if constexpr (WhichDataType(FromDataType::type_id).isIPv6() && WhichDataType(ToDataType::type_id).isIPv4()) - { - ret = [cast_ipv4_ipv6_default_on_conversion_error_value, requested_result_is_nullable]( - ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * column_nullable, size_t) - -> ColumnPtr - { - if (!WhichDataType(result_type).isIPv4()) - throw Exception( - ErrorCodes::TYPE_MISMATCH, "Wrong result type {}. Expected IPv4", result_type->getName()); - - const auto * null_map = column_nullable ? &column_nullable->getNullMapData() : nullptr; - if (requested_result_is_nullable) - return convertIPv6ToIPv4(arguments[0].column, null_map); - else if (cast_ipv4_ipv6_default_on_conversion_error_value) - return convertIPv6ToIPv4(arguments[0].column, null_map); - else - return convertIPv6ToIPv4(arguments[0].column, null_map); - }; - - return true; - } - - if constexpr (WhichDataType(ToDataType::type_id).isStringOrFixedString()) - { - if constexpr (WhichDataType(FromDataType::type_id).isEnum()) - { - ret = createEnumToStringWrapper(); - return true; - } - else if (from_type->getCustomSerialization()) - { - ret = [](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr - { - return ConvertImplGenericToString::execute(arguments, result_type, input_rows_count); - }; - return true; - } - } - - return false; - }; - - if (callOnTwoTypeIndexes(from_type->getTypeId(), to_type->getTypeId(), make_custom_serialization_wrapper)) - return ret; - - if (callOnIndexAndDataType(to_type->getTypeId(), make_default_wrapper)) - return ret; - - switch (to_type->getTypeId()) - { - case TypeIndex::String: - return createStringWrapper(from_type); - case TypeIndex::FixedString: - return createFixedStringWrapper(from_type, checkAndGetDataType(to_type.get())->getN()); - case TypeIndex::Array: - return createArrayWrapper(from_type, static_cast(*to_type)); - case TypeIndex::Tuple: - return createTupleWrapper(from_type, checkAndGetDataType(to_type.get())); - case TypeIndex::Map: - return createMapWrapper(from_type, checkAndGetDataType(to_type.get())); - case TypeIndex::Object: - return createObjectWrapper(from_type, checkAndGetDataType(to_type.get())); - case TypeIndex::AggregateFunction: - return createAggregateFunctionWrapper(from_type, checkAndGetDataType(to_type.get())); - case TypeIndex::Interval: - return createIntervalWrapper(from_type, checkAndGetDataType(to_type.get())->getKind()); - default: - break; - } - - if (cast_type == CastType::accurateOrNull) - return 
createToNullableColumnWrapper(); - else - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Conversion from {} to {} is not supported", - from_type->getName(), to_type->getName()); - } -}; - -class MonotonicityHelper -{ -public: - using MonotonicityForRange = FunctionCastBase::MonotonicityForRange; - - template - static auto monotonicityForType(const DataType * const) - { - return FunctionTo::Type::Monotonic::get; - } - - static MonotonicityForRange getMonotonicityInformation(const DataTypePtr & from_type, const IDataType * to_type) - { - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (isEnum(from_type)) - { - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - } - /// other types like Null, FixedString, Array and Tuple have no monotonicity defined - return {}; - } -}; - -} From 277032e444652cfc18f62032a35af918bac39f4d Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
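As context for the IPv4/IPv6 cast wrappers above: depending on the requested result type and the `cast_ipv4_ipv6_default_on_conversion_error` / `input_format_*_default_on_conversion_error` settings, an unparsable string either throws, yields the type's default value, or yields `NULL`. A minimal sketch of that behaviour (illustrative only, assuming a recent ClickHouse server; not taken from the patch itself):

``` sql
-- Default behaviour: the cast throws on an invalid literal.
-- SELECT CAST('not-an-ip' AS IPv4);

-- With the setting enabled, the default value (0.0.0.0) is returned instead.
SELECT CAST('not-an-ip' AS IPv4)
SETTINGS cast_ipv4_ipv6_default_on_conversion_error = 1;

-- A Nullable result type maps conversion errors to NULL.
SELECT CAST('not-an-ip' AS Nullable(IPv6));
```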
Shiryaev" Date: Thu, 14 Mar 2024 14:16:33 +0100 Subject: [PATCH 295/374] Move the least updating cctools into beginning, update it --- docker/packager/binary-builder/Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/packager/binary-builder/Dockerfile b/docker/packager/binary-builder/Dockerfile index 96c90403187..c9442accd7e 100644 --- a/docker/packager/binary-builder/Dockerfile +++ b/docker/packager/binary-builder/Dockerfile @@ -4,6 +4,9 @@ FROM clickhouse/fasttest:$FROM_TAG ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} +# If the cctools is updated, then first build it in the CI, then update here in a different commit +COPY --from=clickhouse/cctools:d9e3596e706b /cctools /cctools + # Rust toolchain and libraries ENV RUSTUP_HOME=/rust/rustup ENV CARGO_HOME=/rust/cargo @@ -73,9 +76,6 @@ RUN curl -Lo /usr/bin/clang-tidy-cache \ "https://raw.githubusercontent.com/matus-chochlik/ctcache/$CLANG_TIDY_SHA1/clang-tidy-cache" \ && chmod +x /usr/bin/clang-tidy-cache -# If the cctools is updated, then first build it in the CI, then update here in a different commit -COPY --from=clickhouse/cctools:5a908f73878a /cctools /cctools - RUN mkdir /workdir && chmod 777 /workdir WORKDIR /workdir From d465835306c013b62e35c1330326505095b8b658 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 14 Mar 2024 14:08:43 +0100 Subject: [PATCH 296/374] Reorder hidden and shown checks in comment, change url of Mergeable check --- tests/ci/commit_status_helper.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py index 1c2d8b2ade8..bda2db13991 100644 --- a/tests/ci/commit_status_helper.py +++ b/tests/ci/commit_status_helper.py @@ -18,8 +18,10 @@ from github.GithubObject import NotSet from github.IssueComment import IssueComment from github.Repository import Repository -from ci_config import REQUIRED_CHECKS, CHECK_DESCRIPTIONS, CheckDescription -from env_helper import GITHUB_JOB_URL, GITHUB_REPOSITORY, TEMP_PATH +# isort: on + +from ci_config import CHECK_DESCRIPTIONS, REQUIRED_CHECKS, CheckDescription +from env_helper import GITHUB_REPOSITORY, GITHUB_RUN_URL, TEMP_PATH from pr_info import SKIP_MERGEABLE_CHECK_LABEL, PRInfo from report import ( ERROR, @@ -259,6 +261,12 @@ def generate_status_comment(pr_info: PRInfo, statuses: CommitStatuses) -> str: result = [comment_body] + if visible_table_rows: + visible_table_rows.sort() + result.append(table_header) + result.extend(visible_table_rows) + result.append(table_footer) + if hidden_table_rows: hidden_table_rows.sort() result.append(details_header) @@ -267,12 +275,6 @@ def generate_status_comment(pr_info: PRInfo, statuses: CommitStatuses) -> str: result.append(table_footer) result.append(details_footer) - if visible_table_rows: - visible_table_rows.sort() - result.append(table_header) - result.extend(visible_table_rows) - result.append(table_footer) - return "".join(result) @@ -427,7 +429,7 @@ def set_mergeable_check( context=MERGEABLE_NAME, description=format_description(description), state=state, - target_url=GITHUB_JOB_URL(), + target_url=GITHUB_RUN_URL, ) From 03b9bca8a4dc7f27c20d8cca22cbbc234ed0468c Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Thu, 14 Mar 2024 14:31:55 +0100 Subject: [PATCH 297/374] Terminate EC2 on spot event if runner isn't running --- tests/ci/worker/init_runner.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/ci/worker/init_runner.sh b/tests/ci/worker/init_runner.sh index b211128cf10..de1d128dc87 100644 --- a/tests/ci/worker/init_runner.sh +++ b/tests/ci/worker/init_runner.sh @@ -138,13 +138,15 @@ check_spot_instance_is_old() { check_proceed_spot_termination() { # The function checks and proceeds spot instance termination if exists # The event for spot instance termination + local FORCE + FORCE=${1:-} if TERMINATION_DATA=$(curl -s --fail http://169.254.169.254/latest/meta-data/spot/instance-action); then # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-instance-termination-notices.html#instance-action-metadata _action=$(jq '.action' -r <<< "$TERMINATION_DATA") _time=$(jq '.time | fromdate' <<< "$TERMINATION_DATA") _until_action=$((_time - $(date +%s))) echo "Received the '$_action' event that will be effective in $_until_action seconds" - if (( _until_action <= 30 )); then + if (( _until_action <= 30 )) || [ "$FORCE" == "force" ]; then echo "The action $_action will be done in $_until_action, killing the runner and exit" local runner_pid runner_pid=$(pgrep Runner.Listener) @@ -309,7 +311,7 @@ while true; do echo "Checking if the instance suppose to terminate" no_terminating_metadata || terminate_on_event check_spot_instance_is_old && terminate_and_exit - check_proceed_spot_termination + check_proceed_spot_termination force echo "Going to configure runner" sudo -u ubuntu ./config.sh --url $RUNNER_URL --token "$(get_runner_token)" \ @@ -319,7 +321,7 @@ while true; do echo "Another one check to avoid race between runner and infrastructure" no_terminating_metadata || terminate_on_event check_spot_instance_is_old && terminate_and_exit - check_proceed_spot_termination + check_proceed_spot_termination force echo "Run" sudo -u ubuntu \ From 82b089f4e95073e7048254bf5d4dc03a515f71a2 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 14 Mar 2024 13:59:29 +0000 Subject: [PATCH 298/374] remove profile event --- src/Common/ProfileEvents.cpp | 1 - src/Interpreters/Aggregator.cpp | 4 ---- 2 files changed, 5 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 8fd1e189977..e43e8910089 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -533,7 +533,6 @@ The server successfully detected this situation and will download merged part fr \ M(AggregationPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for aggregation.") \ M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were inited as two-level for aggregation.") \ - M(AggregationProcessedBlocks, "How many blocks were processed by Aggregator") \ M(AggregationOptimizedEqualRangesOfKeys, "For how many blocks optimization of equal ranges of keys was applied") \ \ M(MetadataFromKeeperCacheHit, "Number of times an object storage metadata request was answered from cache without making request to Keeper") \ diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 7c9dac82eff..40b1c09a32e 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -53,7 +53,6 @@ namespace ProfileEvents extern const Event OverflowThrow; extern const Event OverflowBreak; extern const Event OverflowAny; - extern const Event AggregationProcessedBlocks; extern const Event 
AggregationOptimizedEqualRangesOfKeys; } @@ -987,7 +986,6 @@ void Aggregator::executeOnBlockSmall( { /// `result` will destroy the states of aggregate functions in the destructor result.aggregator = this; - ProfileEvents::increment(ProfileEvents::AggregationProcessedBlocks); /// How to perform the aggregation? if (result.empty()) @@ -1521,7 +1519,6 @@ void NO_INLINE Aggregator::executeOnIntervalWithoutKey( /// `data_variants` will destroy the states of aggregate functions in the destructor data_variants.aggregator = this; data_variants.init(AggregatedDataVariants::Type::without_key); - ProfileEvents::increment(ProfileEvents::AggregationProcessedBlocks); AggregatedDataWithoutKey & res = data_variants.without_key; @@ -1653,7 +1650,6 @@ bool Aggregator::executeOnBlock(Columns columns, { /// `result` will destroy the states of aggregate functions in the destructor result.aggregator = this; - ProfileEvents::increment(ProfileEvents::AggregationProcessedBlocks); /// How to perform the aggregation? if (result.empty()) From 200823c31108015b16ead859a346a59f83cc6e74 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 14 Mar 2024 15:04:19 +0100 Subject: [PATCH 299/374] Try fix docs check --- .../sql-reference/aggregate-functions/reference/varpop.md | 4 ++-- .../sql-reference/aggregate-functions/reference/varsamp.md | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpop.md b/docs/en/sql-reference/aggregate-functions/reference/varpop.md index 76472f62789..2044b7e690b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varpop.md @@ -63,8 +63,8 @@ covarPopStable(x, y) **Parameters** -- `x`: The first data column. [String literal](../syntax#syntax-string-literal) -- `y`: The second data column. [Expression](../syntax#syntax-expressions) +- `x`: The first data column. [String literal](../../syntax#syntax-string-literal) +- `y`: The second data column. [Expression](../../syntax#syntax-expressions) **Returned value** diff --git a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md index e75cb075ff8..be669a16ae8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md @@ -8,7 +8,7 @@ This page contains information on the `varSamp` and `varSampStable` ClickHouse f ## varSamp -Calculate the sample variance of a data set. +Calculate the sample variance of a data set. **Syntax** @@ -18,7 +18,7 @@ varSamp(expr) **Parameters** -- `expr`: An expression representing the data set for which you want to calculate the sample variance. [Expression](../syntax#syntax-expressions) +- `expr`: An expression representing the data set for which you want to calculate the sample variance. [Expression](../../syntax#syntax-expressions) **Returned value** @@ -78,7 +78,7 @@ varSampStable(expr) **Parameters** -- `expr`: An expression representing the data set for which you want to calculate the sample variance. [Expression](../syntax#syntax-expressions) +- `expr`: An expression representing the data set for which you want to calculate the sample variance. 
[Expression](../../syntax#syntax-expressions) **Returned value** From 02e81329794ed326853f1e039bca49e960ed469d Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 14 Mar 2024 14:05:19 +0000 Subject: [PATCH 300/374] fix test --- .../03008_optimize_equal_ranges.reference | 12 ++++++++++-- .../0_stateless/03008_optimize_equal_ranges.sql | 9 ++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/03008_optimize_equal_ranges.reference b/tests/queries/0_stateless/03008_optimize_equal_ranges.reference index 08f8008fca6..fc7a4f3c118 100644 --- a/tests/queries/0_stateless/03008_optimize_equal_ranges.reference +++ b/tests/queries/0_stateless/03008_optimize_equal_ranges.reference @@ -1,8 +1,16 @@ 0 30000 1 30000 2 30000 +0 30000 +1 30000 +2 30000 0 449985000 1 449985000 2 449985000 -sum 1 -uniqExact 0 +0 449985000 +1 449985000 +2 449985000 +sum 1 1 +sum 16 1 +uniqExact 1 1 +uniqExact 16 0 diff --git a/tests/queries/0_stateless/03008_optimize_equal_ranges.sql b/tests/queries/0_stateless/03008_optimize_equal_ranges.sql index c6143fb7f51..4d521420741 100644 --- a/tests/queries/0_stateless/03008_optimize_equal_ranges.sql +++ b/tests/queries/0_stateless/03008_optimize_equal_ranges.sql @@ -10,16 +10,19 @@ INSERT INTO t_optimize_equal_ranges SELECT 0, toString(number), number FROM numb INSERT INTO t_optimize_equal_ranges SELECT 1, toString(number), number FROM numbers(30000); INSERT INTO t_optimize_equal_ranges SELECT 2, toString(number), number FROM numbers(30000); -SELECT a, uniqExact(b) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a; -SELECT a, sum(c) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a; +SELECT a, uniqExact(b) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 16; +SELECT a, uniqExact(b) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 1; +SELECT a, sum(c) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 16; +SELECT a, sum(c) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 1; SYSTEM FLUSH LOGS; SELECT used_aggregate_functions[1] AS func, + Settings['max_threads'] AS threads, ProfileEvents['AggregationOptimizedEqualRangesOfKeys'] > 0 FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query LIKE '%SELECT%FROM%t_optimize_equal_ranges%' -ORDER BY func; +ORDER BY func, threads; DROP TABLE t_optimize_equal_ranges; From b21a5fec7b99bf223482491759f72357ec447480 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 14 Mar 2024 15:06:08 +0100 Subject: [PATCH 301/374] Fix broken list and thank clang tidy --- src/Client/QueryFuzzer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp index 38e78157096..0a7cb1b36db 100644 --- a/src/Client/QueryFuzzer.cpp +++ b/src/Client/QueryFuzzer.cpp @@ -974,7 +974,7 @@ ASTPtr QueryFuzzer::reverseLiteralFuzzing(ASTPtr child) "toFixedString", /// Same as toDecimal "toInt128", "toInt256", - "toLowCardinality" + "toLowCardinality", "toNullable", "toUInt128", "toUInt256"}; From 026ac4deb14963077722ba527cabf76abd2ff0c0 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 14 Mar 2024 14:20:22 +0000 Subject: [PATCH 302/374] Fix heap-use-after-free for Merge table with alias --- src/Storages/StorageMerge.cpp | 3 +- ...t_storage_merge_aliases_analyzer.reference | 10 ++++ ...25_test_storage_merge_aliases_analyzer.sql | 60 +++++++++++++++++++ 3 files changed, 72 insertions(+), 1 
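To complement the `varSamp`/`varSampStable` documentation touched above, a short usage sketch (illustrative, not part of the patch):

``` sql
-- Sample variance of 1..5: squared deviations sum to 10, divided by n - 1 = 4.
SELECT varSamp(x) FROM (SELECT arrayJoin([1, 2, 3, 4, 5]) AS x); -- 2.5
```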
deletion(-) create mode 100644 tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.reference create mode 100644 tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 8410f0a8df8..e07bcf339c3 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -681,8 +681,9 @@ public: { if (column->hasExpression()) { + auto column_name = column->getColumnName(); node = column->getExpressionOrThrow(); - node->setAlias(column->getColumnName()); + node->setAlias(column_name); } else column->setColumnSource(replacement_table_expression); diff --git a/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.reference b/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.reference new file mode 100644 index 00000000000..b0fea25ed4b --- /dev/null +++ b/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.reference @@ -0,0 +1,10 @@ +alias1 +1 4 16 23 +23 16 4 1 +2020-02-02 1 4 2 16 3 23 +alias2 +1 3 4 4 +4 4 3 1 +23 16 4 1 +2020-02-01 1 3 2 4 3 4 +2020-02-02 1 4 2 16 3 23 diff --git a/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql b/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql new file mode 100644 index 00000000000..31035aa80cd --- /dev/null +++ b/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql @@ -0,0 +1,60 @@ +-- Tags: no-parallel + +drop table if exists merge; +set allow_experimental_analyzer = 1; +create table merge +( + dt Date, + colAlias0 Int32, + colAlias1 Int32, + col2 Int32, + colAlias2 UInt32, + col3 Int32, + colAlias3 UInt32 +) +engine = Merge(currentDatabase(), '^alias_'); + +drop table if exists alias_1; +drop table if exists alias_2; + +create table alias_1 +( + dt Date, + col Int32, + colAlias0 UInt32 alias col, + colAlias1 UInt32 alias col3 + colAlias0, + col2 Int32, + colAlias2 Int32 alias colAlias1 + col2 + 10, + col3 Int32, + colAlias3 Int32 alias colAlias2 + colAlias1 + col3 +) +engine = MergeTree() +order by (dt); + +insert into alias_1 (dt, col, col2, col3) values ('2020-02-02', 1, 2, 3); + +select 'alias1'; +select colAlias0, colAlias1, colAlias2, colAlias3 from alias_1; +select colAlias3, colAlias2, colAlias1, colAlias0 from merge; +select * from merge; + +create table alias_2 +( + dt Date, + col Int32, + col2 Int32, + colAlias0 UInt32 alias col, + colAlias3 Int32 alias col3 + colAlias0, + colAlias1 UInt32 alias colAlias0 + col2, + colAlias2 Int32 alias colAlias0 + colAlias1, + col3 Int32 +) +engine = MergeTree() +order by (dt); + +insert into alias_2 (dt, col, col2, col3) values ('2020-02-01', 1, 2, 3); + +select 'alias2'; +select colAlias0, colAlias1, colAlias2, colAlias3 from alias_2; +select colAlias3, colAlias2, colAlias1, colAlias0 from merge order by dt; +select * from merge order by dt; From e145115ef1a1cdb8f480cbbd51a7eab3f86a43d3 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Thu, 14 Mar 2024 16:15:06 +0100 Subject: [PATCH 303/374] Fixup --- src/Storages/StorageMerge.cpp | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f0b9d58f3dd..7124cd7393e 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -56,17 +56,12 @@ #include #include #include -#include "Common/logger_useful.h" #include #include #include #include -#include "Analyzer/QueryNode.h" -#include "Core/QueryProcessingStage.h" 
-#include "IO/WriteHelpers.h" #include #include -#include namespace DB { @@ -803,13 +798,10 @@ QueryTreeNodePtr replaceTableExpressionAndRemoveJoin( const ContextPtr & context, const Names & required_column_names) { - LOG_DEBUG(&Poco::Logger::get("replaceTableExpressionAndRemoveJoin"), "BEFORE:\n{}", query->dumpTree()); auto * query_node = query->as(); auto join_tree_type = query_node->getJoinTree()->getNodeType(); auto modified_query = query_node->cloneAndReplace(original_table_expression, replacement_table_expression); - LOG_DEBUG(&Poco::Logger::get("replaceTableExpressionAndRemoveJoin"), "AFTER:\n{}", modified_query->dumpTree()); - // For the case when join tree is just a table or a table function we don't need to do anything more. if (join_tree_type == QueryTreeNodeType::TABLE || join_tree_type == QueryTreeNodeType::TABLE_FUNCTION) return modified_query; @@ -969,8 +961,6 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextPtr & modified_ column_name_to_node); } - LOG_DEBUG(&Poco::Logger::get("getModifiedQueryInfo"), "{}", modified_query_info.query_tree->dumpTree()); - modified_query_info.query = queryNodeToSelectQuery(modified_query_info.query_tree); } else @@ -1058,8 +1048,6 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( Block pipe_header = builder->getHeader(); - LOG_DEBUG(&Poco::Logger::get("createSources"), "Processed:{}\nStorage:{}", toString(processed_stage), toString(storage_stage)); - if (allow_experimental_analyzer) { String table_alias = modified_query_info.query_tree->as()->getJoinTree()->as()->getAlias(); @@ -1067,7 +1055,8 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( String database_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? "_database" : table_alias + "._database"; String table_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? 
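The `_database` / `_table` handling in this hunk refers to the virtual columns that the `Merge` engine exposes for each underlying table. A minimal illustration of those columns (the table names here are assumptions for the example, not taken from the patch):

``` sql
CREATE TABLE t_a (x Int32) ENGINE = MergeTree ORDER BY x;
CREATE TABLE t_b (x Int32) ENGINE = MergeTree ORDER BY x;
CREATE TABLE t_all AS t_a ENGINE = Merge(currentDatabase(), '^t_[ab]$');

INSERT INTO t_a VALUES (1), (2);
INSERT INTO t_b VALUES (3);

-- _table (and _database) are filled in per source table.
SELECT _table, count() FROM t_all GROUP BY _table ORDER BY _table;
```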
"_table" : table_alias + "._table"; - if (has_database_virtual_column && common_header.has(database_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) + if (has_database_virtual_column && common_header.has(database_column) + && (storage_stage == QueryProcessingStage::FetchColumns || (dynamic_cast(&storage_snapshot_->storage) != nullptr && !pipe_header.has("'" + database_name + "'_String")))) { ColumnWithTypeAndName column; column.name = database_column; @@ -1082,7 +1071,8 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( { return std::make_shared(stream_header, adding_column_actions); }); } - if (has_table_virtual_column && common_header.has(table_column) && (storage_stage == QueryProcessingStage::FetchColumns || dynamic_cast(&storage_snapshot_->storage) != nullptr)) + if (has_table_virtual_column && common_header.has(table_column) + && (storage_stage == QueryProcessingStage::FetchColumns || (dynamic_cast(&storage_snapshot_->storage) != nullptr && !pipe_header.has("'" + table_name + "'_String")))) { ColumnWithTypeAndName column; column.name = table_column; @@ -1166,8 +1156,6 @@ QueryPlan ReadFromMerge::createPlanForTable( storage_snapshot_, modified_query_info); - LOG_DEBUG(&Poco::Logger::get("createPlanForTable"), "Storage: {}", toString(storage_stage)); - QueryPlan plan; if (processed_stage <= storage_stage) @@ -1545,8 +1533,7 @@ void ReadFromMerge::convertAndFilterSourceStream( ActionsDAG::MatchColumnsMode convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Name; - if (local_context->getSettingsRef().allow_experimental_analyzer - && (processed_stage == QueryProcessingStage::FetchColumns && dynamic_cast(&snapshot->storage) != nullptr)) + if (local_context->getSettingsRef().allow_experimental_analyzer && dynamic_cast(&snapshot->storage) != nullptr) convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Position; if (row_policy_data_opt) @@ -1554,8 +1541,6 @@ void ReadFromMerge::convertAndFilterSourceStream( row_policy_data_opt->addFilterTransform(builder); } - LOG_DEBUG(&Poco::Logger::get("convertAndFilterSourceStream"), "SOURCE:\n{}\nRESULT:\n{}", builder.getHeader().dumpStructure(), header.dumpStructure()); - auto convert_actions_dag = ActionsDAG::makeConvertingActions(builder.getHeader().getColumnsWithTypeAndName(), header.getColumnsWithTypeAndName(), convert_actions_match_columns_mode); From d558c4dcb7ed2d80ca63d1bf2f9e09d580c843c0 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Thu, 14 Mar 2024 16:17:58 +0100 Subject: [PATCH 304/374] Cleanup --- src/Storages/StorageMerge.cpp | 5 ++--- src/Storages/StorageMerge.h | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 7124cd7393e..f95464f4bff 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1121,7 +1121,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( /// Subordinary tables could have different but convertible types, like numeric types of different width. /// We must return streams with structure equals to structure of Merge table. 
convertAndFilterSourceStream( - header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, storage_stage); + header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder); } return builder; @@ -1473,8 +1473,7 @@ void ReadFromMerge::convertAndFilterSourceStream( const Aliases & aliases, const RowPolicyDataOpt & row_policy_data_opt, ContextPtr local_context, - QueryPipelineBuilder & builder, - QueryProcessingStage::Enum processed_stage [[maybe_unused]]) + QueryPipelineBuilder & builder) { Block before_block_header = builder.getHeader(); diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 556649f622d..3aabd7e26e3 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -277,8 +277,7 @@ private: const Aliases & aliases, const RowPolicyDataOpt & row_policy_data_opt, ContextPtr context, - QueryPipelineBuilder & builder, - QueryProcessingStage::Enum processed_stage); + QueryPipelineBuilder & builder); StorageMerge::StorageListWithLocks getSelectedTables( ContextPtr query_context, From c89010f80336384252e230158c85ae4f7ae1dae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 14 Mar 2024 15:22:01 +0000 Subject: [PATCH 305/374] Remove unused function template --- src/Dictionaries/RangeHashedDictionary_2.cpp | 114 ------------------- 1 file changed, 114 deletions(-) diff --git a/src/Dictionaries/RangeHashedDictionary_2.cpp b/src/Dictionaries/RangeHashedDictionary_2.cpp index 2329d621da4..d400fd1d830 100644 --- a/src/Dictionaries/RangeHashedDictionary_2.cpp +++ b/src/Dictionaries/RangeHashedDictionary_2.cpp @@ -118,120 +118,6 @@ size_t RangeHashedDictionary::getItemsShortCircuitImpl( return keys_found; } -template -template -void RangeHashedDictionary::getItemsImpl( - const Attribute & attribute, - const Columns & key_columns, - typename RangeHashedDictionary::ValueSetterFunc && set_value, - DefaultValueExtractor & default_value_extractor) const -{ - const auto & attribute_container = std::get>(attribute.container); - - size_t keys_found = 0; - - const ColumnPtr & range_column = key_columns.back(); - auto key_columns_copy = key_columns; - key_columns_copy.pop_back(); - - DictionaryKeysArenaHolder arena_holder; - DictionaryKeysExtractor keys_extractor(key_columns_copy, arena_holder.getComplexKeyArena()); - const size_t keys_size = keys_extractor.getKeysSize(); - - callOnRangeType( - dict_struct.range_min->type, - [&](const auto & types) - { - using Types = std::decay_t; - using RangeColumnType = typename Types::LeftType; - using RangeStorageType = typename RangeColumnType::ValueType; - using RangeInterval = Interval; - - const auto * range_column_typed = typeid_cast(range_column.get()); - if (!range_column_typed) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "Dictionary {} range column type should be equal to {}", - getFullName(), - dict_struct.range_min->type->getName()); - - const auto & range_column_data = range_column_typed->getData(); - - const auto & key_attribute_container = std::get>(key_attribute.container); - - for (size_t key_index = 0; key_index < keys_size; ++key_index) - { - auto key = keys_extractor.extractCurrentKey(); - const auto it = key_attribute_container.find(key); - - if (it) - { - const auto date = range_column_data[key_index]; - const auto & interval_tree = it->getMapped(); - - size_t value_index = 0; - std::optional range; - - interval_tree.find( - date, - [&](auto & interval, auto & interval_value_index) - { - if (range) 
- { - if (likely(configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::min) && interval < *range) - { - range = interval; - value_index = interval_value_index; - } - else if (configuration.lookup_strategy == RangeHashedDictionaryLookupStrategy::max && interval > *range) - { - range = interval; - value_index = interval_value_index; - } - } - else - { - range = interval; - value_index = interval_value_index; - } - - return true; - }); - - if (range.has_value()) - { - ++keys_found; - - ValueType value = attribute_container[value_index]; - - if constexpr (is_nullable) - { - bool is_null = (*attribute.is_value_nullable)[value_index]; - set_value(key_index, value, is_null); - } - else - { - set_value(key_index, value, false); - } - - keys_extractor.rollbackCurrentKey(); - continue; - } - } - - if constexpr (is_nullable) - set_value(key_index, default_value_extractor[key_index], default_value_extractor.isNullAt(key_index)); - else - set_value(key_index, default_value_extractor[key_index], false); - - keys_extractor.rollbackCurrentKey(); - } - }); - - query_count.fetch_add(keys_size, std::memory_order_relaxed); - found_count.fetch_add(keys_found, std::memory_order_relaxed); -} - #define INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType, IsNullable, ValueType) \ template size_t RangeHashedDictionary::getItemsShortCircuitImpl( \ const Attribute & attribute, \ From 22ca96cf8d44aa907c0c3c463e4b0a5628312aa0 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 14 Mar 2024 16:05:01 +0000 Subject: [PATCH 306/374] Disable optimize_rewrite_sum_if_to_count_if if return is nullable --- src/Analyzer/Passes/SumIfToCountIfPass.cpp | 2 +- .../0_stateless/03010_sum_to_to_count_if_nullable.reference | 0 .../queries/0_stateless/03010_sum_to_to_count_if_nullable.sql | 3 +++ 3 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference create mode 100644 tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.cpp b/src/Analyzer/Passes/SumIfToCountIfPass.cpp index 1a6ee9215a9..d374d92c1fb 100644 --- a/src/Analyzer/Passes/SumIfToCountIfPass.cpp +++ b/src/Analyzer/Passes/SumIfToCountIfPass.cpp @@ -32,7 +32,7 @@ public: return; auto * function_node = node->as(); - if (!function_node || !function_node->isAggregateFunction()) + if (!function_node || !function_node->isAggregateFunction() || function_node->getResultType()->isNullable()) return; auto function_name = function_node->getFunctionName(); diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql new file mode 100644 index 00000000000..394cd4f1ea5 --- /dev/null +++ b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql @@ -0,0 +1,3 @@ +SET optimize_rewrite_sum_if_to_count_if = 1; +SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10) SETTINGS allow_experimental_analyzer=0; +SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10) SETTINGS allow_experimental_analyzer=1; \ No newline at end of file From 7c5ef07c7b9f20db16811eb60dbe27a36b821575 Mon Sep 17 00:00:00 2001 From: Peter Date: Fri, 15 Mar 2024 00:18:37 +0800 Subject: [PATCH 307/374] Add checksum validating before 
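Regarding the `optimize_rewrite_sum_if_to_count_if` change above: the pass rewrites `sumIf(1, cond)` into `countIf(cond)`, which is only safe while the result type is not Nullable, because `countIf` never produces NULL. A small illustrative sketch (not from the patch itself):

``` sql
SET optimize_rewrite_sum_if_to_count_if = 1;

-- Equivalent results, so the rewrite is harmless here (both return 5).
SELECT sumIf(1, number % 2 = 0), countIf(number % 2 = 0) FROM numbers(10);

-- With a Nullable argument, sumIf returns Nullable(UInt64),
-- so rewriting it to plain countIf would change the result type.
SELECT toTypeName(sumIf(toNullable(1), number % 2 = 0)) FROM numbers(10);
```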
extracting archive --- docs/en/getting-started/example-datasets/nyc-taxi.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/en/getting-started/example-datasets/nyc-taxi.md b/docs/en/getting-started/example-datasets/nyc-taxi.md index cac75fdc45a..516a6d54248 100644 --- a/docs/en/getting-started/example-datasets/nyc-taxi.md +++ b/docs/en/getting-started/example-datasets/nyc-taxi.md @@ -248,6 +248,9 @@ Some of the files might not download fully. Check the file sizes and re-download ``` bash $ curl -O https://datasets.clickhouse.com/trips_mergetree/partitions/trips_mergetree.tar +# Validate the checksum +$ md5sum trips_mergetree.tar +# Checksum should be equal to: f3b8d469b41d9a82da064ded7245d12c $ tar xvf trips_mergetree.tar -C /var/lib/clickhouse # path to ClickHouse data directory $ # check permissions of unpacked data, fix if required $ sudo service clickhouse-server restart From 0b3b734c9b268e65c758e1f2092d176dfc1d9489 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Thu, 14 Mar 2024 17:37:49 +0100 Subject: [PATCH 308/374] Analyzer: Fix planner context for subquery in StorageMerge --- src/Planner/PlannerContext.cpp | 6 ++++++ src/Planner/PlannerContext.h | 8 ++++++-- src/Storages/StorageMerge.cpp | 22 ++++++++++++--------- src/Storages/StorageMerge.h | 2 +- tests/analyzer_integration_broken_tests.txt | 1 - 5 files changed, 26 insertions(+), 13 deletions(-) diff --git a/src/Planner/PlannerContext.cpp b/src/Planner/PlannerContext.cpp index f33255f0a44..f939b959ce7 100644 --- a/src/Planner/PlannerContext.cpp +++ b/src/Planner/PlannerContext.cpp @@ -48,6 +48,12 @@ PlannerContext::PlannerContext(ContextMutablePtr query_context_, GlobalPlannerCo , is_ast_level_optimization_allowed(!(query_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY || select_query_options_.ignore_ast_optimizations)) {} +PlannerContext::PlannerContext(ContextMutablePtr query_context_, PlannerContextPtr planner_context_) + : query_context(std::move(query_context_)) + , global_planner_context(planner_context_->global_planner_context) + , is_ast_level_optimization_allowed(planner_context_->is_ast_level_optimization_allowed) +{} + TableExpressionData & PlannerContext::getOrCreateTableExpressionData(const QueryTreeNodePtr & table_expression_node) { auto [it, _] = table_expression_node_to_data.emplace(table_expression_node, TableExpressionData()); diff --git a/src/Planner/PlannerContext.h b/src/Planner/PlannerContext.h index 4d9ba037cac..418240fa34e 100644 --- a/src/Planner/PlannerContext.h +++ b/src/Planner/PlannerContext.h @@ -75,12 +75,18 @@ private: using GlobalPlannerContextPtr = std::shared_ptr; +class PlannerContext; +using PlannerContextPtr = std::shared_ptr; + class PlannerContext { public: /// Create planner context with query context and global planner context PlannerContext(ContextMutablePtr query_context_, GlobalPlannerContextPtr global_planner_context_, const SelectQueryOptions & select_query_options_); + /// Create planner with modified query_context + PlannerContext(ContextMutablePtr query_context_, PlannerContextPtr planner_context_); + /// Get planner context query context ContextPtr getQueryContext() const { @@ -191,6 +197,4 @@ private: PreparedSets prepared_sets; }; -using PlannerContextPtr = std::shared_ptr; - } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 8410f0a8df8..4c53f67c76b 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -422,6 +422,7 @@ void 
ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu std::vector> pipelines; auto table_it = selected_tables.begin(); + auto modified_context = Context::createCopy(context); for (size_t i = 0; i < selected_tables.size(); ++i, ++table_it) { auto & child_plan = child_plans->at(i); @@ -438,7 +439,7 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu if (child_plan.row_policy_data_opt) child_plan.row_policy_data_opt->extendNames(real_column_names); - auto modified_query_info = getModifiedQueryInfo(context, table, nested_storage_snaphsot, real_column_names, column_names_as_aliases, aliases); + auto modified_query_info = getModifiedQueryInfo(modified_context, table, nested_storage_snaphsot, real_column_names, column_names_as_aliases, aliases); auto source_pipeline = createSources( child_plan.plan, @@ -547,9 +548,10 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ } /// Settings will be modified when planning children tables. - auto modified_context = Context::createCopy(context); for (const auto & table : selected_tables) { + auto modified_context = Context::createCopy(context); + size_t current_need_streams = tables_count >= num_streams ? 1 : (num_streams / tables_count); size_t current_streams = std::min(current_need_streams, remaining_streams); remaining_streams -= current_streams; @@ -570,25 +572,25 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ auto & aliases = res.back().table_aliases; auto & row_policy_data_opt = res.back().row_policy_data_opt; auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr(); - auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot, context); + auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot, modified_context); Names column_names_as_aliases; Names real_column_names = column_names; const auto & database_name = std::get<0>(table); const auto & table_name = std::get<3>(table); - auto row_policy_filter_ptr = context->getRowPolicyFilter( + auto row_policy_filter_ptr = modified_context->getRowPolicyFilter( database_name, table_name, RowPolicyFilterType::SELECT_FILTER); if (row_policy_filter_ptr) { - row_policy_data_opt = RowPolicyData(row_policy_filter_ptr, storage, context); + row_policy_data_opt = RowPolicyData(row_policy_filter_ptr, storage, modified_context); row_policy_data_opt->extendNames(real_column_names); } auto modified_query_info - = getModifiedQueryInfo(context, table, nested_storage_snaphsot, real_column_names, column_names_as_aliases, aliases); + = getModifiedQueryInfo(modified_context, table, nested_storage_snaphsot, real_column_names, column_names_as_aliases, aliases); if (!context->getSettingsRef().allow_experimental_analyzer) { @@ -657,10 +659,9 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ row_policy_data_opt, modified_context, current_streams); + res.back().plan.addInterpreterContext(modified_context); } - if (!res.empty()) - res[0].plan.addInterpreterContext(modified_context); return res; } @@ -863,7 +864,7 @@ QueryTreeNodePtr replaceTableExpressionAndRemoveJoin( } -SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextPtr & modified_context, +SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextMutablePtr & modified_context, const StorageWithLockAndName & storage_with_lock_and_name, const StorageSnapshotPtr & storage_snapshot_, Names required_column_names, @@ -877,6 +878,9 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextPtr & modified_ if 
(modified_query_info.optimized_prewhere_info && !modified_query_info.prewhere_info) modified_query_info.prewhere_info = modified_query_info.optimized_prewhere_info; + if (modified_query_info.planner_context) + modified_query_info.planner_context = std::make_shared(modified_context, modified_query_info.planner_context); + if (modified_query_info.table_expression) { auto replacement_table_expression = std::make_shared(storage, storage_lock, storage_snapshot_); diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 556649f622d..c049d50f3b4 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -192,7 +192,7 @@ private: using Aliases = std::vector; - SelectQueryInfo getModifiedQueryInfo(const ContextPtr & modified_context, + SelectQueryInfo getModifiedQueryInfo(const ContextMutablePtr & modified_context, const StorageWithLockAndName & storage_with_lock_and_name, const StorageSnapshotPtr & storage_snapshot, Names required_column_names, diff --git a/tests/analyzer_integration_broken_tests.txt b/tests/analyzer_integration_broken_tests.txt index 31527dc3476..d2ef983f26d 100644 --- a/tests/analyzer_integration_broken_tests.txt +++ b/tests/analyzer_integration_broken_tests.txt @@ -1,4 +1,3 @@ test_build_sets_from_multiple_threads/test.py::test_set test_concurrent_backups_s3/test.py::test_concurrent_backups test_distributed_type_object/test.py::test_distributed_type_object -test_merge_table_over_distributed/test.py::test_global_in From a6f1e09e69a583ab2f235f918e4bc0d92949d478 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 14 Mar 2024 17:35:10 +0000 Subject: [PATCH 309/374] Test test test_system_clusters_actual_information flakiness --- .../test_system_clusters_actual_information/test.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/tests/integration/test_system_clusters_actual_information/test.py b/tests/integration/test_system_clusters_actual_information/test.py index e90a6cdeb3f..c6e3262fd62 100644 --- a/tests/integration/test_system_clusters_actual_information/test.py +++ b/tests/integration/test_system_clusters_actual_information/test.py @@ -12,20 +12,11 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] ) -node_1 = cluster.add_instance("node_1", with_zookeeper=True) - @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node_1.query_with_retry("DROP TABLE IF EXISTS replicated") - - node_1.query_with_retry( - """CREATE TABLE replicated (id UInt32, date Date) ENGINE = - ReplicatedMergeTree('/clickhouse/tables/replicated', 'node_1') ORDER BY id PARTITION BY toYYYYMM(date)""" - ) - node.query_with_retry( "CREATE TABLE distributed (id UInt32, date Date) ENGINE = Distributed('test_cluster', 'default', 'replicated')" ) @@ -37,8 +28,6 @@ def started_cluster(): def test(started_cluster): - cluster.pause_container("node_1") - node.query("SYSTEM RELOAD CONFIG") error = node.query_and_get_error( "SELECT count() FROM distributed SETTINGS receive_timeout=1, handshake_timeout_ms=1" @@ -67,5 +56,3 @@ def test(started_cluster): assert recovery_time == 0 assert errors_count == 0 - - cluster.unpause_container("node_1") From 5c8f2bbda0124704fffc414e329b37d8245c42e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 14 Mar 2024 17:45:42 +0000 Subject: [PATCH 310/374] Split template instantiations into separate files --- src/Dictionaries/RangeHashedDictionary.h | 1 + ... 
=> RangeHashedDictionaryGetItemsImpl.txx} | 30 +++++++++---------- ...ngeHashedDictionaryGetItemsImplDecimal.cpp | 10 +++++++ ...RangeHashedDictionaryGetItemsImplFloat.cpp | 7 +++++ .../RangeHashedDictionaryGetItemsImplInt.cpp | 11 +++++++ ...angeHashedDictionaryGetItemsImplOthers.cpp | 10 +++++++ .../RangeHashedDictionaryGetItemsImplUInt.cpp | 11 +++++++ ...hedDictionaryGetItemsShortCircuitImpl.txx} | 30 ++++++++----------- ...tionaryGetItemsShortCircuitImplDecimal.cpp | 10 +++++++ ...ictionaryGetItemsShortCircuitImplFloat.cpp | 7 +++++ ...dDictionaryGetItemsShortCircuitImplInt.cpp | 11 +++++++ ...ctionaryGetItemsShortCircuitImplOthers.cpp | 10 +++++++ ...DictionaryGetItemsShortCircuitImplUInt.cpp | 11 +++++++ 13 files changed, 126 insertions(+), 33 deletions(-) rename src/Dictionaries/{RangeHashedDictionary_3.cpp => RangeHashedDictionaryGetItemsImpl.txx} (98%) create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsImplDecimal.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsImplFloat.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsImplInt.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsImplOthers.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsImplUInt.cpp rename src/Dictionaries/{RangeHashedDictionary_2.cpp => RangeHashedDictionaryGetItemsShortCircuitImpl.txx} (98%) create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplDecimal.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplFloat.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplInt.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplOthers.cpp create mode 100644 src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplUInt.cpp diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index a5dedae97c4..23f7df8133e 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -292,6 +292,7 @@ private: extern template class RangeHashedDictionary; extern template class RangeHashedDictionary; + namespace { template diff --git a/src/Dictionaries/RangeHashedDictionary_3.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsImpl.txx similarity index 98% rename from src/Dictionaries/RangeHashedDictionary_3.cpp rename to src/Dictionaries/RangeHashedDictionaryGetItemsImpl.txx index a3136d6f63d..9da2b0faf4a 100644 --- a/src/Dictionaries/RangeHashedDictionary_3.cpp +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsImpl.txx @@ -1,5 +1,18 @@ #include +#define INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType, IsNullable, AttributeType, ValueType) \ +template void RangeHashedDictionary::getItemsImpl>( \ + const Attribute & attribute,\ + const Columns & key_columns,\ + typename RangeHashedDictionary::ValueSetterFunc && set_value,\ + DictionaryDefaultValueExtractor & default_value_extractor) const; + +#define INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(AttributeType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Simple, true, AttributeType, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Simple, false, AttributeType, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Complex, true, AttributeType, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Complex, false, AttributeType, DictionaryValueType) + namespace DB { @@ -13,6 +26,7 @@ void RangeHashedDictionary::getItemsImpl( { const 
auto & attribute_container = std::get>(attribute.container); + size_t keys_found = 0; const ColumnPtr & range_column = key_columns.back(); @@ -116,20 +130,4 @@ void RangeHashedDictionary::getItemsImpl( query_count.fetch_add(keys_size, std::memory_order_relaxed); found_count.fetch_add(keys_found, std::memory_order_relaxed); } - -#define INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType, IsNullable, AttributeType, ValueType) \ -template void RangeHashedDictionary::getItemsImpl>( \ - const Attribute & attribute,\ - const Columns & key_columns,\ - typename RangeHashedDictionary::ValueSetterFunc && set_value,\ - DictionaryDefaultValueExtractor & default_value_extractor) const; - -#define INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(AttributeType) \ - INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Simple, true, AttributeType, DictionaryValueType) \ - INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Simple, false, AttributeType, DictionaryValueType) \ - INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Complex, true, AttributeType, DictionaryValueType) \ - INSTANTIATE_GET_ITEMS_IMPL(DictionaryKeyType::Complex, false, AttributeType, DictionaryValueType) - -CALL_FOR_ALL_DICTIONARY_ATTRIBUTE_TYPES(INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE) - } diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsImplDecimal.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsImplDecimal.cpp new file mode 100644 index 00000000000..f1ee4dd58e1 --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsImplDecimal.cpp @@ -0,0 +1,10 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Decimal32); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Decimal64); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Decimal128); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Decimal256); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(DateTime64); +} diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsImplFloat.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsImplFloat.cpp new file mode 100644 index 00000000000..291a55a76db --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsImplFloat.cpp @@ -0,0 +1,7 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Float32); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Float64); +} diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsImplInt.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsImplInt.cpp new file mode 100644 index 00000000000..a0748a9f486 --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsImplInt.cpp @@ -0,0 +1,11 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Int8); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Int16); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Int32); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Int64); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Int128); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Int256); +} diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsImplOthers.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsImplOthers.cpp new file mode 100644 index 00000000000..96e5bb54d0b --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsImplOthers.cpp @@ -0,0 +1,10 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(UUID); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(IPv4); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(IPv6); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(String); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(Array); +} diff --git 
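The `getItemsImpl` code being moved into `.txx` files implements lookups for dictionaries with the `range_hashed` layout. As a reminder of what that layout does at the SQL level, a hedged sketch (the names `discounts` and `discounts_dict` below are assumptions for the example):

``` sql
CREATE TABLE discounts (id UInt64, range_start Date, range_end Date, amount Float64)
ENGINE = MergeTree ORDER BY id;

INSERT INTO discounts VALUES
    (1, '2024-01-01', '2024-06-30', 0.1),
    (1, '2024-07-01', '2024-12-31', 0.2);

CREATE DICTIONARY discounts_dict
(
    id UInt64,
    range_start Date,
    range_end Date,
    amount Float64
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'discounts'))
LIFETIME(MIN 0 MAX 300)
LAYOUT(RANGE_HASHED())
RANGE(MIN range_start MAX range_end);

-- The range column is passed as the last dictGet argument.
SELECT dictGet('discounts_dict', 'amount', toUInt64(1), toDate('2024-08-15')); -- 0.2
```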
a/src/Dictionaries/RangeHashedDictionaryGetItemsImplUInt.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsImplUInt.cpp new file mode 100644 index 00000000000..e60a7189a2d --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsImplUInt.cpp @@ -0,0 +1,11 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(UInt8); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(UInt16); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(UInt32); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(UInt64); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(UInt128); +INSTANTIATE_GET_ITEMS_IMPL_FOR_ATTRIBUTE_TYPE(UInt256); +} diff --git a/src/Dictionaries/RangeHashedDictionary_2.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImpl.txx similarity index 98% rename from src/Dictionaries/RangeHashedDictionary_2.cpp rename to src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImpl.txx index d400fd1d830..5807af519f9 100644 --- a/src/Dictionaries/RangeHashedDictionary_2.cpp +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImpl.txx @@ -1,9 +1,21 @@ #include +#define INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType, IsNullable, ValueType) \ + template size_t RangeHashedDictionary::getItemsShortCircuitImpl( \ + const Attribute & attribute, \ + const Columns & key_columns, \ + typename RangeHashedDictionary::ValueSetterFunc && set_value, \ + IColumn::Filter & default_mask) const; + +#define INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(AttributeType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Simple, true, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Simple, false, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Complex, true, DictionaryValueType) \ + INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Complex, false, DictionaryValueType) + namespace DB { - template template size_t RangeHashedDictionary::getItemsShortCircuitImpl( @@ -117,20 +129,4 @@ size_t RangeHashedDictionary::getItemsShortCircuitImpl( found_count.fetch_add(keys_found, std::memory_order_relaxed); return keys_found; } - -#define INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType, IsNullable, ValueType) \ - template size_t RangeHashedDictionary::getItemsShortCircuitImpl( \ - const Attribute & attribute, \ - const Columns & key_columns, \ - typename RangeHashedDictionary::ValueSetterFunc && set_value, \ - IColumn::Filter & default_mask) const; - -#define INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(AttributeType) \ - INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Simple, true, DictionaryValueType) \ - INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Simple, false, DictionaryValueType) \ - INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Complex, true, DictionaryValueType) \ - INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL(DictionaryKeyType::Complex, false, DictionaryValueType) - -CALL_FOR_ALL_DICTIONARY_ATTRIBUTE_TYPES(INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE) - } diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplDecimal.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplDecimal.cpp new file mode 100644 index 00000000000..298369e4735 --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplDecimal.cpp @@ -0,0 +1,10 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Decimal32); 
+INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Decimal64); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Decimal128); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Decimal256); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(DateTime64); +} diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplFloat.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplFloat.cpp new file mode 100644 index 00000000000..e8e8da6c75e --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplFloat.cpp @@ -0,0 +1,7 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Float32); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Float64); +} diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplInt.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplInt.cpp new file mode 100644 index 00000000000..c685b9b5331 --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplInt.cpp @@ -0,0 +1,11 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Int8); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Int16); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Int32); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Int64); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Int128); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Int256); +} diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplOthers.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplOthers.cpp new file mode 100644 index 00000000000..46ea141b59b --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplOthers.cpp @@ -0,0 +1,10 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(UUID); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(IPv4); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(IPv6); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(String); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(Array); +} diff --git a/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplUInt.cpp b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplUInt.cpp new file mode 100644 index 00000000000..18421fd7e2d --- /dev/null +++ b/src/Dictionaries/RangeHashedDictionaryGetItemsShortCircuitImplUInt.cpp @@ -0,0 +1,11 @@ +#include + +namespace DB +{ +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(UInt8); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(UInt16); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(UInt32); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(UInt64); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(UInt128); +INSTANTIATE_GET_ITEMS_SHORT_CIRCUIT_IMPL_FOR_ATTRIBUTE_TYPE(UInt256); +} From 7b79d92bbe4c4070135da9747be02edb8499c6a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 14 Mar 2024 17:49:23 +0000 Subject: [PATCH 311/374] Adjust large object check --- src/Dictionaries/RangeHashedDictionary.h | 1 - utils/check-style/check-large-objects.sh | 1 - 2 files changed, 2 deletions(-) diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index 23f7df8133e..a5dedae97c4 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ 
b/src/Dictionaries/RangeHashedDictionary.h @@ -292,7 +292,6 @@ private: extern template class RangeHashedDictionary; extern template class RangeHashedDictionary; - namespace { template diff --git a/utils/check-style/check-large-objects.sh b/utils/check-style/check-large-objects.sh index 5ef57ea4f6c..5b0e8e88df5 100755 --- a/utils/check-style/check-large-objects.sh +++ b/utils/check-style/check-large-objects.sh @@ -6,7 +6,6 @@ TU_EXCLUDES=( CastOverloadResolver AggregateFunctionUniq FunctionsConversion - RangeHashedDictionary_ Aggregator ) From c6826145dbc6d21c90b3346a26c6c1c53323d138 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 14 Mar 2024 17:59:58 +0000 Subject: [PATCH 312/374] Remove unnecessary empty lines --- src/Dictionaries/RangeHashedDictionary.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index a5dedae97c4..0469e82d7be 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -31,7 +31,6 @@ #include #include - namespace DB { @@ -44,7 +43,6 @@ namespace ErrorCodes extern const int TYPE_MISMATCH; } - enum class RangeHashedDictionaryLookupStrategy : uint8_t { min, @@ -236,8 +234,6 @@ private: static Attribute createAttribute(const DictionaryAttribute & dictionary_attribute); - - template using ValueSetterFunc = std::function; From e8da3bb2eb4ba0c54e948c1756c2d30e548d4432 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 14 Mar 2024 19:06:02 +0100 Subject: [PATCH 313/374] Fix style --- .../integration/test_system_clusters_actual_information/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_system_clusters_actual_information/test.py b/tests/integration/test_system_clusters_actual_information/test.py index c6e3262fd62..8b6436aeb5c 100644 --- a/tests/integration/test_system_clusters_actual_information/test.py +++ b/tests/integration/test_system_clusters_actual_information/test.py @@ -13,6 +13,7 @@ node = cluster.add_instance( "node", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] ) + @pytest.fixture(scope="module") def started_cluster(): try: From 7e9d863c22e65ce34539670519de096a9f2261e6 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 14 Mar 2024 18:08:55 +0000 Subject: [PATCH 314/374] Update reference of the test --- .../0_stateless/03010_sum_to_to_count_if_nullable.reference | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference index e69de29bb2d..8627f639a03 100644 --- a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference +++ b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference @@ -0,0 +1,2 @@ +(5,NULL) +(5,NULL) From b65beba1fd439a1e98b028b85548873e8339335c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 14 Mar 2024 18:13:06 +0000 Subject: [PATCH 315/374] Remove not used error code declaration --- src/Dictionaries/RangeHashedDictionary.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Dictionaries/RangeHashedDictionary.cpp b/src/Dictionaries/RangeHashedDictionary.cpp index 8299c3ad93a..30a0123ade6 100644 --- a/src/Dictionaries/RangeHashedDictionary.cpp +++ b/src/Dictionaries/RangeHashedDictionary.cpp @@ -3,11 +3,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int 
TYPE_MISMATCH; -} - template ColumnPtr RangeHashedDictionary::getColumn( const std::string & attribute_name, From b0008495294761e3394de0060dda61ee66808476 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 14 Mar 2024 18:15:12 +0000 Subject: [PATCH 316/374] Better --- src/Analyzer/Passes/SumIfToCountIfPass.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.cpp b/src/Analyzer/Passes/SumIfToCountIfPass.cpp index d374d92c1fb..2c41b6dc467 100644 --- a/src/Analyzer/Passes/SumIfToCountIfPass.cpp +++ b/src/Analyzer/Passes/SumIfToCountIfPass.cpp @@ -32,7 +32,7 @@ public: return; auto * function_node = node->as(); - if (!function_node || !function_node->isAggregateFunction() || function_node->getResultType()->isNullable()) + if (!function_node || !function_node->isAggregateFunction()) return; auto function_name = function_node->getFunctionName(); @@ -54,10 +54,10 @@ public: if (!constant_node) return; - const auto & constant_value_literal = constant_node->getValue(); - if (!isInt64OrUInt64FieldType(constant_value_literal.getType())) + if (auto constant_type = constant_node->getResultType(); !isUInt64(constant_type) && !isInt64(constant_type)) return; + const auto & constant_value_literal = constant_node->getValue(); if (getSettings().aggregate_functions_null_for_empty) return; From e9f81170873bd06bf8c9875fa90654ec6fb0abc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 14 Mar 2024 20:16:25 +0100 Subject: [PATCH 317/374] Increase memory limit for coverage builds --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9ffb4789dc9..eff6dd3ff6a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -56,13 +56,13 @@ option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile t if (ENABLE_CHECK_HEAVY_BUILDS) # set DATA (since RSS does not work since 2.6.x+) to 5G set (RLIMIT_DATA 5000000000) - # set VIRT (RLIMIT_AS) to 10G (DATA*10) + # set VIRT (RLIMIT_AS) to 10G (DATA*2) set (RLIMIT_AS 10000000000) # set CPU time limit to 1000 seconds set (RLIMIT_CPU 1000) # -fsanitize=memory and address are too heavy - if (SANITIZE) + if (SANITIZE OR SANITIZE_COVERAGE OR WITH_COVERAGE) set (RLIMIT_DATA 10000000000) # 10G endif() From e8b3cc28518a409fbefe5206f676c2623d881484 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Thu, 14 Mar 2024 20:01:12 +0000 Subject: [PATCH 318/374] CI: skip hdfs tests for arm #do_not_test #batch_0 #no_merge_commit #ci_set_arm --- .../integration/test_allowed_url_from_config/test.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_allowed_url_from_config/test.py b/tests/integration/test_allowed_url_from_config/test.py index 3106cf12702..fb7564ae9d3 100644 --- a/tests/integration/test_allowed_url_from_config/test.py +++ b/tests/integration/test_allowed_url_from_config/test.py @@ -1,3 +1,4 @@ +import platform import pytest from helpers.cluster import ClickHouseCluster @@ -16,9 +17,11 @@ node5 = cluster.add_instance( "node5", main_configs=["configs/config_without_allowed_hosts.xml"] ) node6 = cluster.add_instance("node6", main_configs=["configs/config_for_remote.xml"]) -node7 = cluster.add_instance( - "node7", main_configs=["configs/config_for_redirect.xml"], with_hdfs=True -) + +if platform.processor() != "arm": + node7 = cluster.add_instance( + "node7", main_configs=["configs/config_for_redirect.xml"], with_hdfs=True + ) @pytest.fixture(scope="module") @@ 
-270,6 +273,7 @@ def test_table_function_remote(start_cluster): ) +@pytest.mark.skipif(platform.processor() == "arm", reason="skip for ARM") def test_redirect(start_cluster): hdfs_api = start_cluster.hdfs_api @@ -284,6 +288,7 @@ def test_redirect(start_cluster): node7.query("DROP TABLE table_test_7_1") +@pytest.mark.skipif(platform.processor() == "arm", reason="skip for ARM") def test_HDFS(start_cluster): assert "not allowed" in node7.query_and_get_error( "CREATE TABLE table_test_7_2 (word String) ENGINE=HDFS('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'CSV')" @@ -293,6 +298,7 @@ def test_HDFS(start_cluster): ) +@pytest.mark.skipif(platform.processor() == "arm", reason="skip for ARM") def test_schema_inference(start_cluster): error = node7.query_and_get_error("desc url('http://test.com`, 'TSVRaw'')") assert error.find("ReadWriteBufferFromHTTPBase") == -1 From e08eaebc9946d39d19b4e995cfdc962719c55c44 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 14 Mar 2024 20:36:18 +0000 Subject: [PATCH 319/374] Add sanity check for poll_max_batch_size FileLog setting to avoid big untracked allocations --- src/Storages/FileLog/FileLogSettings.cpp | 6 ++++++ .../03010_file_log_large_poll_batch_size.reference | 0 .../0_stateless/03010_file_log_large_poll_batch_size.sql | 2 ++ 3 files changed, 8 insertions(+) create mode 100644 tests/queries/0_stateless/03010_file_log_large_poll_batch_size.reference create mode 100644 tests/queries/0_stateless/03010_file_log_large_poll_batch_size.sql diff --git a/src/Storages/FileLog/FileLogSettings.cpp b/src/Storages/FileLog/FileLogSettings.cpp index 2cd42c35870..8e245285b9a 100644 --- a/src/Storages/FileLog/FileLogSettings.cpp +++ b/src/Storages/FileLog/FileLogSettings.cpp @@ -11,6 +11,7 @@ namespace DB namespace ErrorCodes { extern const int UNKNOWN_SETTING; + extern const int INVALID_SETTING_VALUE; } IMPLEMENT_SETTINGS_TRAITS(FileLogSettingsTraits, LIST_OF_FILELOG_SETTINGS) @@ -36,6 +37,11 @@ void FileLogSettings::loadFromQuery(ASTStorage & storage_def) settings_ast->is_standalone = false; storage_def.set(storage_def.settings, settings_ast); } + + /// Check that batch size is not too high (the same as we check setting max_block_size). 
+ constexpr UInt64 max_sane_block_rows_size = 4294967296; // 2^32 + if (poll_max_batch_size > max_sane_block_rows_size) + throw Exception(ErrorCodes::INVALID_SETTING_VALUE, "Sanity check: 'poll_max_batch_size' value is too high ({})", poll_max_batch_size); } } diff --git a/tests/queries/0_stateless/03010_file_log_large_poll_batch_size.reference b/tests/queries/0_stateless/03010_file_log_large_poll_batch_size.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03010_file_log_large_poll_batch_size.sql b/tests/queries/0_stateless/03010_file_log_large_poll_batch_size.sql new file mode 100644 index 00000000000..2663011f2ec --- /dev/null +++ b/tests/queries/0_stateless/03010_file_log_large_poll_batch_size.sql @@ -0,0 +1,2 @@ +create table test (number UInt64) engine=FileLog('./user_files/data.jsonl', 'JSONEachRow') settings poll_max_batch_size=18446744073709; -- {serverError INVALID_SETTING_VALUE} + From 1cae77997a648414d2e16e806c322fae2f2da301 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 14 Mar 2024 21:34:25 +0000 Subject: [PATCH 320/374] Try revert stage back --- src/Storages/StorageMerge.cpp | 8 +++++--- src/Storages/StorageMerge.h | 3 ++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f95464f4bff..52362eb5cb8 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1121,7 +1121,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( /// Subordinary tables could have different but convertible types, like numeric types of different width. /// We must return streams with structure equals to structure of Merge table. convertAndFilterSourceStream( - header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder); + header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, processed_stage); } return builder; @@ -1473,7 +1473,8 @@ void ReadFromMerge::convertAndFilterSourceStream( const Aliases & aliases, const RowPolicyDataOpt & row_policy_data_opt, ContextPtr local_context, - QueryPipelineBuilder & builder) + QueryPipelineBuilder & builder, + QueryProcessingStage::Enum processed_stage) { Block before_block_header = builder.getHeader(); @@ -1532,7 +1533,8 @@ void ReadFromMerge::convertAndFilterSourceStream( ActionsDAG::MatchColumnsMode convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Name; - if (local_context->getSettingsRef().allow_experimental_analyzer && dynamic_cast(&snapshot->storage) != nullptr) + if (local_context->getSettingsRef().allow_experimental_analyzer + && (processed_stage != QueryProcessingStage::FetchColumns || dynamic_cast(&snapshot->storage) != nullptr)) convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Position; if (row_policy_data_opt) diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 3aabd7e26e3..556649f622d 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -277,7 +277,8 @@ private: const Aliases & aliases, const RowPolicyDataOpt & row_policy_data_opt, ContextPtr context, - QueryPipelineBuilder & builder); + QueryPipelineBuilder & builder, + QueryProcessingStage::Enum processed_stage); StorageMerge::StorageListWithLocks getSelectedTables( ContextPtr query_context, From e418f249b8a08484b10bbd5a60de5718686de2f3 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 15 Mar 2024 00:40:28 +0000 Subject: [PATCH 321/374] fix test --- 
tests/queries/0_stateless/03008_optimize_equal_ranges.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03008_optimize_equal_ranges.sql b/tests/queries/0_stateless/03008_optimize_equal_ranges.sql index 4d521420741..6d769c7382a 100644 --- a/tests/queries/0_stateless/03008_optimize_equal_ranges.sql +++ b/tests/queries/0_stateless/03008_optimize_equal_ranges.sql @@ -5,6 +5,7 @@ CREATE TABLE t_optimize_equal_ranges (a UInt64, b String, c UInt64) ENGINE = Mer SET max_block_size = 1024; SET max_bytes_before_external_group_by = 0; SET optimize_aggregation_in_order = 0; +SET optimize_use_projections = 0; INSERT INTO t_optimize_equal_ranges SELECT 0, toString(number), number FROM numbers(30000); INSERT INTO t_optimize_equal_ranges SELECT 1, toString(number), number FROM numbers(30000); From 978fb783517639f495fbfbc67fe721cb8c8b2f5d Mon Sep 17 00:00:00 2001 From: peter279k Date: Fri, 15 Mar 2024 11:18:21 +0800 Subject: [PATCH 322/374] Correct Criteo example dataset instruction section --- docs/en/getting-started/example-datasets/criteo.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/getting-started/example-datasets/criteo.md b/docs/en/getting-started/example-datasets/criteo.md index a2e0fda0cb0..4becdb50731 100644 --- a/docs/en/getting-started/example-datasets/criteo.md +++ b/docs/en/getting-started/example-datasets/criteo.md @@ -55,7 +55,7 @@ CREATE TABLE criteo_log ( ) ENGINE = Log; ``` -Download the data: +Insert the data: ``` bash $ for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done From a3da67ba9ae17440d4809e76ad19d2e308469e44 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 15 Mar 2024 06:47:10 +0000 Subject: [PATCH 323/374] Update version_date.tsv and changelogs after v23.8.11.28-lts --- docs/changelogs/v23.8.11.28-lts.md | 30 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 4 ++++ 2 files changed, 34 insertions(+) create mode 100644 docs/changelogs/v23.8.11.28-lts.md diff --git a/docs/changelogs/v23.8.11.28-lts.md b/docs/changelogs/v23.8.11.28-lts.md new file mode 100644 index 00000000000..acc284caa72 --- /dev/null +++ b/docs/changelogs/v23.8.11.28-lts.md @@ -0,0 +1,30 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v23.8.11.28-lts (31879d2ab4c) FIXME as compared to v23.8.10.43-lts (a278225bba9) + +#### Improvement +* Backported in [#60828](https://github.com/ClickHouse/ClickHouse/issues/60828): Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)). +* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Use the current branch test-utils to build cctools'. [#61276](https://github.com/ClickHouse/ClickHouse/pull/61276) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Cancel PipelineExecutor properly in case of exception in spawnThreads [#57104](https://github.com/ClickHouse/ClickHouse/pull/57104) ([Kruglov Pavel](https://github.com/Avogar)). +* Detect io_uring in tests [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)). +* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e372e407ce1..ad7c92d85d5 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,9 +1,11 @@ v24.2.1.2248-stable 2024-02-29 +v24.1.6.52-stable 2024-03-07 v24.1.5.6-stable 2024-02-14 v24.1.4.20-stable 2024-02-14 v24.1.3.31-stable 2024-02-09 v24.1.2.5-stable 2024-02-02 v24.1.1.2048-stable 2024-01-30 +v23.12.5.81-stable 2024-03-15 v23.12.4.15-stable 2024-02-09 v23.12.3.40-stable 2024-02-02 v23.12.2.59-stable 2024-01-05 @@ -25,6 +27,7 @@ v23.9.4.11-stable 2023-11-08 v23.9.3.12-stable 2023-10-31 v23.9.2.56-stable 2023-10-19 v23.9.1.1854-stable 2023-09-29 +v23.8.11.28-lts 2024-03-15 v23.8.10.43-lts 2024-03-05 v23.8.9.54-lts 2024-01-05 v23.8.8.20-lts 2023-11-25 @@ -55,6 +58,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.21.26-lts 2024-03-15 v23.3.20.27-lts 2024-03-05 v23.3.19.32-lts 2024-01-05 v23.3.18.15-lts 2023-11-25 From e6f2cd080e2bc8b0fb47287c6749fe6bc45a14e7 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 15 Mar 2024 06:47:48 +0000 Subject: [PATCH 324/374] Update version_date.tsv and changelogs after v23.3.21.26-lts --- docs/changelogs/v23.3.21.26-lts.md | 24 ++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 2 ++ 2 files changed, 26 insertions(+) create mode 100644 docs/changelogs/v23.3.21.26-lts.md diff --git a/docs/changelogs/v23.3.21.26-lts.md b/docs/changelogs/v23.3.21.26-lts.md new file mode 100644 index 00000000000..b0f059c4907 --- /dev/null +++ b/docs/changelogs/v23.3.21.26-lts.md @@ -0,0 +1,24 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v23.3.21.26-lts (d9672a3731f) FIXME as compared to v23.3.20.27-lts (cc974ba4f81) + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix reading from sparse columns after restart [#49660](https://github.com/ClickHouse/ClickHouse/pull/49660) ([Anton Popov](https://github.com/CurtizJ)). +* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)). 
+* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)). +* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Cancel PipelineExecutor properly in case of exception in spawnThreads [#57104](https://github.com/ClickHouse/ClickHouse/pull/57104) ([Kruglov Pavel](https://github.com/Avogar)). +* Detect io_uring in tests [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)). +* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e372e407ce1..4ed577e0b61 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,4 +1,5 @@ v24.2.1.2248-stable 2024-02-29 +v24.1.6.52-stable 2024-03-07 v24.1.5.6-stable 2024-02-14 v24.1.4.20-stable 2024-02-14 v24.1.3.31-stable 2024-02-09 @@ -55,6 +56,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.21.26-lts 2024-03-15 v23.3.20.27-lts 2024-03-05 v23.3.19.32-lts 2024-01-05 v23.3.18.15-lts 2023-11-25 From 4e34887ab4d1bfe2355327da452d66d376dc6dae Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 15 Mar 2024 06:48:11 +0000 Subject: [PATCH 325/374] Update version_date.tsv and changelogs after v24.1.7.18-stable --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v24.1.7.18-stable.md | 26 ++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 6 ++++++ 5 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 docs/changelogs/v24.1.7.18-stable.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 2f42854a972..17eee6d4287 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 7bd777de5b9..bd5fa313adc 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 03d01cfd5d7..256dcdc029f 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG 
PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v24.1.7.18-stable.md b/docs/changelogs/v24.1.7.18-stable.md new file mode 100644 index 00000000000..603a83a67be --- /dev/null +++ b/docs/changelogs/v24.1.7.18-stable.md @@ -0,0 +1,26 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.1.7.18-stable (90925babd78) FIXME as compared to v24.1.6.52-stable (fa09f677bc9) + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix_max_query_size_for_kql_compound_operator: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)). +* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)). +* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)). + +#### CI Fix or Improvement (changelog entry is not required) + +* Backported in [#61043](https://github.com/ClickHouse/ClickHouse/issues/61043): Debug and fix markreleaseready. [#60611](https://github.com/ClickHouse/ClickHouse/pull/60611) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#61168](https://github.com/ClickHouse/ClickHouse/issues/61168): Just a preparation for the merge queue support. [#61099](https://github.com/ClickHouse/ClickHouse/pull/61099) ([Max K.](https://github.com/maxknv)). +* Backported in [#61192](https://github.com/ClickHouse/ClickHouse/issues/61192): ... [#61185](https://github.com/ClickHouse/ClickHouse/pull/61185) ([Max K.](https://github.com/maxknv)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e372e407ce1..32fbfee8274 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,9 +1,13 @@ +v24.2.2.71-stable 2024-03-15 v24.2.1.2248-stable 2024-02-29 +v24.1.7.18-stable 2024-03-15 +v24.1.6.52-stable 2024-03-07 v24.1.5.6-stable 2024-02-14 v24.1.4.20-stable 2024-02-14 v24.1.3.31-stable 2024-02-09 v24.1.2.5-stable 2024-02-02 v24.1.1.2048-stable 2024-01-30 +v23.12.5.81-stable 2024-03-15 v23.12.4.15-stable 2024-02-09 v23.12.3.40-stable 2024-02-02 v23.12.2.59-stable 2024-01-05 @@ -25,6 +29,7 @@ v23.9.4.11-stable 2023-11-08 v23.9.3.12-stable 2023-10-31 v23.9.2.56-stable 2023-10-19 v23.9.1.1854-stable 2023-09-29 +v23.8.11.28-lts 2024-03-15 v23.8.10.43-lts 2024-03-05 v23.8.9.54-lts 2024-01-05 v23.8.8.20-lts 2023-11-25 @@ -55,6 +60,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.21.26-lts 2024-03-15 v23.3.20.27-lts 2024-03-05 v23.3.19.32-lts 2024-01-05 v23.3.18.15-lts 2023-11-25 From 5a56c439395c3864d0f95d7b3b8ef63ed069fab6 Mon Sep 17 00:00:00 2001 From: Pham Anh Tuan Date: Wed, 13 Mar 2024 15:06:11 +0000 Subject: [PATCH 326/374] add async metrics for virtual memory mappings --- src/Common/AsynchronousMetrics.cpp | 53 +++++++++++++++++++ src/Common/AsynchronousMetrics.h | 3 ++ ...ry_mappings_asynchronous_metrics.reference | 2 + ...l_memory_mappings_asynchronous_metrics.sql | 4 ++ 4 files changed, 62 insertions(+) create mode 100644 tests/queries/0_stateless/03010_virtual_memory_mappings_asynchronous_metrics.reference create mode 100644 tests/queries/0_stateless/03010_virtual_memory_mappings_asynchronous_metrics.sql diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index b24d9bcc301..0b9be18c84e 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -90,6 +91,9 @@ AsynchronousMetrics::AsynchronousMetrics( openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", cgroupcpu_cfs_quota); } + openFileIfExists("/proc/sys/vm/max_map_count", vm_max_map_count); + openFileIfExists("/proc/self/maps", vm_maps); + openSensors(); openBlockDevices(); openEDAC(); @@ -1423,6 +1427,55 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) } } + if (vm_max_map_count) + { + try + { + vm_max_map_count->rewind(); + + uint64_t max_map_count = 0; + readText(max_map_count, *vm_max_map_count); + new_values["VMMaxMapCount"] = { max_map_count, "The maximum number of memory mappings a process may have (/proc/sys/vm/max_map_count)."}; + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + openFileIfExists("/proc/sys/vm/max_map_count", vm_max_map_count); + } + } + + if (vm_maps) + { + try + { + vm_maps->rewind(); + + uint64_t num_maps = 0; + while (!vm_maps->eof()) + { + char * next_pos = find_first_symbols<'\n'>(vm_maps->position(), vm_maps->buffer().end()); + vm_maps->position() = next_pos; + + if (!vm_maps->hasPendingData()) + continue; + + if (*vm_maps->position() == '\n') + { + ++num_maps; + ++vm_maps->position(); + } + } + new_values["VMNumMaps"] = { num_maps, + "The current number of memory mappings of the process (/proc/self/maps)." + " If it is close to the maximum (VMMaxMapCount), you should increase the limit for vm.max_map_count in /etc/sysctl.conf"}; + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + openFileIfExists("/proc/self/maps", vm_maps); + } + } + try { for (size_t i = 0, size = thermal.size(); i < size; ++i) diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index 305e8136b8a..4b3d28e80c5 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -123,6 +123,9 @@ private: std::optional cgroupcpu_cfs_quota TSA_GUARDED_BY(data_mutex); std::optional cgroupcpu_max TSA_GUARDED_BY(data_mutex); + std::optional vm_max_map_count TSA_GUARDED_BY(data_mutex); + std::optional vm_maps TSA_GUARDED_BY(data_mutex); + std::vector> thermal TSA_GUARDED_BY(data_mutex); std::unordered_map Date: Fri, 15 Mar 2024 06:50:44 +0000 Subject: [PATCH 327/374] Update version_date.tsv and changelogs after v24.2.2.71-stable --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v24.2.2.71-stable.md | 55 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 6 +++ 5 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 docs/changelogs/v24.2.2.71-stable.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 2f42854a972..17eee6d4287 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 7bd777de5b9..bd5fa313adc 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 03d01cfd5d7..256dcdc029f 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v24.2.2.71-stable.md b/docs/changelogs/v24.2.2.71-stable.md new file mode 100644 index 00000000000..b9aa5be626b --- /dev/null +++ b/docs/changelogs/v24.2.2.71-stable.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.2.2.71-stable (9293d361e72) FIXME as compared to v24.2.1.2248-stable (891689a4150) + +#### Improvement +* Backported in [#60834](https://github.com/ClickHouse/ClickHouse/issues/60834): Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)). 
+ +#### Bug Fix (user-visible misbehavior in an official stable release) + +* PartsSplitter invalid ranges for the same part [#60041](https://github.com/ClickHouse/ClickHouse/pull/60041) ([Maksim Kita](https://github.com/kitaisreal)). +* Try to avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix_max_query_size_for_kql_compound_operator: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)). +* Reduce the number of read rows from `system.numbers` [#60546](https://github.com/ClickHouse/ClickHouse/pull/60546) ([JackyWoo](https://github.com/JackyWoo)). +* Don't output number tips for date types [#60577](https://github.com/ClickHouse/ClickHouse/pull/60577) ([Raúl Marín](https://github.com/Algunenano)). +* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Prevent setting custom metadata headers on unsupported multipart upload operations [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)). +* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)). +* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)). +* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix multiple bugs in groupArraySorted [#61203](https://github.com/ClickHouse/ClickHouse/pull/61203) ([Raúl Marín](https://github.com/Algunenano)). +* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)). + +#### CI Fix or Improvement (changelog entry is not required) + +* Backported in [#60758](https://github.com/ClickHouse/ClickHouse/issues/60758): Decoupled changes from [#60408](https://github.com/ClickHouse/ClickHouse/issues/60408). [#60553](https://github.com/ClickHouse/ClickHouse/pull/60553) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#60706](https://github.com/ClickHouse/ClickHouse/issues/60706): Eliminates the need to provide input args to docker server jobs to clean yml files. [#60602](https://github.com/ClickHouse/ClickHouse/pull/60602) ([Max K.](https://github.com/maxknv)). +* Backported in [#61045](https://github.com/ClickHouse/ClickHouse/issues/61045): Debug and fix markreleaseready. [#60611](https://github.com/ClickHouse/ClickHouse/pull/60611) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+* Backported in [#60721](https://github.com/ClickHouse/ClickHouse/issues/60721): Fix build_report job so that it's defined by ci_config only (not yml file). [#60613](https://github.com/ClickHouse/ClickHouse/pull/60613) ([Max K.](https://github.com/maxknv)). +* Backported in [#60668](https://github.com/ClickHouse/ClickHouse/issues/60668): Do not await ci pending jobs on release branches decrease wait timeout to fit into gh job timeout. [#60652](https://github.com/ClickHouse/ClickHouse/pull/60652) ([Max K.](https://github.com/maxknv)). +* Backported in [#60863](https://github.com/ClickHouse/ClickHouse/issues/60863): Set limited number of builds for "special build check" report in backports. [#60850](https://github.com/ClickHouse/ClickHouse/pull/60850) ([Max K.](https://github.com/maxknv)). +* Backported in [#60946](https://github.com/ClickHouse/ClickHouse/issues/60946): ... [#60935](https://github.com/ClickHouse/ClickHouse/pull/60935) ([Max K.](https://github.com/maxknv)). +* Backported in [#60972](https://github.com/ClickHouse/ClickHouse/issues/60972): ... [#60952](https://github.com/ClickHouse/ClickHouse/pull/60952) ([Max K.](https://github.com/maxknv)). +* Backported in [#60980](https://github.com/ClickHouse/ClickHouse/issues/60980): ... [#60958](https://github.com/ClickHouse/ClickHouse/pull/60958) ([Max K.](https://github.com/maxknv)). +* Backported in [#61170](https://github.com/ClickHouse/ClickHouse/issues/61170): Just a preparation for the merge queue support. [#61099](https://github.com/ClickHouse/ClickHouse/pull/61099) ([Max K.](https://github.com/maxknv)). +* Backported in [#61181](https://github.com/ClickHouse/ClickHouse/issues/61181): ... [#61172](https://github.com/ClickHouse/ClickHouse/pull/61172) ([Max K.](https://github.com/maxknv)). +* Backported in [#61228](https://github.com/ClickHouse/ClickHouse/issues/61228): ... [#61183](https://github.com/ClickHouse/ClickHouse/pull/61183) ([Han Fei](https://github.com/hanfei1991)). +* Backported in [#61194](https://github.com/ClickHouse/ClickHouse/issues/61194): ... [#61185](https://github.com/ClickHouse/ClickHouse/pull/61185) ([Max K.](https://github.com/maxknv)). +* Backported in [#61244](https://github.com/ClickHouse/ClickHouse/issues/61244): ... [#61214](https://github.com/ClickHouse/ClickHouse/pull/61214) ([Max K.](https://github.com/maxknv)). +* Backported in [#61388](https://github.com/ClickHouse/ClickHouse/issues/61388):. [#61373](https://github.com/ClickHouse/ClickHouse/pull/61373) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* CI: make workflow yml abstract [#60421](https://github.com/ClickHouse/ClickHouse/pull/60421) ([Max K.](https://github.com/maxknv)). +* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)). +* General sanity in function `seriesOutliersDetectTukey` [#60535](https://github.com/ClickHouse/ClickHouse/pull/60535) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Speed up cctools building [#61011](https://github.com/ClickHouse/ClickHouse/pull/61011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e372e407ce1..32fbfee8274 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,9 +1,13 @@ +v24.2.2.71-stable 2024-03-15 v24.2.1.2248-stable 2024-02-29 +v24.1.7.18-stable 2024-03-15 +v24.1.6.52-stable 2024-03-07 v24.1.5.6-stable 2024-02-14 v24.1.4.20-stable 2024-02-14 v24.1.3.31-stable 2024-02-09 v24.1.2.5-stable 2024-02-02 v24.1.1.2048-stable 2024-01-30 +v23.12.5.81-stable 2024-03-15 v23.12.4.15-stable 2024-02-09 v23.12.3.40-stable 2024-02-02 v23.12.2.59-stable 2024-01-05 @@ -25,6 +29,7 @@ v23.9.4.11-stable 2023-11-08 v23.9.3.12-stable 2023-10-31 v23.9.2.56-stable 2023-10-19 v23.9.1.1854-stable 2023-09-29 +v23.8.11.28-lts 2024-03-15 v23.8.10.43-lts 2024-03-05 v23.8.9.54-lts 2024-01-05 v23.8.8.20-lts 2023-11-25 @@ -55,6 +60,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.21.26-lts 2024-03-15 v23.3.20.27-lts 2024-03-05 v23.3.19.32-lts 2024-01-05 v23.3.18.15-lts 2023-11-25 From 51f0930fe276da5d32a7a4df302f337ebfead00e Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 15 Mar 2024 06:53:49 +0000 Subject: [PATCH 328/374] Update version_date.tsv and changelogs after v23.12.5.81-stable --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v23.12.5.81-stable.md | 64 +++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 6 +++ 5 files changed, 73 insertions(+), 3 deletions(-) create mode 100644 docs/changelogs/v23.12.5.81-stable.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 2f42854a972..17eee6d4287 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 7bd777de5b9..bd5fa313adc 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 03d01cfd5d7..256dcdc029f 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.2.1.2248" +ARG VERSION="24.2.2.71" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v23.12.5.81-stable.md b/docs/changelogs/v23.12.5.81-stable.md new file mode 100644 index 00000000000..0a0acd97d58 --- /dev/null +++ b/docs/changelogs/v23.12.5.81-stable.md @@ -0,0 +1,64 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 
+--- + +# 2024 Changelog + +### ClickHouse release v23.12.5.81-stable (a0fbe3ae813) FIXME as compared to v23.12.4.15-stable (4233d111d20) + +#### Improvement +* Backported in [#60290](https://github.com/ClickHouse/ClickHouse/issues/60290): Copy S3 file GCP fallback to buffer copy in case GCP returned `Internal Error` with `GATEWAY_TIMEOUT` HTTP error code. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#60830](https://github.com/ClickHouse/ClickHouse/issues/60830): Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)). + +#### Build/Testing/Packaging Improvement +* Backported in [#59883](https://github.com/ClickHouse/ClickHouse/issues/59883): If you want to run initdb scripts every time when ClickHouse container is starting you shoud initialize environment varible CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS. [#59808](https://github.com/ClickHouse/ClickHouse/pull/59808) ([Alexander Nikolaev](https://github.com/AlexNik)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix_kql_issue_found_by_wingfuzz [#59626](https://github.com/ClickHouse/ClickHouse/pull/59626) ([Yong Wang](https://github.com/kashwy)). +* Fix error "Read beyond last offset" for AsynchronousBoundedReadBuffer [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix query start time on non initial queries [#59662](https://github.com/ClickHouse/ClickHouse/pull/59662) ([Raúl Marín](https://github.com/Algunenano)). +* rabbitmq: fix having neither acked nor nacked messages [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix parsing of partition expressions surrounded by parens [#59901](https://github.com/ClickHouse/ClickHouse/pull/59901) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix optimize_uniq_to_count removing the column alias [#60026](https://github.com/ClickHouse/ClickHouse/pull/60026) ([Raúl Marín](https://github.com/Algunenano)). +* Fix cosineDistance crash with Nullable [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)). +* Hide sensitive info for s3queue [#60233](https://github.com/ClickHouse/ClickHouse/pull/60233) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix_max_query_size_for_kql_compound_operator: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)). +* Reduce the number of read rows from `system.numbers` [#60546](https://github.com/ClickHouse/ClickHouse/pull/60546) ([JackyWoo](https://github.com/JackyWoo)). +* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Prevent setting custom metadata headers on unsupported multipart upload operations [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)). 
+* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)). +* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)). +* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)). + +#### CI Fix or Improvement (changelog entry is not required) + +* Backported in [#60767](https://github.com/ClickHouse/ClickHouse/issues/60767): Decoupled changes from [#60408](https://github.com/ClickHouse/ClickHouse/issues/60408). [#60553](https://github.com/ClickHouse/ClickHouse/pull/60553) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#60582](https://github.com/ClickHouse/ClickHouse/issues/60582): Arm and amd docker build jobs use similar job names and thus overwrite job reports - aarch64 and amd64 suffixes added to fix this. [#60554](https://github.com/ClickHouse/ClickHouse/pull/60554) ([Max K.](https://github.com/maxknv)). +* Backported in [#61041](https://github.com/ClickHouse/ClickHouse/issues/61041): Debug and fix markreleaseready. [#60611](https://github.com/ClickHouse/ClickHouse/pull/60611) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#61030](https://github.com/ClickHouse/ClickHouse/issues/61030): ... [#61022](https://github.com/ClickHouse/ClickHouse/pull/61022) ([Max K.](https://github.com/maxknv)). +* Backported in [#61224](https://github.com/ClickHouse/ClickHouse/issues/61224): ... [#61183](https://github.com/ClickHouse/ClickHouse/pull/61183) ([Han Fei](https://github.com/hanfei1991)). +* Backported in [#61190](https://github.com/ClickHouse/ClickHouse/issues/61190): ... [#61185](https://github.com/ClickHouse/ClickHouse/pull/61185) ([Max K.](https://github.com/maxknv)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Backport [#59798](https://github.com/ClickHouse/ClickHouse/issues/59798) to 23.12: CI: do not reuse builds on release branches"'. [#59979](https://github.com/ClickHouse/ClickHouse/pull/59979) ([Max K.](https://github.com/maxknv)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* CI: move ci-specifics from job scripts to ci.py [#58516](https://github.com/ClickHouse/ClickHouse/pull/58516) ([Max K.](https://github.com/maxknv)). +* Make ZooKeeper actually sequentialy consistent [#59735](https://github.com/ClickHouse/ClickHouse/pull/59735) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix special build reports in release branches [#59797](https://github.com/ClickHouse/ClickHouse/pull/59797) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* CI: do not reuse builds on release branches [#59798](https://github.com/ClickHouse/ClickHouse/pull/59798) ([Max K.](https://github.com/maxknv)). +* Fix mark release ready [#59994](https://github.com/ClickHouse/ClickHouse/pull/59994) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Ability to detect undead ZooKeeper sessions [#60044](https://github.com/ClickHouse/ClickHouse/pull/60044) ([Alexander Tokmakov](https://github.com/tavplubix)). 
+* Detect io_uring in tests [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)). +* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)). +* Remove broken test while we fix it [#60547](https://github.com/ClickHouse/ClickHouse/pull/60547) ([Raúl Marín](https://github.com/Algunenano)). +* Speed up cctools building [#61011](https://github.com/ClickHouse/ClickHouse/pull/61011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e372e407ce1..32fbfee8274 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,9 +1,13 @@ +v24.2.2.71-stable 2024-03-15 v24.2.1.2248-stable 2024-02-29 +v24.1.7.18-stable 2024-03-15 +v24.1.6.52-stable 2024-03-07 v24.1.5.6-stable 2024-02-14 v24.1.4.20-stable 2024-02-14 v24.1.3.31-stable 2024-02-09 v24.1.2.5-stable 2024-02-02 v24.1.1.2048-stable 2024-01-30 +v23.12.5.81-stable 2024-03-15 v23.12.4.15-stable 2024-02-09 v23.12.3.40-stable 2024-02-02 v23.12.2.59-stable 2024-01-05 @@ -25,6 +29,7 @@ v23.9.4.11-stable 2023-11-08 v23.9.3.12-stable 2023-10-31 v23.9.2.56-stable 2023-10-19 v23.9.1.1854-stable 2023-09-29 +v23.8.11.28-lts 2024-03-15 v23.8.10.43-lts 2024-03-05 v23.8.9.54-lts 2024-01-05 v23.8.8.20-lts 2023-11-25 @@ -55,6 +60,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.21.26-lts 2024-03-15 v23.3.20.27-lts 2024-03-05 v23.3.19.32-lts 2024-01-05 v23.3.18.15-lts 2023-11-25 From 7f97b11ce6095ae77d0baf1cb3f9517714c6bdc5 Mon Sep 17 00:00:00 2001 From: Zhuo Qiu Date: Fri, 15 Mar 2024 15:46:09 +0800 Subject: [PATCH 329/374] only read _row_exists column when necessary --- src/Storages/MergeTree/MutateTask.cpp | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 4d1e60f450e..bfdc109a89d 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1018,8 +1018,8 @@ struct MutationContext scope_guard temporary_directory_lock; - /// Whether this mutation contains lightweight delete - bool has_lightweight_delete; + /// Whether we need to count lightweight delete rows in this mutation + bool count_lightweight_deleted_rows; }; using MutationContextPtr = std::shared_ptr; @@ -1282,7 +1282,8 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() ctx->out->write(cur_block); - if (ctx->has_lightweight_delete) + /// TODO: move this calculation to DELETE FROM mutation + if (ctx->count_lightweight_deleted_rows) existing_rows_count += MutationHelpers::getExistingRowsCount(cur_block); for (size_t i = 0, size = ctx->projections_to_build.size(); i < size; ++i) @@ -1376,7 +1377,7 @@ bool PartMergerWriter::iterateThroughAllProjections() void PartMergerWriter::finalize() { - if (ctx->has_lightweight_delete) + if (ctx->count_lightweight_deleted_rows) ctx->new_data_part->existing_rows_count = existing_rows_count; } @@ -2225,17 +2226,17 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - if (ctx->updated_header.has(RowExistsColumn::name)) + if 
(ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->updated_header.has(RowExistsColumn::name)) { - /// This mutation contains lightweight delete, reset existing_rows_count of new data part to 0 - /// It will be updated while writing _row_exists column - ctx->has_lightweight_delete = true; + /// This mutation contains lightweight delete and we need to count the deleted rows, + /// Reset existing_rows_count of new data part to 0 and it will be updated while writing _row_exists column + ctx->count_lightweight_deleted_rows = true; } else { - ctx->has_lightweight_delete = false; + ctx->count_lightweight_deleted_rows = false; - /// This mutation does not contains lightweight delete, copy existing_rows_count from source part + /// No need to count deleted rows, copy existing_rows_count from source part ctx->new_data_part->existing_rows_count = ctx->source_part->existing_rows_count.value_or(ctx->source_part->rows_count); } From be4554ba431c2c496c139e6b4869a68ca3ba58dc Mon Sep 17 00:00:00 2001 From: Zhuo Qiu Date: Fri, 15 Mar 2024 16:31:50 +0800 Subject: [PATCH 330/374] fix test --- tests/queries/0_stateless/03001_consider_lwd_when_merge.sql | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql b/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql index a65e8877020..988d7058f21 100644 --- a/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql +++ b/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql @@ -17,6 +17,9 @@ SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = ALTER TABLE lwd_merge MODIFY SETTING exclude_deleted_rows_for_part_size_in_merge = 1; +-- delete again because deleted rows will be counted in mutation +DELETE FROM lwd_merge WHERE id % 100 == 0; + OPTIMIZE TABLE lwd_merge; SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1; From 4eda78440d696db68d654a5883cc20fb1b145365 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 15 Mar 2024 10:34:32 +0100 Subject: [PATCH 331/374] Remove C++ templates --- src/DataTypes/IDataType.h | 2 + src/Functions/castOrDefault.cpp | 181 ++++++++++++++------------------ 2 files changed, 79 insertions(+), 104 deletions(-) diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 55f584ef1e0..4403e3d9bd4 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -533,6 +533,8 @@ class DataTypeDateTime; class DataTypeDateTime64; template constexpr bool IsDataTypeDecimal> = true; + +/// TODO: this is garbage, remove it. 
template <> inline constexpr bool IsDataTypeDecimal = true; template constexpr bool IsDataTypeNumber> = true; diff --git a/src/Functions/castOrDefault.cpp b/src/Functions/castOrDefault.cpp index 970e6fd6f75..18bdea28029 100644 --- a/src/Functions/castOrDefault.cpp +++ b/src/Functions/castOrDefault.cpp @@ -173,25 +173,22 @@ private: bool keep_nullable; }; -template class FunctionCastOrDefaultTyped final : public IFunction { public: - static constexpr auto name = Name::name; - - static FunctionPtr create(ContextPtr context) - { - return std::make_shared(context); - } - - explicit FunctionCastOrDefaultTyped(ContextPtr context_) - : impl(context_) + explicit FunctionCastOrDefaultTyped(ContextPtr context_, String name_, DataTypePtr type_) + : impl(context_), name(std::move(name_)), type(std::move(type_)), which(type) { } String getName() const override { return name; } private: + FunctionCastOrDefault impl; + String name; + DataTypePtr type; + WhichDataType which; + size_t getNumberOfArguments() const override { return 0; } bool isVariadic() const override { return true; } @@ -209,10 +206,10 @@ private: FunctionArgumentDescriptors mandatory_args = {{"Value", nullptr, nullptr, nullptr}}; FunctionArgumentDescriptors optional_args; - if constexpr (IsDataTypeDecimal) + if (isDecimal(type) || isDateTime64(type)) mandatory_args.push_back({"scale", static_cast(&isNativeInteger), &isColumnConst, "const Integer"}); - if (std::is_same_v || std::is_same_v) + if (isDateTimeOrDateTime64(type)) optional_args.push_back({"timezone", static_cast(&isString), isColumnConst, "const String"}); optional_args.push_back({"default_value", nullptr, nullptr, nullptr}); @@ -224,7 +221,7 @@ private: size_t scale = 0; std::string time_zone; - if constexpr (IsDataTypeDecimal) + if (isDecimal(type)) { const auto & scale_argument = arguments[additional_argument_index]; @@ -241,7 +238,7 @@ private: ++additional_argument_index; } - if constexpr (std::is_same_v || std::is_same_v) + if (isDateTimeOrDateTime64(type)) { if (additional_argument_index < arguments.size()) { @@ -251,16 +248,22 @@ private: } } - std::shared_ptr cast_type; + DataTypePtr cast_type; - if constexpr (std::is_same_v) - cast_type = std::make_shared(scale, time_zone); - else if constexpr (IsDataTypeDecimal) - cast_type = std::make_shared(Type::maxPrecision(), scale); - else if constexpr (std::is_same_v || std::is_same_v) - cast_type = std::make_shared(time_zone); + if (which.isDateTime64()) + cast_type = std::make_shared(scale, time_zone); + else if (which.isDateTime()) + cast_type = std::make_shared(time_zone); + else if (which.isDecimal32()) + cast_type = createDecimalMaxPrecision(scale); + else if (which.isDecimal64()) + cast_type = createDecimalMaxPrecision(scale); + else if (which.isDecimal128()) + cast_type = createDecimalMaxPrecision(scale); + else if (which.isDecimal256()) + cast_type = createDecimalMaxPrecision(scale); else - cast_type = std::make_shared(); + cast_type = type; ColumnWithTypeAndName type_argument = { @@ -289,7 +292,8 @@ private: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_size) const override { - size_t additional_arguments_size = IsDataTypeDecimal + (std::is_same_v || std::is_same_v); + /// Scale and time zone + size_t additional_arguments_size = (which.isDecimal() || which.isDateTime64()) + which.isDateTimeOrDateTime64(); ColumnWithTypeAndName second_argument = { @@ -310,98 +314,67 @@ private: return impl.executeImpl(arguments_with_cast_type, result_type, 
input_rows_size); } - - FunctionCastOrDefault impl; }; -struct NameToUInt8OrDefault { static constexpr auto name = "toUInt8OrDefault"; }; -struct NameToUInt16OrDefault { static constexpr auto name = "toUInt16OrDefault"; }; -struct NameToUInt32OrDefault { static constexpr auto name = "toUInt32OrDefault"; }; -struct NameToUInt64OrDefault { static constexpr auto name = "toUInt64OrDefault"; }; -struct NameToUInt256OrDefault { static constexpr auto name = "toUInt256OrDefault"; }; -struct NameToInt8OrDefault { static constexpr auto name = "toInt8OrDefault"; }; -struct NameToInt16OrDefault { static constexpr auto name = "toInt16OrDefault"; }; -struct NameToInt32OrDefault { static constexpr auto name = "toInt32OrDefault"; }; -struct NameToInt64OrDefault { static constexpr auto name = "toInt64OrDefault"; }; -struct NameToInt128OrDefault { static constexpr auto name = "toInt128OrDefault"; }; -struct NameToInt256OrDefault { static constexpr auto name = "toInt256OrDefault"; }; -struct NameToFloat32OrDefault { static constexpr auto name = "toFloat32OrDefault"; }; -struct NameToFloat64OrDefault { static constexpr auto name = "toFloat64OrDefault"; }; -struct NameToDateOrDefault { static constexpr auto name = "toDateOrDefault"; }; -struct NameToDate32OrDefault { static constexpr auto name = "toDate32OrDefault"; }; -struct NameToDateTimeOrDefault { static constexpr auto name = "toDateTimeOrDefault"; }; -struct NameToDateTime64OrDefault { static constexpr auto name = "toDateTime64OrDefault"; }; -struct NameToDecimal32OrDefault { static constexpr auto name = "toDecimal32OrDefault"; }; -struct NameToDecimal64OrDefault { static constexpr auto name = "toDecimal64OrDefault"; }; -struct NameToDecimal128OrDefault { static constexpr auto name = "toDecimal128OrDefault"; }; -struct NameToDecimal256OrDefault { static constexpr auto name = "toDecimal256OrDefault"; }; -struct NameToUUIDOrDefault { static constexpr auto name = "toUUIDOrDefault"; }; -struct NameToIPv4OrDefault { static constexpr auto name = "toIPv4OrDefault"; }; -struct NameToIPv6OrDefault { static constexpr auto name = "toIPv6OrDefault"; }; - -using FunctionToUInt8OrDefault = FunctionCastOrDefaultTyped; -using FunctionToUInt16OrDefault = FunctionCastOrDefaultTyped; -using FunctionToUInt32OrDefault = FunctionCastOrDefaultTyped; -using FunctionToUInt64OrDefault = FunctionCastOrDefaultTyped; -using FunctionToUInt256OrDefault = FunctionCastOrDefaultTyped; - -using FunctionToInt8OrDefault = FunctionCastOrDefaultTyped; -using FunctionToInt16OrDefault = FunctionCastOrDefaultTyped; -using FunctionToInt32OrDefault = FunctionCastOrDefaultTyped; -using FunctionToInt64OrDefault = FunctionCastOrDefaultTyped; -using FunctionToInt128OrDefault = FunctionCastOrDefaultTyped; -using FunctionToInt256OrDefault = FunctionCastOrDefaultTyped; - -using FunctionToFloat32OrDefault = FunctionCastOrDefaultTyped; -using FunctionToFloat64OrDefault = FunctionCastOrDefaultTyped; - -using FunctionToDateOrDefault = FunctionCastOrDefaultTyped; -using FunctionToDate32OrDefault = FunctionCastOrDefaultTyped; -using FunctionToDateTimeOrDefault = FunctionCastOrDefaultTyped; -using FunctionToDateTime64OrDefault = FunctionCastOrDefaultTyped; - -using FunctionToDecimal32OrDefault = FunctionCastOrDefaultTyped, NameToDecimal32OrDefault>; -using FunctionToDecimal64OrDefault = FunctionCastOrDefaultTyped, NameToDecimal64OrDefault>; -using FunctionToDecimal128OrDefault = FunctionCastOrDefaultTyped, NameToDecimal128OrDefault>; -using FunctionToDecimal256OrDefault = FunctionCastOrDefaultTyped, 
NameToDecimal256OrDefault>; - -using FunctionToUUIDOrDefault = FunctionCastOrDefaultTyped; -using FunctionToIPv4OrDefault = FunctionCastOrDefaultTyped; -using FunctionToIPv6OrDefault = FunctionCastOrDefaultTyped; - REGISTER_FUNCTION(CastOrDefault) { factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction("toUInt8OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toUInt8OrDefault", std::make_shared())); }); + factory.registerFunction("toUInt16OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toUInt16OrDefault", std::make_shared())); }); + factory.registerFunction("toUInt32OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toUInt32OrDefault", std::make_shared())); }); + factory.registerFunction("toUInt64OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toUInt64OrDefault", std::make_shared())); }); + factory.registerFunction("toUInt128OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toUInt128OrDefault", std::make_shared())); }); + factory.registerFunction("toUInt256OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toUInt256OrDefault", std::make_shared())); }); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction("toInt8OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toInt8OrDefault", std::make_shared())); }); + factory.registerFunction("toInt16OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toInt16OrDefault", std::make_shared())); }); + factory.registerFunction("toInt32OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toInt32OrDefault", std::make_shared())); }); + factory.registerFunction("toInt64OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toInt64OrDefault", std::make_shared())); }); + factory.registerFunction("toInt128OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toInt128OrDefault", std::make_shared())); }); + factory.registerFunction("toInt256OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toInt256OrDefault", std::make_shared())); }); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction("toFloat32OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toFloat32OrDefault", std::make_shared())); }); + factory.registerFunction("toFloat64OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toFloat64OrDefault", std::make_shared())); }); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction("toDateOrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDateOrDefault", std::make_shared())); }); + factory.registerFunction("toDate32OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDate32OrDefault", std::make_shared())); }); + 
factory.registerFunction("toDateTimeOrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDateTimeOrDefault", std::make_shared())); }); + factory.registerFunction("toDateTime64OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDateTime64OrDefault", std::make_shared(3 /* default scale */))); }); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction("toDecimal32OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDecimal32OrDefault", createDecimalMaxPrecision(0))); }); + factory.registerFunction("toDecimal64OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDecimal64OrDefault", createDecimalMaxPrecision(0))); }); + factory.registerFunction("toDecimal128OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDecimal128OrDefault", createDecimalMaxPrecision(0))); }); + factory.registerFunction("toDecimal256OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toDecimal256OrDefault", createDecimalMaxPrecision(0))); }); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction("toUUIDOrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toUUIDOrDefault", std::make_shared())); }); + factory.registerFunction("toIPv4OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toIPv4OrDefault", std::make_shared())); }); + factory.registerFunction("toIPv6OrDefault", [](ContextPtr context){ return std::make_unique( + std::make_shared(context, "toIPv6OrDefault", std::make_shared())); }); } } From 4290b1fe2cfe6eace74c08601db884f0c1738bf8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 15 Mar 2024 11:32:21 +0100 Subject: [PATCH 332/374] Garbage --- src/Functions/FunctionsConversion.cpp | 49 +++++++-------------------- src/Functions/castOrDefault.cpp | 2 +- 2 files changed, 13 insertions(+), 38 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 42056067f00..f69d3e9146b 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1980,13 +1980,6 @@ public: static constexpr bool to_datetime64 = std::is_same_v; - static constexpr bool to_string_or_fixed_string = std::is_same_v || - std::is_same_v; - - static constexpr bool to_date_or_datetime = std::is_same_v || - std::is_same_v || - std::is_same_v; - static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } static FunctionPtr create() { return std::make_shared(); } @@ -2003,8 +1996,7 @@ public: bool isInjective(const ColumnsWithTypeAndName &) const override { return std::is_same_v; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & arguments) const override { - /// TODO: We can make more optimizations here. 
- return !(to_date_or_datetime && isNumber(*arguments[0].type)); + return !(IsDataTypeDateOrDateTime && isNumber(*arguments[0].type)); } using DefaultReturnTypeGetter = std::function; @@ -2327,7 +2319,7 @@ private: } bool done = false; - if constexpr (to_string_or_fixed_string) + if constexpr (std::is_same_v || std::is_same_v) { done = callOnIndexAndDataType(from_type->getTypeId(), call, ConvertDefaultBehaviorTag{}); } @@ -3155,7 +3147,6 @@ public: } private: - const char * cast_name; MonotonicityForRange monotonicity_for_range; @@ -4623,26 +4614,12 @@ arguments, result_type, input_rows_count); \ using Types = std::decay_t; using ToDataType = typename Types::LeftType; - if constexpr ( - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v) + if constexpr (is_any_of) { ret = createWrapper(from_type, checkAndGetDataType(to_type.get()), requested_result_is_nullable); return true; @@ -4662,12 +4639,10 @@ arguments, result_type, input_rows_count); \ ret = createEnumWrapper(from_type, checkAndGetDataType(to_type.get())); return true; } - if constexpr ( - std::is_same_v> || - std::is_same_v> || - std::is_same_v> || - std::is_same_v> || - std::is_same_v) + if constexpr (is_any_of, DataTypeDecimal, + DataTypeDecimal, DataTypeDecimal, + DataTypeDateTime64>) { ret = createDecimalWrapper(from_type, checkAndGetDataType(to_type.get()), requested_result_is_nullable); return true; diff --git a/src/Functions/castOrDefault.cpp b/src/Functions/castOrDefault.cpp index 18bdea28029..57cb03e0349 100644 --- a/src/Functions/castOrDefault.cpp +++ b/src/Functions/castOrDefault.cpp @@ -303,7 +303,7 @@ private: }; ColumnsWithTypeAndName arguments_with_cast_type; - arguments_with_cast_type.reserve(arguments.size()); + arguments_with_cast_type.reserve(arguments.size() + 1); arguments_with_cast_type.emplace_back(arguments[0]); arguments_with_cast_type.emplace_back(second_argument); From 060f79862d1c13f9fb723b592e384db009a45823 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 15 Mar 2024 10:49:36 +0000 Subject: [PATCH 333/374] Fix --- src/Analyzer/Passes/SumIfToCountIfPass.cpp | 2 +- ...3010_sum_to_to_count_if_nullable.reference | 66 +++++++++++++++++++ .../03010_sum_to_to_count_if_nullable.sql | 12 +++- 3 files changed, 77 insertions(+), 3 deletions(-) diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.cpp b/src/Analyzer/Passes/SumIfToCountIfPass.cpp index 2c41b6dc467..1c2097e7be9 100644 --- a/src/Analyzer/Passes/SumIfToCountIfPass.cpp +++ b/src/Analyzer/Passes/SumIfToCountIfPass.cpp @@ -54,7 +54,7 @@ public: if (!constant_node) return; - if (auto constant_type = constant_node->getResultType(); !isUInt64(constant_type) && !isInt64(constant_type)) + if (auto constant_type = constant_node->getResultType(); !isNativeInteger(constant_type)) return; const auto & constant_value_literal = constant_node->getValue(); diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference index 8627f639a03..89e5f639c66 100644 --- a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference +++ b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference @@ -1,2 +1,68 @@ (5,NULL) (5,NULL) +(5,NULL) 
+QUERY id: 0 + PROJECTION COLUMNS + (sumIf(toNullable(1), equals(modulo(number, 2), 0)), NULL) Tuple(Nullable(UInt64), Nullable(Nothing)) + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: tuple, function_type: ordinary, result_type: Tuple(Nullable(UInt64), Nullable(Nothing)) + ARGUMENTS + LIST id: 3, nodes: 2 + FUNCTION id: 4, function_name: sumIf, function_type: aggregate, result_type: Nullable(UInt64) + ARGUMENTS + LIST id: 5, nodes: 2 + CONSTANT id: 6, constant_value: UInt64_1, constant_value_type: Nullable(UInt8) + EXPRESSION + FUNCTION id: 7, function_name: toNullable, function_type: ordinary, result_type: Nullable(UInt8) + ARGUMENTS + LIST id: 8, nodes: 1 + CONSTANT id: 9, constant_value: UInt64_1, constant_value_type: UInt8 + FUNCTION id: 10, function_name: equals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 11, nodes: 2 + FUNCTION id: 12, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 13, nodes: 2 + COLUMN id: 14, column_name: number, result_type: UInt64, source_id: 15 + CONSTANT id: 16, constant_value: UInt64_2, constant_value_type: UInt8 + CONSTANT id: 17, constant_value: UInt64_0, constant_value_type: UInt8 + CONSTANT id: 18, constant_value: NULL, constant_value_type: Nullable(Nothing) + JOIN TREE + TABLE_FUNCTION id: 15, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 19, nodes: 1 + CONSTANT id: 20, constant_value: UInt64_10, constant_value_type: UInt8 +(5,NULL) +QUERY id: 0 + PROJECTION COLUMNS + (sum(if(equals(modulo(number, 2), 0), toNullable(1), 0)), NULL) Tuple(Nullable(UInt64), Nullable(Nothing)) + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: tuple, function_type: ordinary, result_type: Tuple(Nullable(UInt64), Nullable(Nothing)) + ARGUMENTS + LIST id: 3, nodes: 2 + FUNCTION id: 4, function_name: sumOrNullIf, function_type: aggregate, result_type: Nullable(UInt64) + ARGUMENTS + LIST id: 5, nodes: 2 + CONSTANT id: 6, constant_value: UInt64_1, constant_value_type: Nullable(UInt8) + EXPRESSION + FUNCTION id: 7, function_name: toNullable, function_type: ordinary, result_type: Nullable(UInt8) + ARGUMENTS + LIST id: 8, nodes: 1 + CONSTANT id: 9, constant_value: UInt64_1, constant_value_type: UInt8 + FUNCTION id: 10, function_name: equals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 11, nodes: 2 + FUNCTION id: 12, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 13, nodes: 2 + COLUMN id: 14, column_name: number, result_type: UInt64, source_id: 15 + CONSTANT id: 16, constant_value: UInt64_2, constant_value_type: UInt8 + CONSTANT id: 17, constant_value: UInt64_0, constant_value_type: UInt8 + CONSTANT id: 18, constant_value: NULL, constant_value_type: Nullable(Nothing) + JOIN TREE + TABLE_FUNCTION id: 15, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 19, nodes: 1 + CONSTANT id: 20, constant_value: UInt64_10, constant_value_type: UInt8 diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql index 394cd4f1ea5..b283a69a020 100644 --- a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql +++ b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql @@ -1,3 +1,11 @@ SET optimize_rewrite_sum_if_to_count_if = 1; -SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10) SETTINGS allow_experimental_analyzer=0; -SELECT (sumIf(toNullable(1), (number % 2) = 0), 
NULL) FROM numbers(10) SETTINGS allow_experimental_analyzer=1; \ No newline at end of file + +SET allow_experimental_analyzer = 0; +SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); +SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); + +SET allow_experimental_analyzer = 1; +SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); +EXPLAIN QUERY TREE SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); +SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); +EXPLAIN QUERY TREE SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); \ No newline at end of file From ad4a25906842e89c526d2651954d1ac3e64fbfb7 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 15 Mar 2024 12:13:35 +0100 Subject: [PATCH 334/374] Restore automerge for approved PRs --- .github/workflows/pull_request.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index c065219f980..2dddde9aa14 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -172,6 +172,7 @@ jobs: run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 finish_check.py + python3 merge_pr.py --check-approved ############################################################################################# From a5bd24205947aea5a93490bb698fa99856e00d89 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 15 Mar 2024 12:36:33 +0100 Subject: [PATCH 335/374] Even more garbage --- src/Functions/FunctionsConversion.cpp | 73 ++++++++++++--------------- 1 file changed, 32 insertions(+), 41 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index f69d3e9146b..7f130b0cc86 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1183,26 +1183,26 @@ struct ConvertImpl { /// Conversion of DateTime to Date: throw off time component. /// Conversion of Date32 to Date. - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (std::is_same_v && std::is_same_v) { /// Conversion of DateTime to Date: throw off time component. - return DateTimeTransformImpl::execute( + return DateTimeTransformImpl::template execute( arguments, result_type, input_rows_count); } else if constexpr ((std::is_same_v || std::is_same_v) && std::is_same_v) { /// Conversion from Date/Date32 to DateTime. 
- return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count, additions); } /** Special case of converting Int8, Int16, (U)Int32 or (U)Int64 (and also, for convenience, @@ -1220,7 +1220,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1229,7 +1229,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1240,7 +1240,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1249,7 +1249,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1258,7 +1258,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1269,7 +1269,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } /// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. 
@@ -1280,14 +1280,14 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1297,7 +1297,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1308,15 +1308,15 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count, additions); + return DateTimeTransformImpl, false>::template execute( + arguments, result_type, input_rows_count); } else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count, additions); + return DateTimeTransformImpl, false>::template execute( + arguments, result_type, input_rows_count); } else if constexpr (( std::is_same_v @@ -1324,22 +1324,22 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::execute( - arguments, result_type, input_rows_count, additions); + return DateTimeTransformImpl, false>::template execute( + arguments, result_type, input_rows_count); } /// Conversion of DateTime64 to Date or DateTime: discards fractional part. else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl>, false>::execute( + return DateTimeTransformImpl>, false>::template execute( arguments, result_type, input_rows_count, additions); } else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl>, false>::execute( + return DateTimeTransformImpl>, false>::template execute( arguments, result_type, input_rows_count, additions); } /// Conversion of Date or DateTime to DateTime64: add zero sub-second part. 
@@ -1350,8 +1350,8 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl::execute( - arguments, result_type, input_rows_count, additions); + return DateTimeTransformImpl::template execute( + arguments, result_type, input_rows_count); } else if constexpr (IsDataTypeDateOrDateTime && std::is_same_v) @@ -1573,8 +1573,8 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return ConvertImpl::execute( - arguments, result_type, input_rows_count, additions); + return ConvertImpl::template execute( + arguments, result_type, input_rows_count); } else if constexpr ((std::is_same_v || std::is_same_v) && std::is_same_v) @@ -1974,11 +1974,8 @@ public: using Monotonic = MonotonicityImpl; static constexpr auto name = Name::name; - static constexpr bool to_decimal = - std::is_same_v || std::is_same_v - || std::is_same_v || std::is_same_v; - static constexpr bool to_datetime64 = std::is_same_v; + static constexpr bool to_decimal = IsDataTypeDecimal && !to_datetime64; static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } static FunctionPtr create() { return std::make_shared(); } @@ -2319,7 +2316,7 @@ private: } bool done = false; - if constexpr (std::is_same_v || std::is_same_v) + if constexpr (is_any_of) { done = callOnIndexAndDataType(from_type->getTypeId(), call, ConvertDefaultBehaviorTag{}); } @@ -2371,13 +2368,8 @@ class FunctionConvertFromString : public IFunction { public: static constexpr auto name = Name::name; - static constexpr bool to_decimal = - std::is_same_v> || - std::is_same_v> || - std::is_same_v> || - std::is_same_v>; - static constexpr bool to_datetime64 = std::is_same_v; + static constexpr bool to_decimal = IsDataTypeDecimal && !to_datetime64; static FunctionPtr create(ContextPtr) { return std::make_shared(); } @@ -3203,13 +3195,12 @@ private: return createFunctionAdaptor(function, from_type); } - auto wrapper_cast_type = cast_type; - - return [wrapper_cast_type, from_type_index, to_type, date_time_overflow_behavior] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *column_nullable, size_t input_rows_count) + return [wrapper_cast_type = cast_type, from_type_index, to_type, date_time_overflow_behavior] + (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * column_nullable, size_t input_rows_count) { ColumnPtr result_column; - auto res = callOnIndexAndDataType(from_type_index, [&](const auto & types) -> bool { + auto res = callOnIndexAndDataType(from_type_index, [&](const auto & types) -> bool + { using Types = std::decay_t; using LeftDataType = typename Types::LeftType; using RightDataType = typename Types::RightType; From 61870fa4b5621d2c9d55a397ee463e285675eee2 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Fri, 15 Mar 2024 12:43:11 +0100 Subject: [PATCH 336/374] Disable broken SonarCloud --- .github/workflows/nightly.yml | 103 +++++++++++++++++----------------- 1 file changed, 53 insertions(+), 50 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 93ac2be19b4..0ab02db8d7a 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -45,6 +45,7 @@ jobs: with: data: "${{ needs.RunConfig.outputs.data }}" set_latest: true + SonarCloud: runs-on: [self-hosted, builder] env: @@ -54,53 +55,55 @@ jobs: CC: clang-17 CXX: clang++-17 steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - filter: tree:0 - submodules: true - - name: Set up JDK 11 - uses: actions/setup-java@v1 - with: - java-version: 11 - - name: Download and set up sonar-scanner - env: - SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip - run: | - mkdir -p "$HOME/.sonar" - curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}" - unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/" - echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH" - - name: Download and set up build-wrapper - env: - BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - run: | - curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}" - unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/" - echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH" - - name: Set Up Build Tools - run: | - sudo apt-get update - sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm - sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" - - name: Run build-wrapper - run: | - mkdir build - cd build - cmake .. - cd .. 
- build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ - - name: Run sonar-scanner - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - run: | - sonar-scanner \ - --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ - --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ - --define sonar.projectKey="ClickHouse_ClickHouse" \ - --define sonar.organization="clickhouse-java" \ - --define sonar.cfamily.cpp23.enabled=true \ - --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" + - name: Disabled + run: echo "The job is disabled since permanently broken" + # - name: Check out repository code + # uses: ClickHouse/checkout@v1 + # with: + # clear-repository: true + # fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + # filter: tree:0 + # submodules: true + # - name: Set up JDK 11 + # uses: actions/setup-java@v1 + # with: + # java-version: 11 + # - name: Download and set up sonar-scanner + # env: + # SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip + # run: | + # mkdir -p "$HOME/.sonar" + # curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}" + # unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/" + # echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH" + # - name: Download and set up build-wrapper + # env: + # BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip + # run: | + # curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}" + # unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/" + # echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH" + # - name: Set Up Build Tools + # run: | + # sudo apt-get update + # sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm + # sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" + # - name: Run build-wrapper + # run: | + # mkdir build + # cd build + # cmake .. + # cd .. 
+ # build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ + # - name: Run sonar-scanner + # env: + # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} + # run: | + # sonar-scanner \ + # --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ + # --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ + # --define sonar.projectKey="ClickHouse_ClickHouse" \ + # --define sonar.organization="clickhouse-java" \ + # --define sonar.cfamily.cpp23.enabled=true \ + # --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" From 5787b7f7c8a38fd407d8c532b99f143685e55a78 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 15 Mar 2024 14:46:26 +0300 Subject: [PATCH 337/374] Update nightly.yml --- .github/workflows/nightly.yml | 62 ----------------------------------- 1 file changed, 62 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 0ab02db8d7a..515236bb826 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -45,65 +45,3 @@ jobs: with: data: "${{ needs.RunConfig.outputs.data }}" set_latest: true - - SonarCloud: - runs-on: [self-hosted, builder] - env: - SONAR_SCANNER_VERSION: 4.8.0.2856 - SONAR_SERVER_URL: "https://sonarcloud.io" - BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed - CC: clang-17 - CXX: clang++-17 - steps: - - name: Disabled - run: echo "The job is disabled since permanently broken" - # - name: Check out repository code - # uses: ClickHouse/checkout@v1 - # with: - # clear-repository: true - # fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - # filter: tree:0 - # submodules: true - # - name: Set up JDK 11 - # uses: actions/setup-java@v1 - # with: - # java-version: 11 - # - name: Download and set up sonar-scanner - # env: - # SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip - # run: | - # mkdir -p "$HOME/.sonar" - # curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}" - # unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/" - # echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH" - # - name: Download and set up build-wrapper - # env: - # BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - # run: | - # curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}" - # unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/" - # echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH" - # - name: Set Up Build Tools - # run: | - # sudo apt-get update - # sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm - # sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" - # - name: Run build-wrapper - # run: | - # mkdir build - # cd build - # cmake .. - # cd .. 
- # build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ - # - name: Run sonar-scanner - # env: - # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - # run: | - # sonar-scanner \ - # --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ - # --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ - # --define sonar.projectKey="ClickHouse_ClickHouse" \ - # --define sonar.organization="clickhouse-java" \ - # --define sonar.cfamily.cpp23.enabled=true \ - # --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" From 16abbcd095c4034a49921288ff0d79835addbf16 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 15 Mar 2024 12:54:13 +0100 Subject: [PATCH 338/374] Revert "Updated format settings references in the docs (datetime.md)" --- docs/en/sql-reference/data-types/datetime.md | 8 ++++---- docs/ru/sql-reference/data-types/datetime.md | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/data-types/datetime.md b/docs/en/sql-reference/data-types/datetime.md index a465106c2ff..1adff18f598 100644 --- a/docs/en/sql-reference/data-types/datetime.md +++ b/docs/en/sql-reference/data-types/datetime.md @@ -36,9 +36,9 @@ You can explicitly set a time zone for `DateTime`-type columns when creating a t The [clickhouse-client](../../interfaces/cli.md) applies the server time zone by default if a time zone isn’t explicitly set when initializing the data type. To use the client time zone, run `clickhouse-client` with the `--use_client_time_zone` parameter. -ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings-formats.md#date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function. +ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings.md#settings-date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function. -When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format) setting. +When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) setting. ## Examples @@ -147,8 +147,8 @@ Time shifts for multiple days. 
Some pacific islands changed their timezone offse - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md) - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md) - [Functions for working with arrays](../../sql-reference/functions/array-functions.md) -- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format) -- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format) +- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format) +- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format) - [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) - [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone) - [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime) diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index 25e87794147..57f24786bb7 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -27,9 +27,9 @@ DateTime([timezone]) Консольный клиент ClickHouse по умолчанию использует часовой пояс сервера, если для значения `DateTime` часовой пояс не был задан в явном виде при инициализации типа данных. Чтобы использовать часовой пояс клиента, запустите [clickhouse-client](../../interfaces/cli.md) с параметром `--use_client_time_zone`. -ClickHouse отображает значения в зависимости от значения параметра [date\_time\_output\_format](../../operations/settings/settings-formats.md#date_time_output_format). Текстовый формат по умолчанию `YYYY-MM-DD hh:mm:ss`. Кроме того, вы можете поменять отображение с помощью функции [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime). +ClickHouse отображает значения в зависимости от значения параметра [date\_time\_output\_format](../../operations/settings/index.md#settings-date_time_output_format). Текстовый формат по умолчанию `YYYY-MM-DD hh:mm:ss`. Кроме того, вы можете поменять отображение с помощью функции [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime). -При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format). +При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date_time_input_format](../../operations/settings/index.md#settings-date_time_input_format). 
## Примеры {#primery} @@ -119,8 +119,8 @@ FROM dt - [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md) - [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) - [Функции для работы с массивами](../../sql-reference/functions/array-functions.md) -- [Настройка `date_time_input_format`](../../operations/settings/settings-formats.md#date_time_input_format) -- [Настройка `date_time_output_format`](../../operations/settings/settings-formats.md#date_time_output_format) +- [Настройка `date_time_input_format`](../../operations/settings/index.md#settings-date_time_input_format) +- [Настройка `date_time_output_format`](../../operations/settings/index.md) - [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) - [Параметр `session_timezone`](../../operations/settings/settings.md#session_timezone) - [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime) From f91c45f562c0cdef32c9403e90eb1e52e818064c Mon Sep 17 00:00:00 2001 From: serxa Date: Fri, 15 Mar 2024 12:03:49 +0000 Subject: [PATCH 339/374] fix `01599_multiline_input_and_singleline_comments` properly --- ...multiline_input_and_singleline_comments.sh | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh b/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh index f1acd39136f..07c2e345009 100755 --- a/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh +++ b/tests/queries/0_stateless/01599_multiline_input_and_singleline_comments.sh @@ -2,7 +2,6 @@ log_user 0 -# In some places `-timeout 1` is used to avoid expect to always wait for the whole timeout set timeout 60 match_max 100000 @@ -14,15 +13,23 @@ expect ":) " # Make a query send -- "SELECT 1\r" -expect -timeout 1 ":-] " send -- "-- xxx\r" -expect -timeout 1 ":-] " send -- ", 2\r" -expect -timeout 1 ":-] " -send -- ";\r" +send -- ";" + +# For some reason this sleep is required for this test to work properly +sleep 1 +send -- "\r" + +expect { + "│ 1 │ 2 │" { } + timeout { exit 1 } +} -expect "│ 1 │ 2 │" expect ":) " -send -- "\4" -expect eof +send -- "" +expect { + eof { exit 0 } + timeout { exit 1 } +} From 20bad992a41a37b65a477dcd40bb1c70b5c4344c Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 15 Mar 2024 12:46:15 +0000 Subject: [PATCH 340/374] CI: fix checkout action version #do_not_test --- .github/workflows/reusable_build.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/reusable_build.yml b/.github/workflows/reusable_build.yml index d2fe6f5dbe7..80d78d93e1b 100644 --- a/.github/workflows/reusable_build.yml +++ b/.github/workflows/reusable_build.yml @@ -43,8 +43,7 @@ jobs: runs-on: [self-hosted, '${{inputs.runner_type}}'] steps: - name: Check out repository code - # WIP: temporary try commit with limited perallelization of checkout - uses: ClickHouse/checkout@0be3f7b3098bae494d3ef5d29d2e0676fb606232 + uses: ClickHouse/checkout@v1 with: clear-repository: true ref: ${{ fromJson(inputs.data).git_ref }} From bf8ae84cb6dba368966cb220b4ca5b3c6deef106 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 15 Mar 2024 13:43:03 +0000 Subject: [PATCH 341/374] fixup! 
CI: skip hdfs tests for arm #do_not_test #batch_0 #no_merge_commit --- tests/integration/helpers/cluster.py | 7 +++++++ .../integration/test_allowed_url_from_config/test.py | 11 +++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 52c0d8a8ee5..194c66fa5c3 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -1,8 +1,10 @@ import base64 import errno +from functools import cached_property import http.client import logging import os +import platform import stat import os.path as p import pprint @@ -4746,3 +4748,8 @@ class ClickHouseKiller(object): def __exit__(self, exc_type, exc_val, exc_tb): self.clickhouse_node.start_clickhouse() + + +@cached_property +def is_arm(): + return any(arch in platform.processor().lower() for arch in ("arm, aarch")) diff --git a/tests/integration/test_allowed_url_from_config/test.py b/tests/integration/test_allowed_url_from_config/test.py index fb7564ae9d3..df8934aa69b 100644 --- a/tests/integration/test_allowed_url_from_config/test.py +++ b/tests/integration/test_allowed_url_from_config/test.py @@ -1,6 +1,5 @@ -import platform import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, is_arm cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", main_configs=["configs/config_with_hosts.xml"]) @@ -18,7 +17,7 @@ node5 = cluster.add_instance( ) node6 = cluster.add_instance("node6", main_configs=["configs/config_for_remote.xml"]) -if platform.processor() != "arm": +if not is_arm(): node7 = cluster.add_instance( "node7", main_configs=["configs/config_for_redirect.xml"], with_hdfs=True ) @@ -273,7 +272,7 @@ def test_table_function_remote(start_cluster): ) -@pytest.mark.skipif(platform.processor() == "arm", reason="skip for ARM") +@pytest.mark.skipif(is_arm(), reason="skip for ARM") def test_redirect(start_cluster): hdfs_api = start_cluster.hdfs_api @@ -288,7 +287,7 @@ def test_redirect(start_cluster): node7.query("DROP TABLE table_test_7_1") -@pytest.mark.skipif(platform.processor() == "arm", reason="skip for ARM") +@pytest.mark.skipif(is_arm(), reason="skip for ARM") def test_HDFS(start_cluster): assert "not allowed" in node7.query_and_get_error( "CREATE TABLE table_test_7_2 (word String) ENGINE=HDFS('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'CSV')" @@ -298,7 +297,7 @@ def test_HDFS(start_cluster): ) -@pytest.mark.skipif(platform.processor() == "arm", reason="skip for ARM") +@pytest.mark.skipif(is_arm(), reason="skip for ARM") def test_schema_inference(start_cluster): error = node7.query_and_get_error("desc url('http://test.com`, 'TSVRaw'')") assert error.find("ReadWriteBufferFromHTTPBase") == -1 From ff456ffb3399a024657930623b377f91def8ad61 Mon Sep 17 00:00:00 2001 From: Max K Date: Fri, 15 Mar 2024 15:08:03 +0100 Subject: [PATCH 342/374] Revert "CI: ARM integration tests: disable tests with HDFS " --- tests/integration/helpers/cluster.py | 7 ------- .../test_allowed_url_from_config/test.py | 13 ++++--------- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 194c66fa5c3..52c0d8a8ee5 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -1,10 +1,8 @@ import base64 import errno -from functools import cached_property import http.client import logging import os -import 
platform import stat import os.path as p import pprint @@ -4748,8 +4746,3 @@ class ClickHouseKiller(object): def __exit__(self, exc_type, exc_val, exc_tb): self.clickhouse_node.start_clickhouse() - - -@cached_property -def is_arm(): - return any(arch in platform.processor().lower() for arch in ("arm, aarch")) diff --git a/tests/integration/test_allowed_url_from_config/test.py b/tests/integration/test_allowed_url_from_config/test.py index df8934aa69b..3106cf12702 100644 --- a/tests/integration/test_allowed_url_from_config/test.py +++ b/tests/integration/test_allowed_url_from_config/test.py @@ -1,5 +1,5 @@ import pytest -from helpers.cluster import ClickHouseCluster, is_arm +from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", main_configs=["configs/config_with_hosts.xml"]) @@ -16,11 +16,9 @@ node5 = cluster.add_instance( "node5", main_configs=["configs/config_without_allowed_hosts.xml"] ) node6 = cluster.add_instance("node6", main_configs=["configs/config_for_remote.xml"]) - -if not is_arm(): - node7 = cluster.add_instance( - "node7", main_configs=["configs/config_for_redirect.xml"], with_hdfs=True - ) +node7 = cluster.add_instance( + "node7", main_configs=["configs/config_for_redirect.xml"], with_hdfs=True +) @pytest.fixture(scope="module") @@ -272,7 +270,6 @@ def test_table_function_remote(start_cluster): ) -@pytest.mark.skipif(is_arm(), reason="skip for ARM") def test_redirect(start_cluster): hdfs_api = start_cluster.hdfs_api @@ -287,7 +284,6 @@ def test_redirect(start_cluster): node7.query("DROP TABLE table_test_7_1") -@pytest.mark.skipif(is_arm(), reason="skip for ARM") def test_HDFS(start_cluster): assert "not allowed" in node7.query_and_get_error( "CREATE TABLE table_test_7_2 (word String) ENGINE=HDFS('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'CSV')" @@ -297,7 +293,6 @@ def test_HDFS(start_cluster): ) -@pytest.mark.skipif(is_arm(), reason="skip for ARM") def test_schema_inference(start_cluster): error = node7.query_and_get_error("desc url('http://test.com`, 'TSVRaw'')") assert error.find("ReadWriteBufferFromHTTPBase") == -1 From 061cd5a53d141136adac7606a68042ce8355afe7 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Fri, 15 Mar 2024 14:22:46 +0000 Subject: [PATCH 343/374] Fixup #ci_set_analyzer --- src/Storages/StorageMerge.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 52362eb5cb8..4a3035dffce 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1056,7 +1056,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( String table_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? 
"_table" : table_alias + "._table"; if (has_database_virtual_column && common_header.has(database_column) - && (storage_stage == QueryProcessingStage::FetchColumns || (dynamic_cast(&storage_snapshot_->storage) != nullptr && !pipe_header.has("'" + database_name + "'_String")))) + && (storage_stage == QueryProcessingStage::FetchColumns || !pipe_header.has("'" + database_name + "'_String"))) { ColumnWithTypeAndName column; column.name = database_column; @@ -1072,7 +1072,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( } if (has_table_virtual_column && common_header.has(table_column) - && (storage_stage == QueryProcessingStage::FetchColumns || (dynamic_cast(&storage_snapshot_->storage) != nullptr && !pipe_header.has("'" + table_name + "'_String")))) + && (storage_stage == QueryProcessingStage::FetchColumns || !pipe_header.has("'" + table_name + "'_String"))) { ColumnWithTypeAndName column; column.name = table_column; @@ -1121,7 +1121,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( /// Subordinary tables could have different but convertible types, like numeric types of different width. /// We must return streams with structure equals to structure of Merge table. convertAndFilterSourceStream( - header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, processed_stage); + header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, storage_stage); } return builder; From 7a8399317398cc32faa42b24c0df9227eef530fd Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 15 Mar 2024 13:09:13 +0000 Subject: [PATCH 344/374] Convert test 02998_system_dns_cache_table to smoke --- .../02998_system_dns_cache_table.reference | 3 ++- .../02998_system_dns_cache_table.sh | 26 ------------------- .../02998_system_dns_cache_table.sql | 3 +++ 3 files changed, 5 insertions(+), 27 deletions(-) delete mode 100755 tests/queries/0_stateless/02998_system_dns_cache_table.sh create mode 100644 tests/queries/0_stateless/02998_system_dns_cache_table.sql diff --git a/tests/queries/0_stateless/02998_system_dns_cache_table.reference b/tests/queries/0_stateless/02998_system_dns_cache_table.reference index ed6cb000142..600d0bc0b39 100644 --- a/tests/queries/0_stateless/02998_system_dns_cache_table.reference +++ b/tests/queries/0_stateless/02998_system_dns_cache_table.reference @@ -1 +1,2 @@ -localhost 127.0.0.1 IPv4 1 +hostname ip_address ip_family cached_at +String String Enum8(\'IPv4\' = 0, \'IPv6\' = 1, \'UNIX_LOCAL\' = 2) DateTime diff --git a/tests/queries/0_stateless/02998_system_dns_cache_table.sh b/tests/queries/0_stateless/02998_system_dns_cache_table.sh deleted file mode 100755 index b74fc00ab3b..00000000000 --- a/tests/queries/0_stateless/02998_system_dns_cache_table.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh - -# Retries are necessary because the DNS cache may be flushed before second statement is executed -i=0 -retries=5 -while [[ $i -lt $retries ]]; do - ${CLICKHOUSE_CURL} -sS --fail --data "SELECT * FROM url('http://localhost:8123/ping', CSV, 'auto', headers())" "${CLICKHOUSE_URL}" | grep -oP -q 'Ok.' 
|| continue - - RECORDS=$(${CLICKHOUSE_CURL} -sS --fail --data "SELECT hostname, ip_address, ip_family, (isNotNull(cached_at) AND cached_at > '1970-01-01 00:00:00') FROM system.dns_cache WHERE hostname = 'localhost' and ip_family = 'IPv4';" "${CLICKHOUSE_URL}") - - if [[ -n "${RECORDS}" ]]; then - echo "${RECORDS}" - exit 0 - fi - - ((++i)) - sleep 0.2 -done - -echo "All tries to fetch entries for localhost failed, no rows returned. -Probably the DNS cache is disabled or the ClickHouse instance not responds to ping." -exit 1 diff --git a/tests/queries/0_stateless/02998_system_dns_cache_table.sql b/tests/queries/0_stateless/02998_system_dns_cache_table.sql new file mode 100644 index 00000000000..0ceb3d8a95a --- /dev/null +++ b/tests/queries/0_stateless/02998_system_dns_cache_table.sql @@ -0,0 +1,3 @@ +SELECT hostname, ip_address, ip_family, cached_at FROM system.dns_cache +LIMIT 0 +FORMAT TSVWithNamesAndTypes; From 9bb697eb171f3e4d2baed381591fae5493beb80e Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 15 Mar 2024 17:43:48 +0100 Subject: [PATCH 345/374] Fix finishing a failed RESTORE. --- src/Backups/RestorerFromBackup.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index bae2f9aaa25..87c143f0fe2 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -101,10 +101,12 @@ RestorerFromBackup::RestorerFromBackup( RestorerFromBackup::~RestorerFromBackup() { - if (!futures.empty()) + /// If an exception occurs we can come here to the destructor having some tasks still unfinished. + /// We have to wait until they finish. + if (getNumFutures() > 0) { - LOG_ERROR(log, "RestorerFromBackup must not be destroyed while {} tasks are still running", futures.size()); - chassert(false && "RestorerFromBackup must not be destroyed while some tasks are still running"); + LOG_INFO(log, "Waiting for {} tasks to finish", getNumFutures()); + waitFutures(); } } From c97731fb8c7ac9cab37445de41a4bf04838c4d77 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 15 Mar 2024 18:46:23 +0100 Subject: [PATCH 346/374] Correctly process last stacktrace --- .../postprocess-traces/postprocess-traces.pl | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/utils/postprocess-traces/postprocess-traces.pl b/utils/postprocess-traces/postprocess-traces.pl index 476fb46418f..3e50f64d864 100755 --- a/utils/postprocess-traces/postprocess-traces.pl +++ b/utils/postprocess-traces/postprocess-traces.pl @@ -8,6 +8,19 @@ use Data::Dumper; my @current_stack = (); my $grouped_stacks = {}; +sub process_stacktrace +{ + my $group = \$grouped_stacks; + for my $frame (reverse @current_stack) + { + $$group->{count} ||= 0; + ++$$group->{count}; + $group = \$$group->{children}{$frame}; + } + + @current_stack = (); +} + while (my $line = <>) { chomp $line; @@ -21,18 +34,12 @@ while (my $line = <>) if ($line eq '') { - my $group = \$grouped_stacks; - for my $frame (reverse @current_stack) - { - $$group->{count} ||= 0; - ++$$group->{count}; - $group = \$$group->{children}{$frame}; - } - - @current_stack = (); + process_stacktrace(); } } +process_stacktrace(); + sub print_group { my $group = shift; From bc6cd6e769c165a2aaffd06a2960bbce3cb616f0 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 15 Mar 2024 22:30:49 +0000 Subject: [PATCH 347/374] fix test_polymorphic_parts --- tests/integration/test_polymorphic_parts/test.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 
deletion(-) diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py index 01bc4804c9f..b91a72c5534 100644 --- a/tests/integration/test_polymorphic_parts/test.py +++ b/tests/integration/test_polymorphic_parts/test.py @@ -332,7 +332,13 @@ def test_different_part_types_on_replicas(start_cluster, table, part_type): for _ in range(3): insert_random_data(table, leader, 100) - leader.query("OPTIMIZE TABLE {} FINAL".format(table)) + exec_query_with_retry( + leader, + "OPTIMIZE TABLE {} FINAL".format(table), + settings={"optimize_throw_if_noop": 1}, + silent=True, + ) + follower.query("SYSTEM SYNC REPLICA {}".format(table), timeout=20) expected = "{}\t1\n".format(part_type) From 3a8974e7e1bbad9d6ae49c3624609b69eedf4b1c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 16 Mar 2024 16:23:12 +0100 Subject: [PATCH 348/374] A definitive guide to CAST --- src/Functions/formatDateTime.cpp | 4 +- src/Functions/parseDateTime.cpp | 2 +- .../03011_definitive_guide_to_cast.reference | 56 ++++ .../03011_definitive_guide_to_cast.sql | 252 ++++++++++++++++++ 4 files changed, 311 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03011_definitive_guide_to_cast.reference create mode 100644 tests/queries/0_stateless/03011_definitive_guide_to_cast.sql diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index 01ef2a733c8..87438365901 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -1832,10 +1832,10 @@ using FunctionFromUnixTimestampInJodaSyntax = FunctionFormatDateTimeImpl(); - factory.registerAlias("DATE_FORMAT", FunctionFormatDateTime::name); + factory.registerAlias("DATE_FORMAT", FunctionFormatDateTime::name, FunctionFactory::CaseInsensitive); factory.registerFunction(); - factory.registerAlias("FROM_UNIXTIME", FunctionFromUnixTimestamp::name); + factory.registerAlias("FROM_UNIXTIME", FunctionFromUnixTimestamp::name, FunctionFactory::CaseInsensitive); factory.registerFunction(); factory.registerFunction(); diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 18882177c90..7a0d7c75774 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -1942,7 +1942,7 @@ namespace REGISTER_FUNCTION(ParseDateTime) { factory.registerFunction(); - factory.registerAlias("TO_UNIXTIME", FunctionParseDateTime::name); + factory.registerAlias("TO_UNIXTIME", FunctionParseDateTime::name, FunctionFactory::CaseInsensitive); factory.registerFunction(); factory.registerFunction(); factory.registerAlias("str_to_date", FunctionParseDateTimeOrNull::name, FunctionFactory::CaseInsensitive); diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference new file mode 100644 index 00000000000..f8f37fa7807 --- /dev/null +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference @@ -0,0 +1,56 @@ +123 +2009-02-14 00:31:30 +[1,2,3] +123 Nullable(UInt8) +\N Nullable(UInt8) +1 +255 +['Hello','wo\'rld\\'] +Hello wo\\\'rld\\\\ +wo\'rld\\ wo\\\'rld\\\\ +123 +123 +123 +1 -1 +[] [] Array(Nothing) Array(Array(Array(Tuple(UInt64, String)))) +123 +123 +123 +123 +123 +123 +123 +String 123 +123 UInt8 +200 UInt8 +123 +123 +1.1 +1.10000000000000016387 +18446744073709551615 +[1.1,2.3] +[1.10000000000000016387,2.29999999999999967236] +Row 1: +────── +CAST('1.1', 'Decimal(30, 20)'): 1.1 +CAST('1.1', 'Decimal(30, 20)'): 1.1 +CAST(plus(1, 1), 'UInt8'): 2 +-1 
+\N +0 +255 +123 +Hello\0\0\0\0\0 +Hello\0\0\0\0\0 +123.45 +2024-04-25 01:02:03 +2024-04-25 01:02:03.000000 +2024-04-25 01:02:03 +2024-04-25 01:02:03.000000 +2024-03-16 16:22:33 +2024-03-16 16:22:33 +2024-04-25 2024-01-01 02:03:04 1 12 +2024-04-25 2024-01-01 02:03:04.000000 2009-02-14 00:31:30 +2024-04-25 2024-01-01 02:03:04.000000 2009-02-14 00:31:30 +1986-04-25 13:00:00 +14 diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql new file mode 100644 index 00000000000..771123b153a --- /dev/null +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql @@ -0,0 +1,252 @@ +SET session_timezone = 'Europe/Amsterdam'; + +-- Type conversion functions and operators. + + +-- 1. SQL standard CAST operator: `CAST(value AS Type)`. + +SELECT CAST(123 AS String); + +-- It convert between various data types, including parameterized data types + +SELECT CAST(1234567890 AS DateTime('Europe/Amsterdam')); + +-- and composite data types: + +SELECT CAST('[1, 2, 3]' AS Array(UInt8)); + +-- It's return type depends on the setting `cast_keep_nullable`. If it is enabled, if the source argument type is Nullable, the resulting data type will be also Nullable, even if it is not written explicitly: + +SET cast_keep_nullable = 1; +SELECT CAST(x AS UInt8) AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('NULL')); + +SET cast_keep_nullable = 0; +SELECT CAST(x AS UInt8) AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('NULL')); -- { serverError CANNOT_PARSE_TEXT } + +-- There are various type conversion rules, some worth noting. + +-- Conversion between numeric types can involve implementation defined overflow: + +SELECT CAST(257 AS UInt8); +SELECT CAST(-1 AS UInt8); + +-- Conversion from string acts like parsing, and for composite data types like Array, Tuple, it works in the same way as from the `Values` data format: + +SELECT CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String)); + +-- ' +-- While for simple data types it does not interpret escape sequences: + +SELECT arrayJoin(CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String))) AS x, CAST($$wo\'rld\\$$ AS FixedString(9)) AS y; + +-- The operator is case-insensitive: + +SELECT CAST(123 AS String); +SELECT cast(123 AS String); +SELECT Cast(123 AS String); + +-- Conversion from a floating point value to an integer will involve truncation towards zero: + +SELECT CAST(1.9, 'Int64'), CAST(-1.9, 'Int64'); + +-- Conversion from NULL into a non-Nullable type will throw an exception, as well as conversions from denormal floating point numbers (NaN, inf, -inf) to an integer, or conversion between arrays of different dimensions. + +-- However, you might find it amusing that an empty array of Nothing data type can be converted to arrays of any dimensions: + +SELECT [] AS x, CAST(x AS Array(Array(Array(Tuple(UInt64, String))))) AS y, toTypeName(x), toTypeName(y); + + +-- 2. The functional form of this operator: `CAST(value, 'Type')`: + +SELECT CAST(123, 'String'); + +-- This form is equivalent. Keep in mind that the type has to be a constant expression: + +SELECT CAST(123, 'Str'||'ing'); -- this works. + +-- This does not work: SELECT materialize('String') AS type, CAST(123, type); + +-- It is also case-insensitive: + +SELECT CasT(123, 'String'); + +-- The functional form exists for the consistency of implementation (as every operator also exists in the functional form and the functional form is represented in the query's Abstract Syntax Tree). 
Anyway, the functional form also makes sense for users, when they need to construct a data type name from a constant expression, or when they want to generate a query programmatically. + +-- It's worth noting that the operator form does not allow to specify the type name as a string literal: + +-- This does not work: SELECT CAST(123 AS 'String'); + +-- By only allowing it as an identifier, either bare word: + +SELECT CAST(123 AS String); + +-- Or as a MySQL or PostgreSQL quoted identifiers: + +SELECT CAST(123 AS `String`); +SELECT CAST(123 AS "String"); + +-- While the functional form only allows the type name as a string literal: + +SELECT CAST(123, 'String'); -- works +SELECT CAST(123, String); -- { serverError UNKNOWN_IDENTIFIER } + +-- However, you can cheat: + +SELECT 'String' AS String, CAST(123, String); + + +-- 3. The internal function `_CAST` which is different from `CAST` only by being not dependent on the value of `cast_keep_nullable` setting and other settings. + +-- This is needed when ClickHouse has to persist an expression for future use, like in table definitions, including primary and partition key and other indices. + +-- The function is not intended for being used directly. When a user uses a regular `CAST` operator or function in a table definition, it is transparently converted to `_CAST` to persist its behavior. However, the user can still use the internal version directly: + +SELECT _CAST(x, 'UInt8') AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('456')); + +-- There is no operator form of this function: + +-- does not work, here UInt8 is interpreted as an alias for the value: +SELECT _CAST(123 AS UInt8); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT CAST(123 AS UInt8); -- works + + +-- 4. PostgreSQL-style cast syntax `::` + +SELECT 123::String; + +-- It has a difference from the `CAST` operator: if it is applied to a simple literal value, instead of performing a type conversion, it invokes the SQL parser directly on the corresponding text fragment of the query. The most important case will be the floating-point and decimal types. + +-- In this example, we parse `1.1` as Decimal and not involving any type conversion: + +SELECT 1.1::Decimal(30, 20); + +-- In this example, `1.1` is first parsed as usual, yielding a Float64 value, and then converted to Decimal, producing a wrong result: + +SELECT CAST(1.1 AS Decimal(30, 20)); + +-- We can change this behavior in the future. + +-- Another example: + +SELECT -1::UInt64; -- { serverError CANNOT_PARSE_NUMBER } + +SELECT CAST(-1 AS UInt64); -- conversion with overflow + +-- For composite data types, if a value is a literal, it is parsed directly: + +SELECT [1.1, 2.3]::Array(Decimal(30, 20)); + +-- But if the value contains expressions, the usage of the `::` operator will be equivalent to invoking the CAST operator on the expression: + +SELECT [1.1, 2.3 + 0]::Array(Decimal(30, 20)); + +-- The automatic column name for the result of an application of the `::` operator may be the same as for the result of an application of the CAST operator to a string containing the corresponding fragment of the query or to a corresponding expression: + +SELECT 1.1::Decimal(30, 20), CAST('1.1' AS Decimal(30, 20)), (1+1)::UInt8 FORMAT Vertical; + +-- The operator has the highest priority among others: + +SELECT 1-1::String; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- But one interesting example is the unary minus. 
Here the minus is not an operator, but part of the numeric literal: + +SELECT -1::String; + +-- Here it is an operator: + +SELECT 1 AS x, -x::String; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +-- 5. Accurate casting functions: `accurateCast`, `accurateCastOrNull`, `accurateCastOrDefault`. + +-- These functions check if the value is exactly representable in the target data type. + +-- The function `accurateCast` performs the conversion or throws an exception if the value is not exactly representable: + +SELECT accurateCast(1.123456789, 'Float32'); -- { serverError CANNOT_CONVERT_TYPE } + +-- The function `accurateCastOrNull` always wraps the target type into Nullable, and returns NULL if the value is not exactly representable: + +SELECT accurateCastOrNull(1.123456789, 'Float32'); + +-- The function `accurateCastOrDefault` takes an additional parameter, which must be of the target type, and returns it if the value is not exactly representable: + +SELECT accurateCastOrDefault(-1, 'UInt64', 0::UInt64); + +-- These functions are case-sensitive and there are no corresponding operators: + +SELECT ACCURATECAST(1, 'String'); -- { serverError UNKNOWN_FUNCTION }. + + +-- 6. Explicit conversion functions: + +-- `toString`, `toFixedString`, +-- `toUInt8`, `toUInt16`, `toUInt32`, `toUInt64`, `toUInt128`, `toUInt256`, +-- `toInt8`, `toInt16`, `toInt32`, `toInt64`, `toInt128`, `toInt256`, +-- `toFloat32`, `toFloat64`, +-- `toDecimal32`, `toDecimal64`, `toDecimal128`, `toDecimal256`, +-- `toDate`, `toDate32`, `toDateTime`, `toDateTime64`, +-- `toUUID`, `toIPv4`, `toIPv6`, +-- `toIntervalNanosecond`, `toIntervalMicrosecond`, `toIntervalMillisecond`, +-- `toIntervalSecond`, `toIntervalMinute`, `toIntervalHour`, +-- `toIntervalDay`, `toIntervalWeek`, `toIntervalMonth`, `toIntervalQuarter`, `toIntervalYear` + +-- These functions work under the same rules as the CAST operator, and can be thought as a elementary implementation parts of that operator. They allow implementation defined overflow while converting between numeric types. + +SELECT toUInt8(-1); + +-- These are ClickHouse-native conversion functions. They take an argument with the input value, and for some of data types (`FixedString`, `DateTime`, `DateTime64`, `Decimal`s) the subsequent arguments are constant expressions, defining the parameters of these data types, or the rules to interpret the source value. + +SELECT toFloat64(123); -- no arguments +SELECT toFixedString('Hello', 10) FORMAT TSV; -- the parameter of the FixedString data type, the function returns FixedString(10) +SELECT toFixedString('Hello', 5 + 5) FORMAT TSV; -- it can be a constant expression + +SELECT toDecimal32('123.456', 2); -- the scale of the Decimal data type + +SELECT toDateTime('2024-04-25 01:02:03', 'Europe/Amsterdam'); -- the time zone of DateTime +SELECT toDateTime64('2024-04-25 01:02:03', 6, 'Europe/Amsterdam'); -- the scale of DateTime64 and its time zone + +-- The length of FixedString and the scale of Decimal and DateTime64 types are mandatory arguments, while the time zone of the DateTime data type is optional. + +-- If the time zone is not specified, the time zone of the argument's data type is used, and if the argument is not a date time, the session time zone is used. + +SELECT toDateTime('2024-04-25 01:02:03'); +SELECT toDateTime64('2024-04-25 01:02:03', 6); + +-- Here the time zone can be specified as the rule of interpreration of the value during conversion: + +SELECT toString(now(), 'Europe/Amsterdam'); +SELECT toString(now()); + + +-- 7. 
SQL-compatibility type-defining operators: + +SELECT DATE '2024-04-25', TIMESTAMP '2024-01-01 02:03:04', INTERVAL 1 MINUTE, INTERVAL '12 hour'; + +-- These operators are interpreted as the corresponding explicit conversion functions. + + +-- 8. SQL-compatibility aliases for explicit conversion functions: + +SELECT DATE('2024-04-25'), TIMESTAMP('2024-01-01 02:03:04'), FROM_UNIXTIME(1234567890); + +-- These functions exist for compatibility with MySQL. They are case-insensive. + +SELECT date '2024-04-25', timeSTAMP('2024-01-01 02:03:04'), From_Unixtime(1234567890); + + +-- 9. Specialized conversion functions: + +-- `parseDateTimeBestEffort`, `parseDateTimeBestEffortUS`, `parseDateTime64BestEffort`, `parseDateTime64BestEffortUS`, `toUnixTimestamp` + +-- These functions are similar to explicit conversion functions, but provide special rules on how the conversion is performed. + +SELECT parseDateTimeBestEffort('25 Apr 1986 1pm'); + + +-- 10. Functions for converting between different components or rounding of date and time data types. + +SELECT toDayOfMonth(toDateTime(1234567890)); + +-- These functions are coverted in a separate topic. From f5a13a023bc1d19b688ec98902411252796f96d2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 16 Mar 2024 18:58:16 +0100 Subject: [PATCH 349/374] Rename CacheGuard -> CachePriorityGuard (because it is more correct) --- src/Interpreters/Cache/EvictionCandidates.cpp | 2 +- src/Interpreters/Cache/EvictionCandidates.h | 2 +- src/Interpreters/Cache/FileCache.cpp | 6 ++-- src/Interpreters/Cache/FileCache.h | 8 ++--- src/Interpreters/Cache/Guards.h | 22 ++++++------- src/Interpreters/Cache/IFileCachePriority.h | 26 +++++++-------- .../Cache/LRUFileCachePriority.cpp | 26 +++++++-------- src/Interpreters/Cache/LRUFileCachePriority.h | 32 +++++++++---------- src/Interpreters/Cache/QueryLimit.cpp | 12 +++---- src/Interpreters/Cache/QueryLimit.h | 12 +++---- .../Cache/SLRUFileCachePriority.cpp | 24 +++++++------- .../Cache/SLRUFileCachePriority.h | 22 ++++++------- 12 files changed, 97 insertions(+), 97 deletions(-) diff --git a/src/Interpreters/Cache/EvictionCandidates.cpp b/src/Interpreters/Cache/EvictionCandidates.cpp index 7dceab4f95f..f1ae2baa347 100644 --- a/src/Interpreters/Cache/EvictionCandidates.cpp +++ b/src/Interpreters/Cache/EvictionCandidates.cpp @@ -32,7 +32,7 @@ void EvictionCandidates::add(LockedKey & locked_key, const FileSegmentMetadataPt ++candidates_size; } -void EvictionCandidates::evict(FileCacheQueryLimit::QueryContext * query_context, const CacheGuard::Lock & lock) +void EvictionCandidates::evict(FileCacheQueryLimit::QueryContext * query_context, const CachePriorityGuard::Lock & lock) { if (candidates.empty()) return; diff --git a/src/Interpreters/Cache/EvictionCandidates.h b/src/Interpreters/Cache/EvictionCandidates.h index 0557962d97f..e817d33d5fe 100644 --- a/src/Interpreters/Cache/EvictionCandidates.h +++ b/src/Interpreters/Cache/EvictionCandidates.h @@ -11,7 +11,7 @@ public: void add(LockedKey & locked_key, const FileSegmentMetadataPtr & candidate); - void evict(FileCacheQueryLimit::QueryContext * query_context, const CacheGuard::Lock &); + void evict(FileCacheQueryLimit::QueryContext * query_context, const CachePriorityGuard::Lock &); size_t size() const { return candidates_size; } diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index ea40ffcfa3c..65b6a3a172d 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -183,13 +183,13 @@ void FileCache::initialize() 
is_initialized = true; } -CacheGuard::Lock FileCache::lockCache() const +CachePriorityGuard::Lock FileCache::lockCache() const { ProfileEventTimeIncrement watch(ProfileEvents::FilesystemCacheLockCacheMicroseconds); return cache_guard.lock(); } -CacheGuard::Lock FileCache::tryLockCache(std::optional acquire_timeout) const +CachePriorityGuard::Lock FileCache::tryLockCache(std::optional acquire_timeout) const { return acquire_timeout.has_value() ? cache_guard.tryLockFor(acquire_timeout.value()) : cache_guard.tryLock(); } @@ -706,7 +706,7 @@ KeyMetadata::iterator FileCache::addFileSegment( size_t size, FileSegment::State state, const CreateFileSegmentSettings & create_settings, - const CacheGuard::Lock * lock) + const CachePriorityGuard::Lock * lock) { /// Create a file_segment_metadata and put it in `files` map by [key][offset]. diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 007c4fd9483..8ea5f4dab40 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -173,8 +173,8 @@ public: void deactivateBackgroundOperations(); - CacheGuard::Lock lockCache() const; - CacheGuard::Lock tryLockCache(std::optional acquire_timeout = std::nullopt) const; + CachePriorityGuard::Lock lockCache() const; + CachePriorityGuard::Lock tryLockCache(std::optional acquire_timeout = std::nullopt) const; std::vector sync(); @@ -208,7 +208,7 @@ private: CacheMetadata metadata; FileCachePriorityPtr main_priority; - mutable CacheGuard cache_guard; + mutable CachePriorityGuard cache_guard; struct HitsCountStash { @@ -280,7 +280,7 @@ private: size_t size, FileSegment::State state, const CreateFileSegmentSettings & create_settings, - const CacheGuard::Lock *); + const CachePriorityGuard::Lock *); }; } diff --git a/src/Interpreters/Cache/Guards.h b/src/Interpreters/Cache/Guards.h index 0ac7cb80483..6193ee38755 100644 --- a/src/Interpreters/Cache/Guards.h +++ b/src/Interpreters/Cache/Guards.h @@ -10,17 +10,17 @@ namespace DB * 2. KeyGuard::Lock (hold till the end of the method) * * FileCache::tryReserve - * 1. CacheGuard::Lock + * 1. CachePriorityGuard::Lock * 2. KeyGuard::Lock (taken without metadata lock) * 3. any number of KeyGuard::Lock's for files which are going to be evicted (taken via metadata lock) * * FileCache::removeIfExists - * 1. CacheGuard::Lock + * 1. CachePriorityGuard::Lock * 2. KeyGuard::Lock (taken via metadata lock) * 3. FileSegmentGuard::Lock * * FileCache::removeAllReleasable - * 1. CacheGuard::Lock + * 1. CachePriorityGuard::Lock * 2. any number of KeyGuard::Lock's locks (takken via metadata lock), but at a moment of time only one key lock can be hold * 3. FileSegmentGuard::Lock * @@ -34,23 +34,23 @@ namespace DB * 2. FileSegmentGuard::Lock * * FileSegment::complete - * 1. CacheGuard::Lock + * 1. CachePriorityGuard::Lock * 2. KeyGuard::Lock (taken without metadata lock) * 3. FileSegmentGuard::Lock * * Rules: - * 1. Priority of locking: CacheGuard::Lock > CacheMetadataGuard::Lock > KeyGuard::Lock > FileSegmentGuard::Lock - * 2. If we take more than one key lock at a moment of time, we need to take CacheGuard::Lock (example: tryReserve()) + * 1. Priority of locking: CachePriorityGuard::Lock > CacheMetadataGuard::Lock > KeyGuard::Lock > FileSegmentGuard::Lock + * 2. If we take more than one key lock at a moment of time, we need to take CachePriorityGuard::Lock (example: tryReserve()) * * - * _CacheGuard_ + * _CachePriorityGuard_ * 1. FileCache::tryReserve * 2. FileCache::removeIfExists(key) * 3. FileCache::removeAllReleasable * 4. 
FileSegment::complete * * _KeyGuard_ _CacheMetadataGuard_ - * 1. all from CacheGuard 1. getOrSet/get/set + * 1. all from CachePriorityGuard 1. getOrSet/get/set * 2. getOrSet/get/Set * * *This table does not include locks taken for introspection and system tables. @@ -59,11 +59,11 @@ namespace DB /** * Cache priority queue guard. */ -struct CacheGuard : private boost::noncopyable +struct CachePriorityGuard : private boost::noncopyable { using Mutex = std::timed_mutex; - /// struct is used (not keyword `using`) to make CacheGuard::Lock non-interchangable with other guards locks - /// so, we wouldn't be able to pass CacheGuard::Lock to a function which accepts KeyGuard::Lock, for example + /// struct is used (not keyword `using`) to make CachePriorityGuard::Lock non-interchangable with other guards locks + /// so, we wouldn't be able to pass CachePriorityGuard::Lock to a function which accepts KeyGuard::Lock, for example struct Lock : public std::unique_lock { using Base = std::unique_lock; diff --git a/src/Interpreters/Cache/IFileCachePriority.h b/src/Interpreters/Cache/IFileCachePriority.h index bc036166940..58011780323 100644 --- a/src/Interpreters/Cache/IFileCachePriority.h +++ b/src/Interpreters/Cache/IFileCachePriority.h @@ -43,11 +43,11 @@ public: virtual EntryPtr getEntry() const = 0; - virtual size_t increasePriority(const CacheGuard::Lock &) = 0; + virtual size_t increasePriority(const CachePriorityGuard::Lock &) = 0; virtual void updateSize(int64_t size) = 0; - virtual void remove(const CacheGuard::Lock &) = 0; + virtual void remove(const CachePriorityGuard::Lock &) = 0; virtual void invalidate() = 0; @@ -57,13 +57,13 @@ public: virtual ~IFileCachePriority() = default; - size_t getElementsLimit(const CacheGuard::Lock &) const { return max_elements; } + size_t getElementsLimit(const CachePriorityGuard::Lock &) const { return max_elements; } - size_t getSizeLimit(const CacheGuard::Lock &) const { return max_size; } + size_t getSizeLimit(const CachePriorityGuard::Lock &) const { return max_size; } - virtual size_t getSize(const CacheGuard::Lock &) const = 0; + virtual size_t getSize(const CachePriorityGuard::Lock &) const = 0; - virtual size_t getElementsCount(const CacheGuard::Lock &) const = 0; + virtual size_t getElementsCount(const CachePriorityGuard::Lock &) const = 0; /// Throws exception if there is not enough size to fit it. virtual IteratorPtr add( /// NOLINT @@ -71,7 +71,7 @@ public: size_t offset, size_t size, const UserInfo & user, - const CacheGuard::Lock &, + const CachePriorityGuard::Lock &, bool best_effort = false) = 0; /// `reservee` is the entry for which are reserving now. @@ -79,11 +79,11 @@ public: /// for the corresponding file segment. 
virtual bool canFit( /// NOLINT size_t size, - const CacheGuard::Lock &, + const CachePriorityGuard::Lock &, IteratorPtr reservee = nullptr, bool best_effort = false) const = 0; - virtual void shuffle(const CacheGuard::Lock &) = 0; + virtual void shuffle(const CachePriorityGuard::Lock &) = 0; struct IPriorityDump { @@ -91,9 +91,9 @@ public: }; using PriorityDumpPtr = std::shared_ptr; - virtual PriorityDumpPtr dump(const CacheGuard::Lock &) = 0; + virtual PriorityDumpPtr dump(const CachePriorityGuard::Lock &) = 0; - using FinalizeEvictionFunc = std::function; + using FinalizeEvictionFunc = std::function; virtual bool collectCandidatesForEviction( size_t size, FileCacheReserveStat & stat, @@ -101,9 +101,9 @@ public: IFileCachePriority::IteratorPtr reservee, FinalizeEvictionFunc & finalize_eviction_func, const UserID & user_id, - const CacheGuard::Lock &) = 0; + const CachePriorityGuard::Lock &) = 0; - virtual bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock &) = 0; + virtual bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock &) = 0; protected: IFileCachePriority(size_t max_size_, size_t max_elements_); diff --git a/src/Interpreters/Cache/LRUFileCachePriority.cpp b/src/Interpreters/Cache/LRUFileCachePriority.cpp index bce03b60024..08e65b577ca 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.cpp +++ b/src/Interpreters/Cache/LRUFileCachePriority.cpp @@ -43,13 +43,13 @@ IFileCachePriority::IteratorPtr LRUFileCachePriority::add( /// NOLINT size_t offset, size_t size, const UserInfo &, - const CacheGuard::Lock & lock, + const CachePriorityGuard::Lock & lock, bool) { return std::make_shared(add(std::make_shared(key_metadata->key, offset, size, key_metadata), lock)); } -LRUFileCachePriority::LRUIterator LRUFileCachePriority::add(EntryPtr entry, const CacheGuard::Lock & lock) +LRUFileCachePriority::LRUIterator LRUFileCachePriority::add(EntryPtr entry, const CachePriorityGuard::Lock & lock) { if (entry->size == 0) { @@ -93,7 +93,7 @@ LRUFileCachePriority::LRUIterator LRUFileCachePriority::add(EntryPtr entry, cons return LRUIterator(this, iterator); } -LRUFileCachePriority::LRUQueue::iterator LRUFileCachePriority::remove(LRUQueue::iterator it, const CacheGuard::Lock &) +LRUFileCachePriority::LRUQueue::iterator LRUFileCachePriority::remove(LRUQueue::iterator it, const CachePriorityGuard::Lock &) { /// If size is 0, entry is invalidated, current_elements_num was already updated. 
const auto & entry = **it; @@ -150,7 +150,7 @@ bool LRUFileCachePriority::LRUIterator::operator ==(const LRUIterator & other) c return cache_priority == other.cache_priority && iterator == other.iterator; } -void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock & lock) +void LRUFileCachePriority::iterate(IterateFunc && func, const CachePriorityGuard::Lock & lock) { for (auto it = queue.begin(); it != queue.end();) { @@ -201,7 +201,7 @@ void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock & bool LRUFileCachePriority::canFit( /// NOLINT size_t size, - const CacheGuard::Lock & lock, + const CachePriorityGuard::Lock & lock, IteratorPtr, bool) const { @@ -212,7 +212,7 @@ bool LRUFileCachePriority::canFit( size_t size, size_t released_size_assumption, size_t released_elements_assumption, - const CacheGuard::Lock &) const + const CachePriorityGuard::Lock &) const { return (max_size == 0 || (state->current_size + size - released_size_assumption <= max_size)) && (max_elements == 0 || state->current_elements_num + 1 - released_elements_assumption <= max_elements); @@ -225,7 +225,7 @@ bool LRUFileCachePriority::collectCandidatesForEviction( IFileCachePriority::IteratorPtr, FinalizeEvictionFunc &, const UserID &, - const CacheGuard::Lock & lock) + const CachePriorityGuard::Lock & lock) { if (canFit(size, lock)) return true; @@ -264,7 +264,7 @@ bool LRUFileCachePriority::collectCandidatesForEviction( return can_fit(); } -LRUFileCachePriority::LRUIterator LRUFileCachePriority::move(LRUIterator & it, LRUFileCachePriority & other, const CacheGuard::Lock &) +LRUFileCachePriority::LRUIterator LRUFileCachePriority::move(LRUIterator & it, LRUFileCachePriority & other, const CachePriorityGuard::Lock &) { const auto & entry = *it.getEntry(); if (entry.size == 0) @@ -297,7 +297,7 @@ LRUFileCachePriority::LRUIterator LRUFileCachePriority::move(LRUIterator & it, L return LRUIterator(this, it.iterator); } -IFileCachePriority::PriorityDumpPtr LRUFileCachePriority::dump(const CacheGuard::Lock & lock) +IFileCachePriority::PriorityDumpPtr LRUFileCachePriority::dump(const CachePriorityGuard::Lock & lock) { std::vector res; iterate([&](LockedKey &, const FileSegmentMetadataPtr & segment_metadata) @@ -309,7 +309,7 @@ IFileCachePriority::PriorityDumpPtr LRUFileCachePriority::dump(const CacheGuard: } bool LRUFileCachePriority::modifySizeLimits( - size_t max_size_, size_t max_elements_, double /* size_ratio_ */, const CacheGuard::Lock & lock) + size_t max_size_, size_t max_elements_, double /* size_ratio_ */, const CachePriorityGuard::Lock & lock) { if (max_size == max_size_ && max_elements == max_elements_) return false; /// Nothing to change. 
@@ -353,7 +353,7 @@ bool LRUFileCachePriority::modifySizeLimits( return true; } -void LRUFileCachePriority::LRUIterator::remove(const CacheGuard::Lock & lock) +void LRUFileCachePriority::LRUIterator::remove(const CachePriorityGuard::Lock & lock) { assertValid(); cache_priority->remove(iterator, lock); @@ -389,7 +389,7 @@ void LRUFileCachePriority::LRUIterator::updateSize(int64_t size) entry->size += size; } -size_t LRUFileCachePriority::LRUIterator::increasePriority(const CacheGuard::Lock &) +size_t LRUFileCachePriority::LRUIterator::increasePriority(const CachePriorityGuard::Lock &) { assertValid(); cache_priority->queue.splice(cache_priority->queue.end(), cache_priority->queue, iterator); @@ -402,7 +402,7 @@ void LRUFileCachePriority::LRUIterator::assertValid() const throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to use invalid iterator"); } -void LRUFileCachePriority::shuffle(const CacheGuard::Lock &) +void LRUFileCachePriority::shuffle(const CachePriorityGuard::Lock &) { std::vector its; its.reserve(queue.size()); diff --git a/src/Interpreters/Cache/LRUFileCachePriority.h b/src/Interpreters/Cache/LRUFileCachePriority.h index a74a4b8b621..dcd4ee0a24c 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.h +++ b/src/Interpreters/Cache/LRUFileCachePriority.h @@ -24,13 +24,13 @@ protected: public: LRUFileCachePriority(size_t max_size_, size_t max_elements_, StatePtr state_ = nullptr); - size_t getSize(const CacheGuard::Lock &) const override { return state->current_size; } + size_t getSize(const CachePriorityGuard::Lock &) const override { return state->current_size; } - size_t getElementsCount(const CacheGuard::Lock &) const override { return state->current_elements_num; } + size_t getElementsCount(const CachePriorityGuard::Lock &) const override { return state->current_elements_num; } bool canFit( /// NOLINT size_t size, - const CacheGuard::Lock &, + const CachePriorityGuard::Lock &, IteratorPtr reservee = nullptr, bool best_effort = false) const override; @@ -39,7 +39,7 @@ public: size_t offset, size_t size, const UserInfo & user, - const CacheGuard::Lock &, + const CachePriorityGuard::Lock &, bool best_effort = false) override; bool collectCandidatesForEviction( @@ -49,9 +49,9 @@ public: IFileCachePriority::IteratorPtr reservee, FinalizeEvictionFunc & finalize_eviction_func, const UserID & user_id, - const CacheGuard::Lock &) override; + const CachePriorityGuard::Lock &) override; - void shuffle(const CacheGuard::Lock &) override; + void shuffle(const CachePriorityGuard::Lock &) override; struct LRUPriorityDump : public IPriorityDump { @@ -59,11 +59,11 @@ public: explicit LRUPriorityDump(const std::vector & infos_) : infos(infos_) {} void merge(const LRUPriorityDump & other) { infos.insert(infos.end(), other.infos.begin(), other.infos.end()); } }; - PriorityDumpPtr dump(const CacheGuard::Lock &) override; + PriorityDumpPtr dump(const CachePriorityGuard::Lock &) override; - void pop(const CacheGuard::Lock & lock) { remove(queue.begin(), lock); } + void pop(const CachePriorityGuard::Lock & lock) { remove(queue.begin(), lock); } - bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock &) override; + bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock &) override; private: class LRUIterator; @@ -77,9 +77,9 @@ private: void updateElementsCount(int64_t num); void updateSize(int64_t size); - bool canFit(size_t size, size_t released_size_assumption, size_t 
released_elements_assumption, const CacheGuard::Lock &) const; + bool canFit(size_t size, size_t released_size_assumption, size_t released_elements_assumption, const CachePriorityGuard::Lock &) const; - LRUQueue::iterator remove(LRUQueue::iterator it, const CacheGuard::Lock &); + LRUQueue::iterator remove(LRUQueue::iterator it, const CachePriorityGuard::Lock &); enum class IterationResult { @@ -88,10 +88,10 @@ private: REMOVE_AND_CONTINUE, }; using IterateFunc = std::function; - void iterate(IterateFunc && func, const CacheGuard::Lock &); + void iterate(IterateFunc && func, const CachePriorityGuard::Lock &); - LRUIterator move(LRUIterator & it, LRUFileCachePriority & other, const CacheGuard::Lock &); - LRUIterator add(EntryPtr entry, const CacheGuard::Lock &); + LRUIterator move(LRUIterator & it, LRUFileCachePriority & other, const CachePriorityGuard::Lock &); + LRUIterator add(EntryPtr entry, const CachePriorityGuard::Lock &); }; class LRUFileCachePriority::LRUIterator : public IFileCachePriority::Iterator @@ -108,9 +108,9 @@ public: EntryPtr getEntry() const override { return *iterator; } - size_t increasePriority(const CacheGuard::Lock &) override; + size_t increasePriority(const CachePriorityGuard::Lock &) override; - void remove(const CacheGuard::Lock &) override; + void remove(const CachePriorityGuard::Lock &) override; void invalidate() override; diff --git a/src/Interpreters/Cache/QueryLimit.cpp b/src/Interpreters/Cache/QueryLimit.cpp index ba4f5017772..9421005dc92 100644 --- a/src/Interpreters/Cache/QueryLimit.cpp +++ b/src/Interpreters/Cache/QueryLimit.cpp @@ -16,7 +16,7 @@ static bool isQueryInitialized() && !CurrentThread::getQueryId().empty(); } -FileCacheQueryLimit::QueryContextPtr FileCacheQueryLimit::tryGetQueryContext(const CacheGuard::Lock &) +FileCacheQueryLimit::QueryContextPtr FileCacheQueryLimit::tryGetQueryContext(const CachePriorityGuard::Lock &) { if (!isQueryInitialized()) return nullptr; @@ -25,7 +25,7 @@ FileCacheQueryLimit::QueryContextPtr FileCacheQueryLimit::tryGetQueryContext(con return (query_iter == query_map.end()) ? 
nullptr : query_iter->second; } -void FileCacheQueryLimit::removeQueryContext(const std::string & query_id, const CacheGuard::Lock &) +void FileCacheQueryLimit::removeQueryContext(const std::string & query_id, const CachePriorityGuard::Lock &) { auto query_iter = query_map.find(query_id); if (query_iter == query_map.end()) @@ -41,7 +41,7 @@ void FileCacheQueryLimit::removeQueryContext(const std::string & query_id, const FileCacheQueryLimit::QueryContextPtr FileCacheQueryLimit::getOrSetQueryContext( const std::string & query_id, const ReadSettings & settings, - const CacheGuard::Lock &) + const CachePriorityGuard::Lock &) { if (query_id.empty()) return nullptr; @@ -70,7 +70,7 @@ void FileCacheQueryLimit::QueryContext::add( size_t offset, size_t size, const FileCache::UserInfo & user, - const CacheGuard::Lock & lock) + const CachePriorityGuard::Lock & lock) { auto it = getPriority().add(key_metadata, offset, size, user, lock); auto [_, inserted] = records.emplace(FileCacheKeyAndOffset{key_metadata->key, offset}, it); @@ -87,7 +87,7 @@ void FileCacheQueryLimit::QueryContext::add( void FileCacheQueryLimit::QueryContext::remove( const Key & key, size_t offset, - const CacheGuard::Lock & lock) + const CachePriorityGuard::Lock & lock) { auto record = records.find({key, offset}); if (record == records.end()) @@ -100,7 +100,7 @@ void FileCacheQueryLimit::QueryContext::remove( IFileCachePriority::IteratorPtr FileCacheQueryLimit::QueryContext::tryGet( const Key & key, size_t offset, - const CacheGuard::Lock &) + const CachePriorityGuard::Lock &) { auto it = records.find({key, offset}); if (it == records.end()) diff --git a/src/Interpreters/Cache/QueryLimit.h b/src/Interpreters/Cache/QueryLimit.h index 419126601f0..7553eff82ba 100644 --- a/src/Interpreters/Cache/QueryLimit.h +++ b/src/Interpreters/Cache/QueryLimit.h @@ -13,14 +13,14 @@ public: class QueryContext; using QueryContextPtr = std::shared_ptr; - QueryContextPtr tryGetQueryContext(const CacheGuard::Lock & lock); + QueryContextPtr tryGetQueryContext(const CachePriorityGuard::Lock & lock); QueryContextPtr getOrSetQueryContext( const std::string & query_id, const ReadSettings & settings, - const CacheGuard::Lock &); + const CachePriorityGuard::Lock &); - void removeQueryContext(const std::string & query_id, const CacheGuard::Lock &); + void removeQueryContext(const std::string & query_id, const CachePriorityGuard::Lock &); class QueryContext { @@ -38,19 +38,19 @@ public: Priority::IteratorPtr tryGet( const Key & key, size_t offset, - const CacheGuard::Lock &); + const CachePriorityGuard::Lock &); void add( KeyMetadataPtr key_metadata, size_t offset, size_t size, const FileCacheUserInfo & user, - const CacheGuard::Lock &); + const CachePriorityGuard::Lock &); void remove( const Key & key, size_t offset, - const CacheGuard::Lock &); + const CachePriorityGuard::Lock &); private: using Records = std::unordered_map; diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.cpp b/src/Interpreters/Cache/SLRUFileCachePriority.cpp index 43f1c1012ba..1767cb94be7 100644 --- a/src/Interpreters/Cache/SLRUFileCachePriority.cpp +++ b/src/Interpreters/Cache/SLRUFileCachePriority.cpp @@ -34,19 +34,19 @@ SLRUFileCachePriority::SLRUFileCachePriority( probationary_queue.max_size, protected_queue.max_elements); } -size_t SLRUFileCachePriority::getSize(const CacheGuard::Lock & lock) const +size_t SLRUFileCachePriority::getSize(const CachePriorityGuard::Lock & lock) const { return protected_queue.getSize(lock) + probationary_queue.getSize(lock); } -size_t 
SLRUFileCachePriority::getElementsCount(const CacheGuard::Lock & lock) const +size_t SLRUFileCachePriority::getElementsCount(const CachePriorityGuard::Lock & lock) const { return protected_queue.getElementsCount(lock) + probationary_queue.getElementsCount(lock); } bool SLRUFileCachePriority::canFit( /// NOLINT size_t size, - const CacheGuard::Lock & lock, + const CachePriorityGuard::Lock & lock, IteratorPtr reservee, bool best_effort) const { @@ -70,7 +70,7 @@ IFileCachePriority::IteratorPtr SLRUFileCachePriority::add( /// NOLINT size_t offset, size_t size, const UserInfo &, - const CacheGuard::Lock & lock, + const CachePriorityGuard::Lock & lock, bool is_startup) { if (is_startup) @@ -103,7 +103,7 @@ bool SLRUFileCachePriority::collectCandidatesForEviction( IFileCachePriority::IteratorPtr reservee, FinalizeEvictionFunc & finalize_eviction_func, const UserID & user_id, - const CacheGuard::Lock & lock) + const CachePriorityGuard::Lock & lock) { /// If `it` is nullptr, then it is the first space reservation attempt /// for a corresponding file segment, so it will be directly put into probationary queue. @@ -143,7 +143,7 @@ bool SLRUFileCachePriority::collectCandidatesForEviction( && !probationary_queue.collectCandidatesForEviction(size_to_downgrade, stat, res, reservee, noop, user_id, lock)) return false; - finalize_eviction_func = [=, this](const CacheGuard::Lock & lk) mutable + finalize_eviction_func = [=, this](const CachePriorityGuard::Lock & lk) mutable { for (const auto & [key, key_candidates] : *downgrade_candidates) { @@ -159,7 +159,7 @@ bool SLRUFileCachePriority::collectCandidatesForEviction( return true; } -void SLRUFileCachePriority::increasePriority(SLRUIterator & iterator, const CacheGuard::Lock & lock) +void SLRUFileCachePriority::increasePriority(SLRUIterator & iterator, const CachePriorityGuard::Lock & lock) { /// If entry is already in protected queue, /// we only need to increase its priority within the protected queue. @@ -242,7 +242,7 @@ void SLRUFileCachePriority::increasePriority(SLRUIterator & iterator, const Cach iterator.is_protected = true; } -IFileCachePriority::PriorityDumpPtr SLRUFileCachePriority::dump(const CacheGuard::Lock & lock) +IFileCachePriority::PriorityDumpPtr SLRUFileCachePriority::dump(const CachePriorityGuard::Lock & lock) { auto res = dynamic_pointer_cast(probationary_queue.dump(lock)); auto part_res = dynamic_pointer_cast(protected_queue.dump(lock)); @@ -250,14 +250,14 @@ IFileCachePriority::PriorityDumpPtr SLRUFileCachePriority::dump(const CacheGuard return res; } -void SLRUFileCachePriority::shuffle(const CacheGuard::Lock & lock) +void SLRUFileCachePriority::shuffle(const CachePriorityGuard::Lock & lock) { protected_queue.shuffle(lock); probationary_queue.shuffle(lock); } bool SLRUFileCachePriority::modifySizeLimits( - size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock & lock) + size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock & lock) { if (max_size == max_size_ && max_elements == max_elements_ && size_ratio == size_ratio_) return false; /// Nothing to change. 
@@ -287,7 +287,7 @@ SLRUFileCachePriority::EntryPtr SLRUFileCachePriority::SLRUIterator::getEntry() return entry; } -size_t SLRUFileCachePriority::SLRUIterator::increasePriority(const CacheGuard::Lock & lock) +size_t SLRUFileCachePriority::SLRUIterator::increasePriority(const CachePriorityGuard::Lock & lock) { assertValid(); cache_priority->increasePriority(*this, lock); @@ -306,7 +306,7 @@ void SLRUFileCachePriority::SLRUIterator::invalidate() lru_iterator.invalidate(); } -void SLRUFileCachePriority::SLRUIterator::remove(const CacheGuard::Lock & lock) +void SLRUFileCachePriority::SLRUIterator::remove(const CachePriorityGuard::Lock & lock) { assertValid(); lru_iterator.remove(lock); diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.h b/src/Interpreters/Cache/SLRUFileCachePriority.h index d97fa80a6c7..d81ce1bc480 100644 --- a/src/Interpreters/Cache/SLRUFileCachePriority.h +++ b/src/Interpreters/Cache/SLRUFileCachePriority.h @@ -21,13 +21,13 @@ public: LRUFileCachePriority::StatePtr probationary_state_ = nullptr, LRUFileCachePriority::StatePtr protected_state_ = nullptr); - size_t getSize(const CacheGuard::Lock & lock) const override; + size_t getSize(const CachePriorityGuard::Lock & lock) const override; - size_t getElementsCount(const CacheGuard::Lock &) const override; + size_t getElementsCount(const CachePriorityGuard::Lock &) const override; bool canFit( /// NOLINT size_t size, - const CacheGuard::Lock &, + const CachePriorityGuard::Lock &, IteratorPtr reservee = nullptr, bool best_effort = false) const override; @@ -36,7 +36,7 @@ public: size_t offset, size_t size, const UserInfo & user, - const CacheGuard::Lock &, + const CachePriorityGuard::Lock &, bool is_startup = false) override; bool collectCandidatesForEviction( @@ -46,13 +46,13 @@ public: IFileCachePriority::IteratorPtr reservee, FinalizeEvictionFunc & finalize_eviction_func, const UserID & user_id, - const CacheGuard::Lock &) override; + const CachePriorityGuard::Lock &) override; - void shuffle(const CacheGuard::Lock &) override; + void shuffle(const CachePriorityGuard::Lock &) override; - PriorityDumpPtr dump(const CacheGuard::Lock &) override; + PriorityDumpPtr dump(const CachePriorityGuard::Lock &) override; - bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock &) override; + bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock &) override; private: double size_ratio; @@ -60,7 +60,7 @@ private: LRUFileCachePriority probationary_queue; LoggerPtr log = getLogger("SLRUFileCachePriority"); - void increasePriority(SLRUIterator & iterator, const CacheGuard::Lock & lock); + void increasePriority(SLRUIterator & iterator, const CachePriorityGuard::Lock & lock); }; class SLRUFileCachePriority::SLRUIterator : public IFileCachePriority::Iterator @@ -74,9 +74,9 @@ public: EntryPtr getEntry() const override; - size_t increasePriority(const CacheGuard::Lock &) override; + size_t increasePriority(const CachePriorityGuard::Lock &) override; - void remove(const CacheGuard::Lock &) override; + void remove(const CachePriorityGuard::Lock &) override; void invalidate() override; From 3fcede709e39db1525c8ed58bc6deb39814edd83 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 16 Mar 2024 19:03:42 +0100 Subject: [PATCH 350/374] Fix error --- .../0_stateless/03011_definitive_guide_to_cast.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql index 771123b153a..7cc662eee29 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql @@ -212,12 +212,12 @@ SELECT toDateTime64('2024-04-25 01:02:03', 6, 'Europe/Amsterdam'); -- the scale -- If the time zone is not specified, the time zone of the argument's data type is used, and if the argument is not a date time, the session time zone is used. SELECT toDateTime('2024-04-25 01:02:03'); -SELECT toDateTime64('2024-04-25 01:02:03', 6); +SELECT toDateTime64('2024-04-25 01:02:03.456789', 6); --- Here the time zone can be specified as the rule of interpreration of the value during conversion: +-- Here the time zone can be specified as the rule of interpretation of the value during conversion: -SELECT toString(now(), 'Europe/Amsterdam'); -SELECT toString(now()); +SELECT toString(1710612085::DateTime, 'America/Los_Angeles'); +SELECT toString(1710612085::DateTime); -- 7. SQL-compatibility type-defining operators: From acd4e49dc2cbc61d03cc1177af40c5c0ce11243d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 16 Mar 2024 19:35:56 +0100 Subject: [PATCH 351/374] More guides --- .../03011_definitive_guide_to_cast.reference | 31 ++++-- .../03011_definitive_guide_to_cast.sql | 97 ++++++++++++++++++- 2 files changed, 118 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference index f8f37fa7807..f20fe0b7425 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference @@ -8,11 +8,23 @@ ['Hello','wo\'rld\\'] Hello wo\\\'rld\\\\ wo\'rld\\ wo\\\'rld\\\\ -123 -123 -123 +133 210 +210 +[123,456] 1 -1 [] [] Array(Nothing) Array(Array(Array(Tuple(UInt64, String)))) +1970-01-01 01:00:00 +2009-02-14 00:31:30.123456 +1970-01-01 00:59:59.888889 +2009-02-14 00:31:30 +1970-01-01 01:00:00 +2299-12-31 23:59:59.000000 +2009-02-14 +2009-02-14 +123\0\0 +123 +123 +123 123 123 123 @@ -46,9 +58,16 @@ Hello\0\0\0\0\0 2024-04-25 01:02:03 2024-04-25 01:02:03.000000 2024-04-25 01:02:03 -2024-04-25 01:02:03.000000 -2024-03-16 16:22:33 -2024-03-16 16:22:33 +2024-04-25 01:02:03.456789 +2024-03-16 11:01:25 +2024-03-16 19:01:25 +123 \N \N \N +123 0 0 0 +Nullable(UInt8) UInt8 +123 +123 +123 +\N 2024-04-25 2024-01-01 02:03:04 1 12 2024-04-25 2024-01-01 02:03:04.000000 2009-02-14 00:31:30 2024-04-25 2024-01-01 02:03:04.000000 2009-02-14 00:31:30 diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql index 7cc662eee29..91487666a54 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql @@ -39,11 +39,25 @@ SELECT CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String)); SELECT arrayJoin(CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String))) AS x, CAST($$wo\'rld\\$$ AS FixedString(9)) AS y; --- The operator is case-insensitive: +-- As conversion from String is similar to direct parsing rather than conversion from other types, +-- it can be stricter for numbers by not tolerating overflows in some cases: -SELECT CAST(123 AS String); -SELECT cast(123 AS String); -SELECT Cast(123 AS String); +SELECT CAST(-123 AS UInt8), CAST(1234 AS UInt8); + +SELECT CAST('-123' AS 
UInt8); -- { serverError CANNOT_PARSE_NUMBER } + +-- In some cases it still allows overflows, but it is implementation defined: + +SELECT CAST('1234' AS UInt8); + +-- Parsing from a string does not tolerate extra whitespace characters: + +SELECT CAST(' 123' AS UInt8); -- { serverError CANNOT_PARSE_TEXT } +SELECT CAST('123 ' AS UInt8); -- { serverError CANNOT_PARSE_TEXT } + +-- But for composite data types, it involves a more featured parser, that take care of whitespace inside the data structures: + +SELECT CAST('[ 123 ,456, ]' AS Array(UInt16)); -- Conversion from a floating point value to an integer will involve truncation towards zero: @@ -55,6 +69,54 @@ SELECT CAST(1.9, 'Int64'), CAST(-1.9, 'Int64'); SELECT [] AS x, CAST(x AS Array(Array(Array(Tuple(UInt64, String))))) AS y, toTypeName(x), toTypeName(y); +-- Conversion between numbers and DateTime/Date data types interprets the number as the number of seconds/days from the Unix epoch, +-- where Unix epoch starts from 1970-01-01T00:00:00Z (the midnight of Gregorian year 1970 in UTC), +-- and the number of seconds don't count the coordination seconds, as in Unix. + +-- For example, it is 1 AM in Amsterdam: + +SELECT CAST(0 AS DateTime('Europe/Amsterdam')); + +-- The numbers can be fractional and negative (for DateTime64): + +SELECT CAST(1234567890.123456 AS DateTime64(6, 'Europe/Amsterdam')); +SELECT CAST(-0.111111 AS DateTime64(6, 'Europe/Amsterdam')); + +-- If the result does not fit in the range of the corresponding time data types, it is truncated and saturated to the boundaries: + +SELECT CAST(1234567890.123456 AS DateTime('Europe/Amsterdam')); +SELECT CAST(-1 AS DateTime('Europe/Amsterdam')); + +SELECT CAST(1e20 AS DateTime64(6, 'Europe/Amsterdam')); + +-- A special case is DateTime64(9) - the maximum resolution, where is does not cover the usual range, +-- and in this case, it throws an exception on overflow (I don't mind if we change this behavior in the future): + + SELECT CAST(1e20 AS DateTime64(9, 'Europe/Amsterdam')); -- { serverError DECIMAL_OVERFLOW } + +-- If a number is converted to a Date data type, the value is interpreted as the number of days since the Unix epoch, +-- but if the number is larger than the range of the data type, it is interpreted as a unix timestamp +-- (the number of seconds since the Unix epoch), similarly how it is done for the DateTime data type, +-- for convenience (while the internal representation of Date is the number of days, +-- often people want the unix timestamp to be also parsed into the Date data type): + +SELECT CAST(14289 AS Date); +SELECT CAST(1234567890 AS Date); + +-- When converting to a FixedString, if the length of the result data type is larger than the value, the result is padded with zero bytes: + +SELECT CAST('123' AS FixedString(5)) FORMAT TSV; + +-- But if it does not fit, an exception is thrown: + +SELECT CAST('12345' AS FixedString(3)) FORMAT TSV; -- { serverError TOO_LARGE_STRING_SIZE } + +-- The operator is case-insensitive: + +SELECT CAST(123 AS String); +SELECT cast(123 AS String); +SELECT Cast(123 AS String); + -- 2. The functional form of this operator: `CAST(value, 'Type')`: @@ -219,6 +281,33 @@ SELECT toDateTime64('2024-04-25 01:02:03.456789', 6); SELECT toString(1710612085::DateTime, 'America/Los_Angeles'); SELECT toString(1710612085::DateTime); +-- Functions converting to numeric types, date and datetime, IP and UUID, also have versions with -OrNull and -OrZero fallbacks, +-- that don't throw exceptions on parsing errors. 
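+-- For instance, a string that cannot be parsed at all is expected to give NULL or 0 instead of an exception,
+-- e.g. SELECT toUInt8OrNull('abc'), toUInt8OrZero('abc'); -- presumably \N and 0 (an illustrative query; its output is not part of this test's reference).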
+-- They use the same rules to the accurateCast operator: + +SELECT toUInt8OrNull('123'), toUInt8OrNull('-123'), toUInt8OrNull('1234'), toUInt8OrNull(' 123'); +SELECT toUInt8OrZero('123'), toUInt8OrZero('-123'), toUInt8OrZero('1234'), toUInt8OrZero(' 123'); + +SELECT toTypeName(toUInt8OrNull('123')), toTypeName(toUInt8OrZero('123')); + +-- These functions are only applicable to string data types. +-- Although it is a room for extension: + +SELECT toUInt8OrNull(123); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- String and FixedString work: + +SELECT toUInt8OrNull(123::FixedString(3)); + +-- For the FixedString data type trailing zero bytes are allowed, because they are the padding for FixedString: + +SELECT toUInt8OrNull('123'::FixedString(4)); +SELECT toUInt8OrNull('123\0'::FixedString(4)); + +-- While for String, they don't: + +SELECT toUInt8OrNull('123\0'); + -- 7. SQL-compatibility type-defining operators: From 2d8676d61a58925ace94e4e71893c93fee9551c9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 16 Mar 2024 19:38:32 +0100 Subject: [PATCH 352/374] Update 03011_definitive_guide_to_cast.sql --- .../03011_definitive_guide_to_cast.sql | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql index 91487666a54..335b97c9db6 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql @@ -7,7 +7,7 @@ SET session_timezone = 'Europe/Amsterdam'; SELECT CAST(123 AS String); --- It convert between various data types, including parameterized data types +-- It converts between various data types, including parameterized data types SELECT CAST(1234567890 AS DateTime('Europe/Amsterdam')); @@ -15,7 +15,7 @@ SELECT CAST(1234567890 AS DateTime('Europe/Amsterdam')); SELECT CAST('[1, 2, 3]' AS Array(UInt8)); --- It's return type depends on the setting `cast_keep_nullable`. If it is enabled, if the source argument type is Nullable, the resulting data type will be also Nullable, even if it is not written explicitly: +-- Its return type depends on the setting `cast_keep_nullable`. If it is enabled, if the source argument type is Nullable, the resulting data type will be also Nullable, even if it is not written explicitly: SET cast_keep_nullable = 1; SELECT CAST(x AS UInt8) AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('NULL')); @@ -25,7 +25,7 @@ SELECT CAST(x AS UInt8) AS y, toTypeName(y) FROM VALUES('x Nullable(String)', (' -- There are various type conversion rules, some worth noting. --- Conversion between numeric types can involve implementation defined overflow: +-- Conversion between numeric types can involve implementation-defined overflow: SELECT CAST(257 AS UInt8); SELECT CAST(-1 AS UInt8); @@ -35,7 +35,7 @@ SELECT CAST(-1 AS UInt8); SELECT CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String)); -- ' --- While for simple data types it does not interpret escape sequences: +-- While for simple data types, it does not interpret escape sequences: SELECT arrayJoin(CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String))) AS x, CAST($$wo\'rld\\$$ AS FixedString(9)) AS y; @@ -159,9 +159,9 @@ SELECT 'String' AS String, CAST(123, String); -- 3. The internal function `_CAST` which is different from `CAST` only by being not dependent on the value of `cast_keep_nullable` setting and other settings. 
--- This is needed when ClickHouse has to persist an expression for future use, like in table definitions, including primary and partition key and other indices. +-- This is needed when ClickHouse has to persist an expression for future use, like in table definitions, including primary and partition keys and other indices. --- The function is not intended for being used directly. When a user uses a regular `CAST` operator or function in a table definition, it is transparently converted to `_CAST` to persist its behavior. However, the user can still use the internal version directly: +-- The function is not intended to be used directly. When a user uses a regular `CAST` operator or function in a table definition, it is transparently converted to `_CAST` to persist its behavior. However, the user can still use the internal version directly: SELECT _CAST(x, 'UInt8') AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('456')); @@ -179,7 +179,7 @@ SELECT 123::String; -- It has a difference from the `CAST` operator: if it is applied to a simple literal value, instead of performing a type conversion, it invokes the SQL parser directly on the corresponding text fragment of the query. The most important case will be the floating-point and decimal types. --- In this example, we parse `1.1` as Decimal and not involving any type conversion: +-- In this example, we parse `1.1` as Decimal and do not involve any type conversion: SELECT 1.1::Decimal(30, 20); @@ -211,7 +211,7 @@ SELECT 1.1::Decimal(30, 20), CAST('1.1' AS Decimal(30, 20)), (1+1)::UInt8 FORMAT SELECT 1-1::String; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } --- But one interesting example is the unary minus. Here the minus is not an operator, but part of the numeric literal: +-- But one interesting example is the unary minus. Here the minus is not an operator but part of the numeric literal: SELECT -1::String; @@ -236,7 +236,7 @@ SELECT accurateCastOrNull(1.123456789, 'Float32'); SELECT accurateCastOrDefault(-1, 'UInt64', 0::UInt64); --- These functions are case-sensitive and there are no corresponding operators: +-- These functions are case-sensitive, and there are no corresponding operators: SELECT ACCURATECAST(1, 'String'); -- { serverError UNKNOWN_FUNCTION }. @@ -254,11 +254,11 @@ SELECT ACCURATECAST(1, 'String'); -- { serverError UNKNOWN_FUNCTION }. -- `toIntervalSecond`, `toIntervalMinute`, `toIntervalHour`, -- `toIntervalDay`, `toIntervalWeek`, `toIntervalMonth`, `toIntervalQuarter`, `toIntervalYear` --- These functions work under the same rules as the CAST operator, and can be thought as a elementary implementation parts of that operator. They allow implementation defined overflow while converting between numeric types. +-- These functions work under the same rules as the CAST operator and can be thought as elementary implementation parts of that operator. They allow implementation-defined overflow while converting between numeric types. SELECT toUInt8(-1); --- These are ClickHouse-native conversion functions. They take an argument with the input value, and for some of data types (`FixedString`, `DateTime`, `DateTime64`, `Decimal`s) the subsequent arguments are constant expressions, defining the parameters of these data types, or the rules to interpret the source value. +-- These are ClickHouse-native conversion functions. 
They take an argument with the input value, and for some of the data types (`FixedString`, `DateTime`, `DateTime64`, `Decimal`s), the subsequent arguments are constant expressions, defining the parameters of these data types, or the rules to interpret the source value. SELECT toFloat64(123); -- no arguments SELECT toFixedString('Hello', 10) FORMAT TSV; -- the parameter of the FixedString data type, the function returns FixedString(10) @@ -276,7 +276,7 @@ SELECT toDateTime64('2024-04-25 01:02:03', 6, 'Europe/Amsterdam'); -- the scale SELECT toDateTime('2024-04-25 01:02:03'); SELECT toDateTime64('2024-04-25 01:02:03.456789', 6); --- Here the time zone can be specified as the rule of interpretation of the value during conversion: +-- Here, the time zone can be specified as the rule of interpretation of the value during conversion: SELECT toString(1710612085::DateTime, 'America/Los_Angeles'); SELECT toString(1710612085::DateTime); @@ -320,7 +320,7 @@ SELECT DATE '2024-04-25', TIMESTAMP '2024-01-01 02:03:04', INTERVAL 1 MINUTE, IN SELECT DATE('2024-04-25'), TIMESTAMP('2024-01-01 02:03:04'), FROM_UNIXTIME(1234567890); --- These functions exist for compatibility with MySQL. They are case-insensive. +-- These functions exist for compatibility with MySQL. They are case-insensitive. SELECT date '2024-04-25', timeSTAMP('2024-01-01 02:03:04'), From_Unixtime(1234567890); @@ -329,7 +329,7 @@ SELECT date '2024-04-25', timeSTAMP('2024-01-01 02:03:04'), From_Unixtime(123456 -- `parseDateTimeBestEffort`, `parseDateTimeBestEffortUS`, `parseDateTime64BestEffort`, `parseDateTime64BestEffortUS`, `toUnixTimestamp` --- These functions are similar to explicit conversion functions, but provide special rules on how the conversion is performed. +-- These functions are similar to explicit conversion functions but provide special rules on how the conversion is performed. SELECT parseDateTimeBestEffort('25 Apr 1986 1pm'); @@ -338,4 +338,4 @@ SELECT parseDateTimeBestEffort('25 Apr 1986 1pm'); SELECT toDayOfMonth(toDateTime(1234567890)); --- These functions are coverted in a separate topic. +-- These functions are covered in a separate topic. 
From 175efee0891223c6242f34e6bf08301a625ef407 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 16 Mar 2024 19:50:11 +0100 Subject: [PATCH 353/374] More guides --- .../0_stateless/03011_definitive_guide_to_cast.reference | 2 ++ tests/queries/0_stateless/03011_definitive_guide_to_cast.sql | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference index f20fe0b7425..b71011d75ff 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference @@ -61,6 +61,8 @@ Hello\0\0\0\0\0 2024-04-25 01:02:03.456789 2024-03-16 11:01:25 2024-03-16 19:01:25 +2024-03-16 19:01:25 +2024-03-16 11:01:25 123 \N \N \N 123 0 0 0 Nullable(UInt8) UInt8 diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql index 335b97c9db6..e79572e1ddb 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql @@ -281,6 +281,11 @@ SELECT toDateTime64('2024-04-25 01:02:03.456789', 6); SELECT toString(1710612085::DateTime, 'America/Los_Angeles'); SELECT toString(1710612085::DateTime); +-- In the case when the time zone is not the part of the resulting data type, but a rule of interpretation of the source value, +-- it can be non-constant. Let's clarify: in this example, the resulting data type is a String; it does not have a time zone parameter: + +SELECT toString(1710612085::DateTime, tz) FROM Values('tz String', 'Europe/Amsterdam', 'America/Los_Angeles'); + -- Functions converting to numeric types, date and datetime, IP and UUID, also have versions with -OrNull and -OrZero fallbacks, -- that don't throw exceptions on parsing errors. 
-- They use the same rules to the accurateCast operator: From 65eb6f135c19459ace827035dd518b371c64ee49 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 16 Mar 2024 19:52:10 +0100 Subject: [PATCH 354/374] More guides --- tests/queries/0_stateless/03011_definitive_guide_to_cast.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql index e79572e1ddb..76819ad8313 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql @@ -55,7 +55,7 @@ SELECT CAST('1234' AS UInt8); SELECT CAST(' 123' AS UInt8); -- { serverError CANNOT_PARSE_TEXT } SELECT CAST('123 ' AS UInt8); -- { serverError CANNOT_PARSE_TEXT } --- But for composite data types, it involves a more featured parser, that take care of whitespace inside the data structures: +-- But for composite data types, it involves a more featured parser, that takes care of whitespace inside the data structures: SELECT CAST('[ 123 ,456, ]' AS Array(UInt16)); From e5f15b6ac4894f32c7dd82091a3ee8260ef05f86 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 16 Mar 2024 23:00:57 +0100 Subject: [PATCH 355/374] Fix errors --- src/Functions/FunctionsConversion.cpp | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 7f130b0cc86..0deb2960d34 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -364,9 +364,9 @@ struct ToDateTime64TransformUnsigned { static constexpr auto name = "toDateTime64"; - const DateTime64::NativeType scale_multiplier = 1; + const DateTime64::NativeType scale_multiplier; - ToDateTime64TransformUnsigned(UInt32 scale = 0) /// NOLINT + ToDateTime64TransformUnsigned(UInt32 scale) /// NOLINT : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) {} @@ -388,9 +388,9 @@ struct ToDateTime64TransformSigned { static constexpr auto name = "toDateTime64"; - const DateTime64::NativeType scale_multiplier = 1; + const DateTime64::NativeType scale_multiplier; - ToDateTime64TransformSigned(UInt32 scale = 0) /// NOLINT + ToDateTime64TransformSigned(UInt32 scale) /// NOLINT : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) {} @@ -412,9 +412,9 @@ struct ToDateTime64TransformFloat { static constexpr auto name = "toDateTime64"; - const UInt32 scale = 1; + const UInt32 scale; - ToDateTime64TransformFloat(UInt32 scale_ = 0) /// NOLINT + ToDateTime64TransformFloat(UInt32 scale_) /// NOLINT : scale(scale_) {} @@ -439,7 +439,7 @@ struct FromDateTime64Transform { static constexpr auto name = Transform::name; - const DateTime64::NativeType scale_multiplier = 1; + const DateTime64::NativeType scale_multiplier; FromDateTime64Transform(UInt32 scale) /// NOLINT : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) @@ -456,9 +456,9 @@ struct ToDateTime64Transform { static constexpr auto name = "toDateTime64"; - const DateTime64::NativeType scale_multiplier = 1; + const DateTime64::NativeType scale_multiplier; - ToDateTime64Transform(UInt32 scale = 0) /// NOLINT + ToDateTime64Transform(UInt32 scale) /// NOLINT : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) {} @@ -1309,14 +1309,14 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl, false>::template execute( - arguments, result_type, input_rows_count); + arguments, result_type, 
input_rows_count, additions); } else if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { return DateTimeTransformImpl, false>::template execute( - arguments, result_type, input_rows_count); + arguments, result_type, input_rows_count, additions); } else if constexpr (( std::is_same_v @@ -1325,7 +1325,7 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl, false>::template execute( - arguments, result_type, input_rows_count); + arguments, result_type, input_rows_count, additions); } /// Conversion of DateTime64 to Date or DateTime: discards fractional part. else if constexpr (std::is_same_v @@ -1351,7 +1351,7 @@ struct ConvertImpl && std::is_same_v) { return DateTimeTransformImpl::template execute( - arguments, result_type, input_rows_count); + arguments, result_type, input_rows_count, additions); } else if constexpr (IsDataTypeDateOrDateTime && std::is_same_v) From f1be9e67070a25362e4e67ce78540ce95cc4e952 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 00:17:59 +0100 Subject: [PATCH 356/374] Trash --- src/Functions/FunctionsConversion.cpp | 94 ++++++++++----------------- 1 file changed, 34 insertions(+), 60 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0deb2960d34..088076eeeb4 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -181,12 +181,12 @@ struct ToDateTimeImpl /// Implementation of toDate function. -template +template struct ToDateTransform32Or64 { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + static NO_SANITIZE_UNDEFINED UInt16 execute(const FromType & from, const DateLUTImpl & time_zone) { if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) { @@ -203,12 +203,12 @@ struct ToDateTransform32Or64 }; -template +template struct ToDateTransform32Or64Signed { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + static NO_SANITIZE_UNDEFINED Int32 execute(const FromType & from, const DateLUTImpl & time_zone) { // TODO: decide narrow or extended range based on FromType if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) @@ -222,17 +222,17 @@ struct ToDateTransform32Or64Signed return 0; } return (from <= DATE_LUT_MAX_DAY_NUM) - ? static_cast(from) + ? static_cast(from) : time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATE_TIMESTAMP))); } }; -template +template struct ToDateTransform8Or16Signed { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + static NO_SANITIZE_UNDEFINED UInt16 execute(const FromType & from, const DateLUTImpl &) { if (from < 0) { @@ -247,15 +247,15 @@ struct ToDateTransform8Or16Signed /// Implementation of toDate32 function. 
-template +template struct ToDate32Transform32Or64 { static constexpr auto name = "toDate32"; - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + static NO_SANITIZE_UNDEFINED Int32 execute(const FromType & from, const DateLUTImpl & time_zone) { if (from < DATE_LUT_MAX_EXTEND_DAY_NUM) - return static_cast(from); + return static_cast(from); else { if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) @@ -268,12 +268,12 @@ struct ToDate32Transform32Or64 } }; -template +template struct ToDate32Transform32Or64Signed { static constexpr auto name = "toDate32"; - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) + static NO_SANITIZE_UNDEFINED Int32 execute(const FromType & from, const DateLUTImpl & time_zone) { static const Int32 daynum_min_offset = -static_cast(time_zone.getDayNumOffsetEpoch()); @@ -287,17 +287,17 @@ struct ToDate32Transform32Or64Signed return daynum_min_offset; return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) - ? static_cast(from) + ? static_cast(from) : time_zone.toDayNum(std::min(time_t(Int64(from)), time_t(MAX_DATETIME64_TIMESTAMP))); } }; -template +template struct ToDate32Transform8Or16Signed { static constexpr auto name = "toDate32"; - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + static NO_SANITIZE_UNDEFINED Int32 execute(const FromType & from, const DateLUTImpl &) { return from; } @@ -383,6 +383,7 @@ struct ToDateTime64TransformUnsigned return DecimalUtils::decimalFromComponentsWithMultiplier(std::min(from, MAX_DATETIME64_TIMESTAMP), 0, scale_multiplier); } }; + template struct ToDateTime64TransformSigned { @@ -407,6 +408,7 @@ struct ToDateTime64TransformSigned return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); } }; + template struct ToDateTime64TransformFloat { @@ -432,26 +434,6 @@ struct ToDateTime64TransformFloat } }; -/** Conversion of DateTime64 to Date or DateTime: discards fractional part. - */ -template -struct FromDateTime64Transform -{ - static constexpr auto name = Transform::name; - - const DateTime64::NativeType scale_multiplier; - - FromDateTime64Transform(UInt32 scale) /// NOLINT - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - auto execute(DateTime64::NativeType dt, const DateLUTImpl & time_zone) const - { - const auto c = DecimalUtils::splitWithScaleMultiplier(DateTime64(dt), scale_multiplier); - return Transform::execute(static_cast(c.whole), time_zone); - } -}; - struct ToDateTime64Transform { static constexpr auto name = "toDateTime64"; @@ -972,7 +954,7 @@ struct ConvertThroughParsing size_t next_offset = std::is_same_v ? (*offsets)[i] : (current_offset + fixed_string_size); size_t string_size = std::is_same_v ? 
next_offset - current_offset - 1 : fixed_string_size; - ReadBufferFromMemory read_buffer(&(*chars)[current_offset], string_size); + ReadBufferFromMemory read_buffer(chars->data() + current_offset, string_size); if constexpr (exception_mode == ConvertFromStringExceptionMode::Throw) { @@ -1087,23 +1069,15 @@ struct ConvertThroughParsing parsed = SerializationDecimal::tryReadText( vec_to[i], read_buffer, ToDataType::maxPrecision(), col_to->getScale()); } + else if (std::is_same_v && std::is_same_v + && fixed_string_size == IPV6_BINARY_LENGTH) + { + readBinary(vec_to[i], read_buffer); + parsed = true; + } else { - /// we want to utilize constexpr condition here, which is not mixable with value comparison - do - { - if constexpr (std::is_same_v && std::is_same_v) - { - if (fixed_string_size == IPV6_BINARY_LENGTH) - { - readBinary(vec_to[i], read_buffer); - parsed = true; - break; - } - } - - parsed = tryParseImpl(vec_to[i], read_buffer, local_time_zone, precise_float_parsing); - } while (false); + parsed = tryParseImpl(vec_to[i], read_buffer, local_time_zone, precise_float_parsing); } } @@ -1220,7 +1194,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1229,7 +1203,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1240,7 +1214,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1249,7 +1223,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1258,7 +1232,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1269,7 +1243,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } /// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. 
@@ -2475,7 +2449,7 @@ public: } template - ColumnPtr executeInternal(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, UInt32 scale = 0) const + ColumnPtr executeInternal(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, UInt32 scale) const { const IDataType * from_type = arguments[0].type.get(); @@ -2510,7 +2484,7 @@ public: if (scale == 0) { - result_column = executeInternal(arguments, result_type, input_rows_count); + result_column = executeInternal(arguments, result_type, input_rows_count, 0); } else { @@ -2519,7 +2493,7 @@ public: } else { - result_column = executeInternal(arguments, result_type, input_rows_count); + result_column = executeInternal(arguments, result_type, input_rows_count, 0); } if (!result_column) From ced06cb163bdf9280cde5ee49f9ab3eb506228e3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 00:18:38 +0100 Subject: [PATCH 357/374] More guides --- .../03011_definitive_guide_to_cast.reference | 4 ++++ .../0_stateless/03011_definitive_guide_to_cast.sql | 12 +++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference index b71011d75ff..7c875a24b6d 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.reference @@ -50,6 +50,8 @@ CAST(plus(1, 1), 'UInt8'): 2 -1 \N 0 +0 +1970-01-01 01:00:00 255 123 Hello\0\0\0\0\0 @@ -65,6 +67,8 @@ Hello\0\0\0\0\0 2024-03-16 11:01:25 123 \N \N \N 123 0 0 0 +123 10 10 10 +123 0 0 0 Nullable(UInt8) UInt8 123 123 diff --git a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql index 76819ad8313..708db0adce0 100644 --- a/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql +++ b/tests/queries/0_stateless/03011_definitive_guide_to_cast.sql @@ -236,6 +236,14 @@ SELECT accurateCastOrNull(1.123456789, 'Float32'); SELECT accurateCastOrDefault(-1, 'UInt64', 0::UInt64); +-- If this parameter is omitted, it is assumed to be the default value of the corresponding data type: + +SELECT accurateCastOrDefault(-1, 'UInt64'); +SELECT accurateCastOrDefault(-1, 'DateTime'); + +-- Unfortunately, this does not work as expected: SELECT accurateCastOrDefault(-1, $$Enum8('None' = 1, 'Hello' = 2, 'World' = 3)$$); +-- https://github.com/ClickHouse/ClickHouse/issues/61495 + -- These functions are case-sensitive, and there are no corresponding operators: SELECT ACCURATECAST(1, 'String'); -- { serverError UNKNOWN_FUNCTION }. @@ -286,12 +294,14 @@ SELECT toString(1710612085::DateTime); SELECT toString(1710612085::DateTime, tz) FROM Values('tz String', 'Europe/Amsterdam', 'America/Los_Angeles'); --- Functions converting to numeric types, date and datetime, IP and UUID, also have versions with -OrNull and -OrZero fallbacks, +-- Functions converting to numeric types, date and datetime, IP and UUID, also have versions with -OrNull, -OrZero, and -OrDefault fallbacks, -- that don't throw exceptions on parsing errors. 
-- They use the same rules to the accurateCast operator: SELECT toUInt8OrNull('123'), toUInt8OrNull('-123'), toUInt8OrNull('1234'), toUInt8OrNull(' 123'); SELECT toUInt8OrZero('123'), toUInt8OrZero('-123'), toUInt8OrZero('1234'), toUInt8OrZero(' 123'); +SELECT toUInt8OrDefault('123', 10), toUInt8OrDefault('-123', 10), toUInt8OrDefault('1234', 10), toUInt8OrDefault(' 123', 10); +SELECT toUInt8OrDefault('123'), toUInt8OrDefault('-123'), toUInt8OrDefault('1234'), toUInt8OrDefault(' 123'); SELECT toTypeName(toUInt8OrNull('123')), toTypeName(toUInt8OrZero('123')); From d627fbef557422cff10dda9f890d6b805fda5a19 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 00:26:07 +0100 Subject: [PATCH 358/374] Fix errors --- src/Functions/FunctionsConversion.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 088076eeeb4..ada7e4ac58d 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -208,7 +208,7 @@ struct ToDateTransform32Or64Signed { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED Int32 execute(const FromType & from, const DateLUTImpl & time_zone) + static NO_SANITIZE_UNDEFINED UInt16 execute(const FromType & from, const DateLUTImpl & time_zone) { // TODO: decide narrow or extended range based on FromType if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) @@ -222,7 +222,7 @@ struct ToDateTransform32Or64Signed return 0; } return (from <= DATE_LUT_MAX_DAY_NUM) - ? static_cast(from) + ? static_cast(from) : time_zone.toDayNum(std::min(time_t(from), time_t(MAX_DATE_TIMESTAMP))); } }; @@ -255,7 +255,9 @@ struct ToDate32Transform32Or64 static NO_SANITIZE_UNDEFINED Int32 execute(const FromType & from, const DateLUTImpl & time_zone) { if (from < DATE_LUT_MAX_EXTEND_DAY_NUM) + { return static_cast(from); + } else { if constexpr (date_time_overflow_behavior == FormatSettings::DateTimeOverflowBehavior::Throw) @@ -1214,7 +1216,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } else if constexpr (( @@ -1243,7 +1245,7 @@ struct ConvertImpl && std::is_same_v && std::is_same_v) { - return DateTimeTransformImpl, false>::template execute( + return DateTimeTransformImpl, false>::template execute( arguments, result_type, input_rows_count); } /// Special case of converting Int8, Int16, Int32 or (U)Int64 (and also, for convenience, Float32, Float64) to DateTime. 
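The return-type fixes above concern the saturating conversion path of `toDate`/`toDate32`: out-of-range inputs are clamped to the valid range rather than wrapped when the overflow behavior is not set to throw. A rough SQL sketch of the branches involved — the inputs are illustrative and the clamped results depend on the `date_time_overflow_behavior` setting, so no exact values are asserted here:

-- Illustrative probes of the conversion branches touched above; results are not asserted.
SELECT toDate(1710612085);            -- ordinary UNIX timestamp, in-range branch
SELECT toDate(10000000000::Int64);    -- past the Date range, saturating branch
SELECT toDate32(-100000000);          -- far below the epoch, lower-bound branch
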
From fac040e08eda9ad958785965f45ebc038155b2de Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 00:34:32 +0100 Subject: [PATCH 359/374] Add documentation --- src/Functions/castOrDefault.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/Functions/castOrDefault.cpp b/src/Functions/castOrDefault.cpp index 57cb03e0349..ac04a883c11 100644 --- a/src/Functions/castOrDefault.cpp +++ b/src/Functions/castOrDefault.cpp @@ -329,7 +329,18 @@ REGISTER_FUNCTION(CastOrDefault) factory.registerFunction("toUInt64OrDefault", [](ContextPtr context){ return std::make_unique( std::make_shared(context, "toUInt64OrDefault", std::make_shared())); }); factory.registerFunction("toUInt128OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUInt128OrDefault", std::make_shared())); }); + std::make_shared(context, "toUInt128OrDefault", std::make_shared())); }, + FunctionDocumentation{ + .description=R"( +Converts a string in the first argument of the function to UInt128 by parsing it. +If it cannot parse the value, returns the default value, which can be provided as the second function argument, and if provided, must be of UInt128 type. +If the default value is not provided in the second argument, it is assumed to be zero. +)", + .examples{ + {"Successful conversion", "SELECT toUInt128OrDefault('1', 2::UInt128)", "1"}, + {"Default value", "SELECT toUInt128OrDefault('upyachka', 123456789012345678901234567890::UInt128)", "123456789012345678901234567890"}}, + .categories{"ConversionFunctions"} + }); factory.registerFunction("toUInt256OrDefault", [](ContextPtr context){ return std::make_unique( std::make_shared(context, "toUInt256OrDefault", std::make_shared())); }); From 96f53df7ada476384b0665f52a7c22cd33ce2415 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 12:02:02 +0100 Subject: [PATCH 360/374] Fix error --- src/Functions/castOrDefault.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Functions/castOrDefault.cpp b/src/Functions/castOrDefault.cpp index ac04a883c11..c858dfdd589 100644 --- a/src/Functions/castOrDefault.cpp +++ b/src/Functions/castOrDefault.cpp @@ -221,7 +221,7 @@ private: size_t scale = 0; std::string time_zone; - if (isDecimal(type)) + if (isDecimal(type) || isDateTime64(type)) { const auto & scale_argument = arguments[additional_argument_index]; @@ -338,7 +338,8 @@ If the default value is not provided in the second argument, it is assumed to be )", .examples{ {"Successful conversion", "SELECT toUInt128OrDefault('1', 2::UInt128)", "1"}, - {"Default value", "SELECT toUInt128OrDefault('upyachka', 123456789012345678901234567890::UInt128)", "123456789012345678901234567890"}}, + {"Default value", "SELECT toUInt128OrDefault('upyachka', 123456789012345678901234567890::UInt128)", "123456789012345678901234567890"}, + {"Implicit default value", "SELECT toUInt128OrDefault('upyachka')", "0"}}, .categories{"ConversionFunctions"} }); factory.registerFunction("toUInt256OrDefault", [](ContextPtr context){ return std::make_unique( From 921ec12192fc2461bc828653bd9c3900daee74c8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 12:29:45 +0100 Subject: [PATCH 361/374] Maybe not worse --- src/Functions/FunctionsConversion.cpp | 86 ++++++++------------------- 1 file changed, 24 insertions(+), 62 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index ada7e4ac58d..0d245f809bb 100644 --- 
a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -4776,67 +4777,6 @@ arguments, result_type, input_rows_count); \ } }; -class MonotonicityHelper -{ -public: - using MonotonicityForRange = FunctionCast::MonotonicityForRange; - - template - static auto monotonicityForType(const DataType * const) - { - return FunctionTo::Type::Monotonic::get; - } - - static MonotonicityForRange getMonotonicityInformation(const DataTypePtr & from_type, const IDataType * to_type) - { - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (isEnum(from_type)) - { - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - if (const auto * type = checkAndGetDataType(to_type)) - return monotonicityForType(type); - } - /// other types like Null, FixedString, Array and Tuple have no monotonicity defined - return {}; - } -}; - } @@ -4853,7 +4793,29 @@ FunctionBasePtr createFunctionBaseCast( for (size_t i = 0; i < arguments.size(); ++i) data_types[i] = arguments[i].type; - auto monotonicity = MonotonicityHelper::getMonotonicityInformation(arguments.front().type, return_type.get()); + FunctionCast::MonotonicityForRange monotonicity; + + if (isEnum(arguments.front().type) + && castTypeToEither(return_type.get(), [&](auto & type) + { + monotonicity = FunctionTo>::Type::Monotonic::get; + return true; + })) + { + } + else if (castTypeToEither< + DataTypeUInt8, DataTypeUInt16, DataTypeUInt32, DataTypeUInt64, DataTypeUInt128, DataTypeUInt256, + DataTypeInt8, DataTypeInt16, DataTypeInt32, DataTypeInt64, DataTypeInt128, DataTypeInt256, + DataTypeFloat32, DataTypeFloat64, + DataTypeDate, DataTypeDate32, DataTypeDateTime, + DataTypeString>(return_type.get(), [&](auto & type) + { + monotonicity = FunctionTo>::Type::Monotonic::get; + return true; + })) + { + } + return std::make_unique(context, name, 
std::move(monotonicity), data_types, return_type, diagnostic, cast_type); } From 2af6d35752e064bfd7859b23a52948c222f7c716 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 12:53:59 +0100 Subject: [PATCH 362/374] Less crap --- src/Functions/FunctionFactory.cpp | 12 +++ src/Functions/FunctionFactory.h | 7 ++ src/Functions/array/emptyArray.cpp | 3 +- src/Functions/castOrDefault.cpp | 100 ++++++++++----------- src/Functions/coverage.cpp | 6 +- src/Functions/currentProfiles.cpp | 6 +- src/Functions/fromUnixTimestamp64Micro.cpp | 3 +- src/Functions/fromUnixTimestamp64Milli.cpp | 3 +- src/Functions/fromUnixTimestamp64Nano.cpp | 3 +- src/Functions/snowflake.cpp | 12 +-- src/Functions/toUnixTimestamp64Micro.cpp | 3 +- src/Functions/toUnixTimestamp64Milli.cpp | 3 +- src/Functions/toUnixTimestamp64Nano.cpp | 3 +- 13 files changed, 86 insertions(+), 78 deletions(-) diff --git a/src/Functions/FunctionFactory.cpp b/src/Functions/FunctionFactory.cpp index 6a7274376b9..004ef745a93 100644 --- a/src/Functions/FunctionFactory.cpp +++ b/src/Functions/FunctionFactory.cpp @@ -49,6 +49,18 @@ void FunctionFactory::registerFunction( } } +void FunctionFactory::registerFunction( + const std::string & name, + FunctionSimpleCreator creator, + FunctionDocumentation doc, + CaseSensitiveness case_sensitiveness) +{ + registerFunction(name, [my_creator = std::move(creator)](ContextPtr context) + { + return std::make_unique(my_creator(context)); + }, std::move(doc), std::move(case_sensitiveness)); +} + FunctionOverloadResolverPtr FunctionFactory::getImpl( const std::string & name, diff --git a/src/Functions/FunctionFactory.h b/src/Functions/FunctionFactory.h index 588cae64e16..c2fd34f0488 100644 --- a/src/Functions/FunctionFactory.h +++ b/src/Functions/FunctionFactory.h @@ -17,6 +17,7 @@ namespace DB { using FunctionCreator = std::function; +using FunctionSimpleCreator = std::function; using FunctionFactoryData = std::pair; /** Creates function by name. 
@@ -66,6 +67,12 @@ public: FunctionDocumentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive); + void registerFunction( + const std::string & name, + FunctionSimpleCreator creator, + FunctionDocumentation doc = {}, + CaseSensitiveness case_sensitiveness = CaseSensitive); + FunctionDocumentation getDocumentation(const std::string & name) const; private: diff --git a/src/Functions/array/emptyArray.cpp b/src/Functions/array/emptyArray.cpp index 684f8af162a..77c191b3adc 100644 --- a/src/Functions/array/emptyArray.cpp +++ b/src/Functions/array/emptyArray.cpp @@ -49,8 +49,7 @@ private: void registerFunction(FunctionFactory & factory, const String & element_type) { factory.registerFunction(FunctionEmptyArray::getNameImpl(element_type), - [element_type](ContextPtr){ return std::make_unique( - std::make_shared(element_type)); }); + [element_type](ContextPtr){ return std::make_shared(element_type); }); } } diff --git a/src/Functions/castOrDefault.cpp b/src/Functions/castOrDefault.cpp index c858dfdd589..b5653fca1e9 100644 --- a/src/Functions/castOrDefault.cpp +++ b/src/Functions/castOrDefault.cpp @@ -320,16 +320,16 @@ REGISTER_FUNCTION(CastOrDefault) { factory.registerFunction(); - factory.registerFunction("toUInt8OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUInt8OrDefault", std::make_shared())); }); - factory.registerFunction("toUInt16OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUInt16OrDefault", std::make_shared())); }); - factory.registerFunction("toUInt32OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUInt32OrDefault", std::make_shared())); }); - factory.registerFunction("toUInt64OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUInt64OrDefault", std::make_shared())); }); - factory.registerFunction("toUInt128OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUInt128OrDefault", std::make_shared())); }, + factory.registerFunction("toUInt8OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toUInt8OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt16OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toUInt16OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt32OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toUInt32OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt64OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toUInt64OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt128OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toUInt128OrDefault", std::make_shared()); }, FunctionDocumentation{ .description=R"( Converts a string in the first argument of the function to UInt128 by parsing it. 
@@ -342,51 +342,51 @@ If the default value is not provided in the second argument, it is assumed to be {"Implicit default value", "SELECT toUInt128OrDefault('upyachka')", "0"}}, .categories{"ConversionFunctions"} }); - factory.registerFunction("toUInt256OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUInt256OrDefault", std::make_shared())); }); + factory.registerFunction("toUInt256OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toUInt256OrDefault", std::make_shared()); }); - factory.registerFunction("toInt8OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toInt8OrDefault", std::make_shared())); }); - factory.registerFunction("toInt16OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toInt16OrDefault", std::make_shared())); }); - factory.registerFunction("toInt32OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toInt32OrDefault", std::make_shared())); }); - factory.registerFunction("toInt64OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toInt64OrDefault", std::make_shared())); }); - factory.registerFunction("toInt128OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toInt128OrDefault", std::make_shared())); }); - factory.registerFunction("toInt256OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toInt256OrDefault", std::make_shared())); }); + factory.registerFunction("toInt8OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toInt8OrDefault", std::make_shared()); }); + factory.registerFunction("toInt16OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toInt16OrDefault", std::make_shared()); }); + factory.registerFunction("toInt32OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toInt32OrDefault", std::make_shared()); }); + factory.registerFunction("toInt64OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toInt64OrDefault", std::make_shared()); }); + factory.registerFunction("toInt128OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toInt128OrDefault", std::make_shared()); }); + factory.registerFunction("toInt256OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toInt256OrDefault", std::make_shared()); }); - factory.registerFunction("toFloat32OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toFloat32OrDefault", std::make_shared())); }); - factory.registerFunction("toFloat64OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toFloat64OrDefault", std::make_shared())); }); + factory.registerFunction("toFloat32OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toFloat32OrDefault", std::make_shared()); }); + factory.registerFunction("toFloat64OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toFloat64OrDefault", std::make_shared()); }); - factory.registerFunction("toDateOrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toDateOrDefault", std::make_shared())); }); - factory.registerFunction("toDate32OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toDate32OrDefault", std::make_shared())); }); - factory.registerFunction("toDateTimeOrDefault", [](ContextPtr context){ return 
std::make_unique( - std::make_shared(context, "toDateTimeOrDefault", std::make_shared())); }); - factory.registerFunction("toDateTime64OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toDateTime64OrDefault", std::make_shared(3 /* default scale */))); }); + factory.registerFunction("toDateOrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDateOrDefault", std::make_shared()); }); + factory.registerFunction("toDate32OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDate32OrDefault", std::make_shared()); }); + factory.registerFunction("toDateTimeOrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDateTimeOrDefault", std::make_shared()); }); + factory.registerFunction("toDateTime64OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDateTime64OrDefault", std::make_shared(3 /* default scale */)); }); - factory.registerFunction("toDecimal32OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toDecimal32OrDefault", createDecimalMaxPrecision(0))); }); - factory.registerFunction("toDecimal64OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toDecimal64OrDefault", createDecimalMaxPrecision(0))); }); - factory.registerFunction("toDecimal128OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toDecimal128OrDefault", createDecimalMaxPrecision(0))); }); - factory.registerFunction("toDecimal256OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toDecimal256OrDefault", createDecimalMaxPrecision(0))); }); + factory.registerFunction("toDecimal32OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDecimal32OrDefault", createDecimalMaxPrecision(0)); }); + factory.registerFunction("toDecimal64OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDecimal64OrDefault", createDecimalMaxPrecision(0)); }); + factory.registerFunction("toDecimal128OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDecimal128OrDefault", createDecimalMaxPrecision(0)); }); + factory.registerFunction("toDecimal256OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toDecimal256OrDefault", createDecimalMaxPrecision(0)); }); - factory.registerFunction("toUUIDOrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toUUIDOrDefault", std::make_shared())); }); - factory.registerFunction("toIPv4OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toIPv4OrDefault", std::make_shared())); }); - factory.registerFunction("toIPv6OrDefault", [](ContextPtr context){ return std::make_unique( - std::make_shared(context, "toIPv6OrDefault", std::make_shared())); }); + factory.registerFunction("toUUIDOrDefault", [](ContextPtr context){ + return std::make_shared(context, "toUUIDOrDefault", std::make_shared()); }); + factory.registerFunction("toIPv4OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toIPv4OrDefault", std::make_shared()); }); + factory.registerFunction("toIPv6OrDefault", [](ContextPtr context){ + return std::make_shared(context, "toIPv6OrDefault", std::make_shared()); }); } } diff --git a/src/Functions/coverage.cpp b/src/Functions/coverage.cpp index a1a43d0cf58..97f807e22b7 100644 --- a/src/Functions/coverage.cpp +++ b/src/Functions/coverage.cpp @@ -93,7 +93,7 @@ public: REGISTER_FUNCTION(Coverage) 
{ - factory.registerFunction("coverageCurrent", [](ContextPtr){ return std::make_unique(std::make_shared(Kind::Current)); }, + factory.registerFunction("coverageCurrent", [](ContextPtr){ return std::make_shared(Kind::Current); }, FunctionDocumentation { .description=R"( @@ -124,7 +124,7 @@ See https://clang.llvm.org/docs/SanitizerCoverage.html for more information. .categories{"Introspection"} }); - factory.registerFunction("coverageCumulative", [](ContextPtr){ return std::make_unique(std::make_shared(Kind::Cumulative)); }, + factory.registerFunction("coverageCumulative", [](ContextPtr){ return std::make_shared(Kind::Cumulative); }, FunctionDocumentation { .description=R"( @@ -140,7 +140,7 @@ See the `coverageCurrent` function for the details. .categories{"Introspection"} }); - factory.registerFunction("coverageAll", [](ContextPtr){ return std::make_unique(std::make_shared(Kind::All)); }, + factory.registerFunction("coverageAll", [](ContextPtr){ return std::make_shared(Kind::All); }, FunctionDocumentation { .description=R"( diff --git a/src/Functions/currentProfiles.cpp b/src/Functions/currentProfiles.cpp index 77c8a20ccee..8f14943e011 100644 --- a/src/Functions/currentProfiles.cpp +++ b/src/Functions/currentProfiles.cpp @@ -98,9 +98,9 @@ namespace REGISTER_FUNCTION(Profiles) { - factory.registerFunction("currentProfiles", [](ContextPtr context){ return std::make_unique(std::make_shared(context, Kind::currentProfiles)); }); - factory.registerFunction("enabledProfiles", [](ContextPtr context){ return std::make_unique(std::make_shared(context, Kind::enabledProfiles)); }); - factory.registerFunction("defaultProfiles", [](ContextPtr context){ return std::make_unique(std::make_shared(context, Kind::defaultProfiles)); }); + factory.registerFunction("currentProfiles", [](ContextPtr context){ return std::make_shared(context, Kind::currentProfiles); }); + factory.registerFunction("enabledProfiles", [](ContextPtr context){ return std::make_shared(context, Kind::enabledProfiles); }); + factory.registerFunction("defaultProfiles", [](ContextPtr context){ return std::make_shared(context, Kind::defaultProfiles); }); } } diff --git a/src/Functions/fromUnixTimestamp64Micro.cpp b/src/Functions/fromUnixTimestamp64Micro.cpp index 191e2137a0d..d96e0232335 100644 --- a/src/Functions/fromUnixTimestamp64Micro.cpp +++ b/src/Functions/fromUnixTimestamp64Micro.cpp @@ -7,8 +7,7 @@ namespace DB REGISTER_FUNCTION(FromUnixTimestamp64Micro) { factory.registerFunction("fromUnixTimestamp64Micro", - [](ContextPtr context){ return std::make_unique( - std::make_shared(6, "fromUnixTimestamp64Micro", context)); }); + [](ContextPtr context){ return std::make_shared(6, "fromUnixTimestamp64Micro", context); }); } } diff --git a/src/Functions/fromUnixTimestamp64Milli.cpp b/src/Functions/fromUnixTimestamp64Milli.cpp index c6d4fcd30a2..aa77e8043c1 100644 --- a/src/Functions/fromUnixTimestamp64Milli.cpp +++ b/src/Functions/fromUnixTimestamp64Milli.cpp @@ -7,8 +7,7 @@ namespace DB REGISTER_FUNCTION(FromUnixTimestamp64Milli) { factory.registerFunction("fromUnixTimestamp64Milli", - [](ContextPtr context){ return std::make_unique( - std::make_shared(3, "fromUnixTimestamp64Milli", context)); }); + [](ContextPtr context){ return std::make_shared(3, "fromUnixTimestamp64Milli", context); }); } } diff --git a/src/Functions/fromUnixTimestamp64Nano.cpp b/src/Functions/fromUnixTimestamp64Nano.cpp index 2b5a7addbfc..f9d69219933 100644 --- a/src/Functions/fromUnixTimestamp64Nano.cpp +++ b/src/Functions/fromUnixTimestamp64Nano.cpp @@ -7,8 +7,7 @@ 
namespace DB REGISTER_FUNCTION(FromUnixTimestamp64Nano) { factory.registerFunction("fromUnixTimestamp64Nano", - [](ContextPtr context){ return std::make_unique( - std::make_shared(9, "fromUnixTimestamp64Nano", context)); }); + [](ContextPtr context){ return std::make_shared(9, "fromUnixTimestamp64Nano", context); }); } } diff --git a/src/Functions/snowflake.cpp b/src/Functions/snowflake.cpp index f2dd1f1c51d..4a2d502a31a 100644 --- a/src/Functions/snowflake.cpp +++ b/src/Functions/snowflake.cpp @@ -249,28 +249,24 @@ public: REGISTER_FUNCTION(DateTimeToSnowflake) { factory.registerFunction("dateTimeToSnowflake", - [](ContextPtr){ return std::make_unique( - std::make_shared("dateTimeToSnowflake")); }); + [](ContextPtr){ return std::make_shared("dateTimeToSnowflake"); }); } REGISTER_FUNCTION(DateTime64ToSnowflake) { factory.registerFunction("dateTime64ToSnowflake", - [](ContextPtr){ return std::make_unique( - std::make_shared("dateTime64ToSnowflake")); }); + [](ContextPtr){ return std::make_shared("dateTime64ToSnowflake"); }); } REGISTER_FUNCTION(SnowflakeToDateTime) { factory.registerFunction("snowflakeToDateTime", - [](ContextPtr context){ return std::make_unique( - std::make_shared("snowflakeToDateTime", context)); }); + [](ContextPtr context){ return std::make_shared("snowflakeToDateTime", context); }); } REGISTER_FUNCTION(SnowflakeToDateTime64) { factory.registerFunction("snowflakeToDateTime64", - [](ContextPtr context){ return std::make_unique( - std::make_shared("snowflakeToDateTime64", context)); }); + [](ContextPtr context){ return std::make_shared("snowflakeToDateTime64", context); }); } } diff --git a/src/Functions/toUnixTimestamp64Micro.cpp b/src/Functions/toUnixTimestamp64Micro.cpp index fd35e2a7a73..964ad5a2c18 100644 --- a/src/Functions/toUnixTimestamp64Micro.cpp +++ b/src/Functions/toUnixTimestamp64Micro.cpp @@ -7,8 +7,7 @@ namespace DB REGISTER_FUNCTION(ToUnixTimestamp64Micro) { factory.registerFunction("toUnixTimestamp64Micro", - [](ContextPtr){ return std::make_unique( - std::make_shared(6, "toUnixTimestamp64Micro")); }); + [](ContextPtr){ return std::make_shared(6, "toUnixTimestamp64Micro"); }); } } diff --git a/src/Functions/toUnixTimestamp64Milli.cpp b/src/Functions/toUnixTimestamp64Milli.cpp index e6a680f941a..bc92a6d1fe3 100644 --- a/src/Functions/toUnixTimestamp64Milli.cpp +++ b/src/Functions/toUnixTimestamp64Milli.cpp @@ -7,8 +7,7 @@ namespace DB REGISTER_FUNCTION(ToUnixTimestamp64Milli) { factory.registerFunction("toUnixTimestamp64Milli", - [](ContextPtr){ return std::make_unique( - std::make_shared(3, "toUnixTimestamp64Milli")); }); + [](ContextPtr){ return std::make_shared(3, "toUnixTimestamp64Milli"); }); } } diff --git a/src/Functions/toUnixTimestamp64Nano.cpp b/src/Functions/toUnixTimestamp64Nano.cpp index 257f011603c..8829b00bf56 100644 --- a/src/Functions/toUnixTimestamp64Nano.cpp +++ b/src/Functions/toUnixTimestamp64Nano.cpp @@ -7,8 +7,7 @@ namespace DB REGISTER_FUNCTION(ToUnixTimestamp64Nano) { factory.registerFunction("toUnixTimestamp64Nano", - [](ContextPtr){ return std::make_unique( - std::make_shared(9, "toUnixTimestamp64Nano")); }); + [](ContextPtr){ return std::make_shared(9, "toUnixTimestamp64Nano"); }); } } From 427a8b3264cd9000b6f7ab13f13ceac0878d000a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 13:20:49 +0100 Subject: [PATCH 363/374] Less crap --- programs/local/LocalServer.cpp | 4 ++++ src/Functions/CRC.cpp | 21 ++++++++------------- src/Functions/FunctionFQDN.cpp | 2 +- src/Functions/FunctionFactory.h | 21 
++++++--------------- src/Functions/FunctionsConversion.cpp | 15 ++++++--------- src/Functions/FunctionsRound.cpp | 10 +++++----- src/Functions/caseWithExpression.cpp | 4 +--- src/Functions/getFuzzerData.h | 8 ++------ src/Functions/multiIf.cpp | 6 ++---- src/Functions/toFixedString.h | 2 -- 10 files changed, 35 insertions(+), 58 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 50f9c242712..0dd3823cc6d 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -1013,13 +1013,16 @@ extern "C" int LLVMFuzzerInitialize(int * pargc, char *** pargv) auto it = argv.begin() + 1; for (; *it; ++it) + { if (strcmp(*it, "--") == 0) { ++it; break; } + } while (*it) + { if (strncmp(*it, "--", 2) != 0) { *(p++) = *it; @@ -1027,6 +1030,7 @@ extern "C" int LLVMFuzzerInitialize(int * pargc, char *** pargv) } else ++it; + } *pargc = static_cast(p - &(*pargv)[0]); *p = nullptr; diff --git a/src/Functions/CRC.cpp b/src/Functions/CRC.cpp index ba13fcf78f1..49d6dd6fa52 100644 --- a/src/Functions/CRC.cpp +++ b/src/Functions/CRC.cpp @@ -51,7 +51,7 @@ struct CRC32IEEEImpl : public CRCImpl static constexpr auto name = "CRC32IEEE"; }; -struct CRC32ZLIBImpl +struct CRC32ZLibImpl { using ReturnType = UInt32; static constexpr auto name = "CRC32"; @@ -133,13 +133,14 @@ private: } }; -template +template using FunctionCRC = FunctionStringOrArrayToT, T, typename T::ReturnType>; + // The same as IEEE variant, but uses 0xffffffff as initial value // This is the default // -// (And zlib is used here, since it has optimized version) -using FunctionCRC32ZLIB = FunctionCRC; +// (And ZLib is used here, since it has optimized version) +using FunctionCRC32ZLib = FunctionCRC; // Uses CRC-32-IEEE 802.3 polynomial using FunctionCRC32IEEE = FunctionCRC; // Uses CRC-64-ECMA polynomial @@ -147,17 +148,11 @@ using FunctionCRC64ECMA = FunctionCRC; } -template -void registerFunctionCRCImpl(FunctionFactory & factory) -{ - factory.registerFunction(T::name, {}, FunctionFactory::CaseInsensitive); -} - REGISTER_FUNCTION(CRC) { - registerFunctionCRCImpl(factory); - registerFunctionCRCImpl(factory); - registerFunctionCRCImpl(factory); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/FunctionFQDN.cpp b/src/Functions/FunctionFQDN.cpp index b054ff8e1d7..108a96216fd 100644 --- a/src/Functions/FunctionFQDN.cpp +++ b/src/Functions/FunctionFQDN.cpp @@ -47,7 +47,7 @@ public: REGISTER_FUNCTION(FQDN) { factory.registerFunction({}, FunctionFactory::CaseInsensitive); - factory.registerFunction("fullHostName"); + factory.registerAlias("fullHostName", "FQDN"); } } diff --git a/src/Functions/FunctionFactory.h b/src/Functions/FunctionFactory.h index c2fd34f0488..bb43d4719b8 100644 --- a/src/Functions/FunctionFactory.h +++ b/src/Functions/FunctionFactory.h @@ -35,15 +35,6 @@ public: registerFunction(Function::name, std::move(doc), case_sensitiveness); } - template - void registerFunction(const std::string & name, FunctionDocumentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive) - { - if constexpr (std::is_base_of_v) - registerFunction(name, &adaptFunctionToOverloadResolver, std::move(doc), case_sensitiveness); - else - registerFunction(name, &Function::create, std::move(doc), case_sensitiveness); - } - /// This function is used by YQL - innovative transactional DBMS that depends on ClickHouse by 
source code. std::vector getAllNames() const; @@ -81,17 +72,17 @@ private: Functions functions; Functions case_insensitive_functions; - template - static FunctionOverloadResolverPtr adaptFunctionToOverloadResolver(ContextPtr context) - { - return std::make_unique(Function::create(context)); - } - const Functions & getMap() const override { return functions; } const Functions & getCaseInsensitiveMap() const override { return case_insensitive_functions; } String getFactoryName() const override { return "FunctionFactory"; } + + template + void registerFunction(const std::string & name, FunctionDocumentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive) + { + registerFunction(name, &Function::create, std::move(doc), case_sensitiveness); + } }; const String & getFunctionCanonicalNameIfAny(const String & name); diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0d245f809bb..ceff4f3fd7e 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1955,9 +1955,6 @@ public: static constexpr bool to_decimal = IsDataTypeDecimal && !to_datetime64; static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } - static FunctionPtr create() { return std::make_shared(); } - - FunctionConvert() = default; explicit FunctionConvert(ContextPtr context_) : context(context_) {} String getName() const override @@ -3295,9 +3292,9 @@ arguments, result_type, input_rows_count); \ }; } - static WrapperType createStringWrapper(const DataTypePtr & from_type) + WrapperType createStringWrapper(const DataTypePtr & from_type) const { - FunctionPtr function = FunctionToString::create(); + FunctionPtr function = FunctionToString::create(context); return createFunctionAdaptor(function, from_type); } @@ -3318,9 +3315,9 @@ arguments, result_type, input_rows_count); \ #define GENERATE_INTERVAL_CASE(INTERVAL_KIND) \ case IntervalKind::Kind::INTERVAL_KIND: \ - return createFunctionAdaptor(FunctionConvert::create(), from_type); + return createFunctionAdaptor(FunctionConvert::create(context), from_type); - static WrapperType createIntervalWrapper(const DataTypePtr & from_type, IntervalKind kind) + WrapperType createIntervalWrapper(const DataTypePtr & from_type, IntervalKind kind) const { switch (kind.kind) { @@ -4207,7 +4204,7 @@ arguments, result_type, input_rows_count); \ return createStringToEnumWrapper(); else if (isNativeNumber(from_type) || isEnum(from_type)) { - auto function = Function::create(); + auto function = Function::create(context); return createFunctionAdaptor(function, from_type); } else @@ -4846,7 +4843,7 @@ REGISTER_FUNCTION(Conversion) /// MySQL compatibility alias. Cannot be registered as alias, /// because we don't want it to be normalized to toDate in queries, /// otherwise CREATE DICTIONARY query breaks. 
- factory.registerFunction("DATE", {}, FunctionFactory::CaseInsensitive); + factory.registerFunction("DATE", &FunctionToDate::create, {}, FunctionFactory::CaseInsensitive); factory.registerFunction(); factory.registerFunction(); diff --git a/src/Functions/FunctionsRound.cpp b/src/Functions/FunctionsRound.cpp index 02fe1d659de..059476acb40 100644 --- a/src/Functions/FunctionsRound.cpp +++ b/src/Functions/FunctionsRound.cpp @@ -7,11 +7,11 @@ namespace DB REGISTER_FUNCTION(Round) { - factory.registerFunction("round", {}, FunctionFactory::CaseInsensitive); - factory.registerFunction("roundBankers", {}, FunctionFactory::CaseSensitive); - factory.registerFunction("floor", {}, FunctionFactory::CaseInsensitive); - factory.registerFunction("ceil", {}, FunctionFactory::CaseInsensitive); - factory.registerFunction("trunc", {}, FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseSensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerFunction(); /// Compatibility aliases. diff --git a/src/Functions/caseWithExpression.cpp b/src/Functions/caseWithExpression.cpp index 9547cd200b2..71fccc8436e 100644 --- a/src/Functions/caseWithExpression.cpp +++ b/src/Functions/caseWithExpression.cpp @@ -113,9 +113,7 @@ REGISTER_FUNCTION(CaseWithExpression) factory.registerFunction(); /// These are obsolete function names. - factory.registerFunction("caseWithExpr"); + factory.registerAlias("caseWithExpr", "caseWithExpression"); } } - - diff --git a/src/Functions/getFuzzerData.h b/src/Functions/getFuzzerData.h index 635ca2bdce9..8d7b3c090c4 100644 --- a/src/Functions/getFuzzerData.h +++ b/src/Functions/getFuzzerData.h @@ -7,6 +7,7 @@ namespace DB { + class FunctionGetFuzzerData : public IFunction { inline static String fuzz_data; @@ -14,12 +15,7 @@ class FunctionGetFuzzerData : public IFunction public: static constexpr auto name = "getFuzzerData"; - inline static FunctionPtr create(ContextPtr) { return create(); } - - static FunctionPtr create() - { - return std::make_shared(); - } + inline static FunctionPtr create(ContextPtr) { return std::make_shared(); } inline String getName() const override { return name; } diff --git a/src/Functions/multiIf.cpp b/src/Functions/multiIf.cpp index 81304f3afbd..49c45d0c0be 100644 --- a/src/Functions/multiIf.cpp +++ b/src/Functions/multiIf.cpp @@ -549,8 +549,8 @@ REGISTER_FUNCTION(MultiIf) factory.registerFunction(); /// These are obsolete function names. 
- factory.registerFunction("caseWithoutExpr"); - factory.registerFunction("caseWithoutExpression"); + factory.registerAlias("caseWithoutExpr", "multiIf"); + factory.registerAlias("caseWithoutExpression", "multiIf"); } FunctionOverloadResolverPtr createInternalMultiIfOverloadResolver(bool allow_execute_multiif_columnar, bool allow_experimental_variant_type, bool use_variant_as_common_type) @@ -559,5 +559,3 @@ FunctionOverloadResolverPtr createInternalMultiIfOverloadResolver(bool allow_exe } } - - diff --git a/src/Functions/toFixedString.h b/src/Functions/toFixedString.h index 7bee666c5dd..9c7ffc48004 100644 --- a/src/Functions/toFixedString.h +++ b/src/Functions/toFixedString.h @@ -34,7 +34,6 @@ class FunctionToFixedString : public IFunction public: static constexpr auto name = "toFixedString"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } - static FunctionPtr create() { return std::make_shared(); } String getName() const override { @@ -158,4 +157,3 @@ public: }; } - From 7c35f1d07e5d9c344bdb3a14e6f03efd76212e4e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 13:23:09 +0100 Subject: [PATCH 364/374] Less crap --- src/Functions/getFuzzerData.cpp | 48 ++++++++++++++++++++++++++++++++- src/Functions/getFuzzerData.h | 46 ------------------------------- 2 files changed, 47 insertions(+), 47 deletions(-) delete mode 100644 src/Functions/getFuzzerData.h diff --git a/src/Functions/getFuzzerData.cpp b/src/Functions/getFuzzerData.cpp index 6d748619926..5c536477401 100644 --- a/src/Functions/getFuzzerData.cpp +++ b/src/Functions/getFuzzerData.cpp @@ -1,13 +1,59 @@ #ifdef FUZZING_MODE -#include + +#include +#include +#include +#include + namespace DB { +namespace +{ + +class FunctionGetFuzzerData : public IFunction +{ + inline static String fuzz_data; + +public: + static constexpr auto name = "getFuzzerData"; + + inline static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + inline String getName() const override { return name; } + + inline size_t getNumberOfArguments() const override { return 0; } + + DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override + { + return std::make_shared(); + } + + inline bool isDeterministic() const override { return false; } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName &, + const DataTypePtr &, + size_t input_rows_count) const override + { + return DataTypeString().createColumnConst(input_rows_count, fuzz_data); + } + + static void update(const String & fuzz_data_) + { + fuzz_data = fuzz_data_; + } +}; + +} + REGISTER_FUNCTION(GetFuzzerData) { factory.registerFunction(); } } + #endif diff --git a/src/Functions/getFuzzerData.h b/src/Functions/getFuzzerData.h deleted file mode 100644 index 8d7b3c090c4..00000000000 --- a/src/Functions/getFuzzerData.h +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -namespace DB -{ - -class FunctionGetFuzzerData : public IFunction -{ - inline static String fuzz_data; - -public: - static constexpr auto name = "getFuzzerData"; - - inline static FunctionPtr create(ContextPtr) { return std::make_shared(); } - - inline String getName() const override { return name; } - - inline size_t getNumberOfArguments() const override { return 0; } - - DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override - { - return std::make_shared(); - } - - inline bool 
isDeterministic() const override { return false; } - - bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName &, - const DataTypePtr &, - size_t input_rows_count) const override - { - return DataTypeString().createColumnConst(input_rows_count, fuzz_data); - } - - static void update(const String & fuzz_data_) - { - fuzz_data = fuzz_data_; - } -}; - -} From eec0bf2f52e0f7465de2e4b78af5886d13d14893 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 13:34:34 +0100 Subject: [PATCH 365/374] Fix style --- src/Functions/castOrDefault.cpp | 100 ++++++++++++++++---------------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/src/Functions/castOrDefault.cpp b/src/Functions/castOrDefault.cpp index b5653fca1e9..44b39811882 100644 --- a/src/Functions/castOrDefault.cpp +++ b/src/Functions/castOrDefault.cpp @@ -320,16 +320,16 @@ REGISTER_FUNCTION(CastOrDefault) { factory.registerFunction(); - factory.registerFunction("toUInt8OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toUInt8OrDefault", std::make_shared()); }); - factory.registerFunction("toUInt16OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toUInt16OrDefault", std::make_shared()); }); - factory.registerFunction("toUInt32OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toUInt32OrDefault", std::make_shared()); }); - factory.registerFunction("toUInt64OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toUInt64OrDefault", std::make_shared()); }); - factory.registerFunction("toUInt128OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toUInt128OrDefault", std::make_shared()); }, + factory.registerFunction("toUInt8OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toUInt8OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt16OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toUInt16OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt32OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toUInt32OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt64OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toUInt64OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt128OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toUInt128OrDefault", std::make_shared()); }, FunctionDocumentation{ .description=R"( Converts a string in the first argument of the function to UInt128 by parsing it. 
@@ -342,51 +342,51 @@ If the default value is not provided in the second argument, it is assumed to be {"Implicit default value", "SELECT toUInt128OrDefault('upyachka')", "0"}}, .categories{"ConversionFunctions"} }); - factory.registerFunction("toUInt256OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toUInt256OrDefault", std::make_shared()); }); + factory.registerFunction("toUInt256OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toUInt256OrDefault", std::make_shared()); }); - factory.registerFunction("toInt8OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toInt8OrDefault", std::make_shared()); }); - factory.registerFunction("toInt16OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toInt16OrDefault", std::make_shared()); }); - factory.registerFunction("toInt32OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toInt32OrDefault", std::make_shared()); }); - factory.registerFunction("toInt64OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toInt64OrDefault", std::make_shared()); }); - factory.registerFunction("toInt128OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toInt128OrDefault", std::make_shared()); }); - factory.registerFunction("toInt256OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toInt256OrDefault", std::make_shared()); }); + factory.registerFunction("toInt8OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toInt8OrDefault", std::make_shared()); }); + factory.registerFunction("toInt16OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toInt16OrDefault", std::make_shared()); }); + factory.registerFunction("toInt32OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toInt32OrDefault", std::make_shared()); }); + factory.registerFunction("toInt64OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toInt64OrDefault", std::make_shared()); }); + factory.registerFunction("toInt128OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toInt128OrDefault", std::make_shared()); }); + factory.registerFunction("toInt256OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toInt256OrDefault", std::make_shared()); }); - factory.registerFunction("toFloat32OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toFloat32OrDefault", std::make_shared()); }); - factory.registerFunction("toFloat64OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toFloat64OrDefault", std::make_shared()); }); + factory.registerFunction("toFloat32OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toFloat32OrDefault", std::make_shared()); }); + factory.registerFunction("toFloat64OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toFloat64OrDefault", std::make_shared()); }); - factory.registerFunction("toDateOrDefault", [](ContextPtr context){ - return std::make_shared(context, "toDateOrDefault", std::make_shared()); }); - factory.registerFunction("toDate32OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toDate32OrDefault", std::make_shared()); }); - factory.registerFunction("toDateTimeOrDefault", [](ContextPtr context){ - return std::make_shared(context, "toDateTimeOrDefault", std::make_shared()); }); - factory.registerFunction("toDateTime64OrDefault", [](ContextPtr context){ - return std::make_shared(context, 
"toDateTime64OrDefault", std::make_shared(3 /* default scale */)); }); + factory.registerFunction("toDateOrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDateOrDefault", std::make_shared()); }); + factory.registerFunction("toDate32OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDate32OrDefault", std::make_shared()); }); + factory.registerFunction("toDateTimeOrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDateTimeOrDefault", std::make_shared()); }); + factory.registerFunction("toDateTime64OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDateTime64OrDefault", std::make_shared(3 /* default scale */)); }); - factory.registerFunction("toDecimal32OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toDecimal32OrDefault", createDecimalMaxPrecision(0)); }); - factory.registerFunction("toDecimal64OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toDecimal64OrDefault", createDecimalMaxPrecision(0)); }); - factory.registerFunction("toDecimal128OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toDecimal128OrDefault", createDecimalMaxPrecision(0)); }); - factory.registerFunction("toDecimal256OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toDecimal256OrDefault", createDecimalMaxPrecision(0)); }); + factory.registerFunction("toDecimal32OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDecimal32OrDefault", createDecimalMaxPrecision(0)); }); + factory.registerFunction("toDecimal64OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDecimal64OrDefault", createDecimalMaxPrecision(0)); }); + factory.registerFunction("toDecimal128OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDecimal128OrDefault", createDecimalMaxPrecision(0)); }); + factory.registerFunction("toDecimal256OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toDecimal256OrDefault", createDecimalMaxPrecision(0)); }); - factory.registerFunction("toUUIDOrDefault", [](ContextPtr context){ - return std::make_shared(context, "toUUIDOrDefault", std::make_shared()); }); - factory.registerFunction("toIPv4OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toIPv4OrDefault", std::make_shared()); }); - factory.registerFunction("toIPv6OrDefault", [](ContextPtr context){ - return std::make_shared(context, "toIPv6OrDefault", std::make_shared()); }); + factory.registerFunction("toUUIDOrDefault", [](ContextPtr context) + { return std::make_shared(context, "toUUIDOrDefault", std::make_shared()); }); + factory.registerFunction("toIPv4OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toIPv4OrDefault", std::make_shared()); }); + factory.registerFunction("toIPv6OrDefault", [](ContextPtr context) + { return std::make_shared(context, "toIPv6OrDefault", std::make_shared()); }); } } From 938a54c85c9b82bd51607080239574b0bb83d311 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 14:12:48 +0100 Subject: [PATCH 366/374] Update reference --- .../02415_all_new_functions_must_be_documented.reference | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index e15002da69c..8b85ac48c16 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference 
+++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -194,10 +194,7 @@ blockSerializedSize blockSize buildId byteSize -caseWithExpr caseWithExpression -caseWithoutExpr -caseWithoutExpression catboostEvaluate cbrt ceil @@ -312,7 +309,6 @@ fromUnixTimestamp64Micro fromUnixTimestamp64Milli fromUnixTimestamp64Nano fromUnixTimestampInJodaSyntax -fullHostName fuzzBits gccMurmurHash gcd From 4527b3e0d4b1e02285fcfc3579c8c48c5b7657b2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 15:17:58 +0100 Subject: [PATCH 367/374] Remove useless code --- CMakeLists.txt | 5 ----- cmake/fuzzer.cmake | 17 ----------------- 2 files changed, 22 deletions(-) delete mode 100644 cmake/fuzzer.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index eff6dd3ff6a..957bb3f71de 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -110,11 +110,6 @@ endif() # - sanitize.cmake add_library(global-libs INTERFACE) -# We don't want to instrument everything with fuzzer, but only specific targets (see below), -# also, since we build our own llvm, we specifically don't want to instrument -# libFuzzer library itself - it would result in infinite recursion -#include (cmake/fuzzer.cmake) - include (cmake/sanitize.cmake) option(ENABLE_COLORED_BUILD "Enable colors in compiler output" ON) diff --git a/cmake/fuzzer.cmake b/cmake/fuzzer.cmake deleted file mode 100644 index dd0c4b080fe..00000000000 --- a/cmake/fuzzer.cmake +++ /dev/null @@ -1,17 +0,0 @@ -# see ./CMakeLists.txt for variable declaration -if (FUZZER) - if (FUZZER STREQUAL "libfuzzer") - # NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends. - # NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them - # (tests) have entry point for fuzzer and it's not checked. 
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1") - - # NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable - if (NOT LIB_FUZZING_ENGINE) - set (LIB_FUZZING_ENGINE "-fsanitize=fuzzer") - endif () - else () - message (FATAL_ERROR "Unknown fuzzer type: ${FUZZER}") - endif () -endif() From 0a3e42401ce11036a64a2d7a7c30f425d8b700f2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 15:44:36 +0100 Subject: [PATCH 368/374] Fix fuzzers --- .../aggregate_function_state_deserialization_fuzzer.cpp | 3 +++ src/Common/tests/gtest_global_context.cpp | 6 ------ src/Common/tests/gtest_global_context.h | 2 -- src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp | 3 +++ src/Formats/fuzzers/format_fuzzer.cpp | 3 +++ src/Interpreters/fuzzers/execute_query_fuzzer.cpp | 3 +++ 6 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp index 290da81944d..425364efb9c 100644 --- a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp +++ b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp @@ -27,6 +27,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) auto initialize = [&]() mutable { + if (context) + return true; + shared_context = Context::createShared(); context = Context::createGlobal(shared_context.get()); context->makeGlobalContext(); diff --git a/src/Common/tests/gtest_global_context.cpp b/src/Common/tests/gtest_global_context.cpp index ec86c953c5b..0c1556766a9 100644 --- a/src/Common/tests/gtest_global_context.cpp +++ b/src/Common/tests/gtest_global_context.cpp @@ -10,9 +10,3 @@ ContextHolder & getMutableContext() static ContextHolder holder; return holder; } - -void destroyContext() -{ - auto & holder = getMutableContext(); - return holder.destroy(); -} diff --git a/src/Common/tests/gtest_global_context.h b/src/Common/tests/gtest_global_context.h index f846a0dbe4f..7ae8bb32f70 100644 --- a/src/Common/tests/gtest_global_context.h +++ b/src/Common/tests/gtest_global_context.h @@ -28,5 +28,3 @@ struct ContextHolder const ContextHolder & getContext(); ContextHolder & getMutableContext(); - -void destroyContext(); diff --git a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp index e40734e0a57..0ae325871fb 100644 --- a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp +++ b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp @@ -24,6 +24,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) auto initialize = [&]() mutable { + if (context) + return true; + shared_context = Context::createShared(); context = Context::createGlobal(shared_context.get()); context->makeGlobalContext(); diff --git a/src/Formats/fuzzers/format_fuzzer.cpp b/src/Formats/fuzzers/format_fuzzer.cpp index 583d1173a01..46661e4828c 100644 --- a/src/Formats/fuzzers/format_fuzzer.cpp +++ b/src/Formats/fuzzers/format_fuzzer.cpp @@ -32,6 +32,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) auto initialize = [&]() mutable { + if (context) + return true; + shared_context = Context::createShared(); context = Context::createGlobal(shared_context.get()); context->makeGlobalContext(); diff --git 
a/src/Interpreters/fuzzers/execute_query_fuzzer.cpp b/src/Interpreters/fuzzers/execute_query_fuzzer.cpp index edff202d547..a02ce66e6b5 100644 --- a/src/Interpreters/fuzzers/execute_query_fuzzer.cpp +++ b/src/Interpreters/fuzzers/execute_query_fuzzer.cpp @@ -25,6 +25,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) auto initialize = [&]() mutable { + if (context) + return true; + shared_context = Context::createShared(); context = Context::createGlobal(shared_context.get()); context->makeGlobalContext(); From 44e918bc67efe0b1c36059693ee2eb709d31c59c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 16:22:33 +0100 Subject: [PATCH 369/374] Revive `getFuzzerData` --- CMakeLists.txt | 9 ++++----- programs/local/LocalServer.cpp | 6 ------ programs/main.cpp | 15 +++++++-------- src/Interpreters/AsynchronousMetricLog.cpp | 1 - 4 files changed, 11 insertions(+), 20 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 957bb3f71de..8c4e16eace2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -549,7 +549,9 @@ if (ENABLE_RUST) endif() endif() -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64)) +if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" + AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND NOT ENABLE_FUZZING + AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64)) set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON) else () set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF) @@ -572,10 +574,7 @@ if (FUZZER) if (NOT(target_type STREQUAL "INTERFACE_LIBRARY" OR target_type STREQUAL "UTILITY")) target_compile_options(${target} PRIVATE "-fsanitize=fuzzer-no-link") endif() - # clickhouse fuzzer isn't working correctly - # initial PR https://github.com/ClickHouse/ClickHouse/pull/27526 - #if (target MATCHES ".+_fuzzer" OR target STREQUAL "clickhouse") - if (target_type STREQUAL "EXECUTABLE" AND target MATCHES ".+_fuzzer") + if (target_type STREQUAL "EXECUTABLE" AND (target MATCHES ".+_fuzzer" OR target STREQUAL "clickhouse")) message(STATUS "${target} instrumented with fuzzer") target_link_libraries(${target} PUBLIC ch_contrib::fuzzer) # Add to fuzzers bundle diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 50f9c242712..10fbda0fe46 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -1000,12 +1000,6 @@ extern "C" int LLVMFuzzerInitialize(int * pargc, char *** pargv) { std::vector argv(*pargv, *pargv + (*pargc + 1)); - if (!isClickhouseApp("local", argv)) - { - std::cerr << "\033[31m" << "ClickHouse compiled in fuzzing mode, only clickhouse local is available." 
<< "\033[0m" << std::endl; - exit(1); - } - /// As a user you can add flags to clickhouse binary in fuzzing mode as follows /// clickhouse local -- diff --git a/programs/main.cpp b/programs/main.cpp index c5f1b30f60e..0a35594bd30 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -68,7 +68,6 @@ namespace using MainFunc = int (*)(int, char**); #if !defined(FUZZING_MODE) - /// Add an item here to register new application std::pair clickhouse_applications[] = { @@ -105,13 +104,6 @@ std::pair clickhouse_applications[] = {"restart", mainEntryClickHouseRestart}, }; -/// Add an item here to register a new short name -std::pair clickhouse_short_names[] = -{ - {"chl", "local"}, - {"chc", "client"}, -}; - int printHelp(int, char **) { std::cerr << "Use one of the following commands:" << std::endl; @@ -121,6 +113,13 @@ int printHelp(int, char **) } #endif +/// Add an item here to register a new short name +std::pair clickhouse_short_names[] = +{ + {"chl", "local"}, + {"chc", "client"}, +}; + enum class InstructionFail { diff --git a/src/Interpreters/AsynchronousMetricLog.cpp b/src/Interpreters/AsynchronousMetricLog.cpp index f905f72e7a7..5cf7f951eec 100644 --- a/src/Interpreters/AsynchronousMetricLog.cpp +++ b/src/Interpreters/AsynchronousMetricLog.cpp @@ -8,7 +8,6 @@ #include #include #include -#include namespace DB From 01136bbc3beb01eb1f150747412db2030115507a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 17 Mar 2024 19:53:58 +0100 Subject: [PATCH 370/374] Limit backtracking in parser --- programs/compressor/Compressor.cpp | 2 +- programs/format/Format.cpp | 2 +- programs/keeper-client/KeeperClient.cpp | 3 +- src/Access/AccessEntityIO.cpp | 2 +- src/Access/RowPolicyCache.cpp | 2 +- src/Access/UsersConfigAccessStorage.cpp | 2 +- .../parseAggregateFunctionParameters.cpp | 3 +- src/Backups/BackupInfo.cpp | 2 +- src/Backups/RestorerFromBackup.cpp | 4 +- src/Client/ClientBase.cpp | 24 +-- src/Client/ClientBase.h | 2 +- src/Client/QueryFuzzer.cpp | 6 +- .../NamedCollections/NamedCollectionUtils.cpp | 2 +- .../tests/gtest_compressionCodec.cpp | 2 +- src/Core/Defines.h | 2 + src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.h | 1 + src/DataTypes/DataTypeFactory.cpp | 5 +- src/Databases/DDLDependencyVisitor.cpp | 3 +- src/Databases/DatabaseDictionary.cpp | 4 +- src/Databases/DatabaseFilesystem.cpp | 2 +- src/Databases/DatabaseHDFS.cpp | 2 +- src/Databases/DatabaseOnDisk.cpp | 18 +- src/Databases/DatabaseOrdinary.cpp | 2 +- src/Databases/DatabaseReplicated.cpp | 7 +- src/Databases/DatabaseS3.cpp | 2 +- src/Databases/DatabasesCommon.cpp | 5 +- src/Databases/DatabasesCommon.h | 3 +- src/Databases/MySQL/DatabaseMySQL.cpp | 14 +- .../MySQL/tryConvertStringLiterals.cpp | 2 +- .../MySQL/tryParseTableIDFromDDL.cpp | 2 +- .../MySQL/tryQuoteUnrecognizedTokens.cpp | 2 +- src/Databases/SQLite/DatabaseSQLite.cpp | 6 +- .../tests/gtest_dictionary_configuration.cpp | 8 +- src/Formats/SchemaInferenceUtils.cpp | 2 +- src/Functions/FunctionSQLJSON.h | 9 +- .../UserDefinedSQLObjectsBackup.cpp | 2 +- .../UserDefinedSQLObjectsDiskStorage.cpp | 3 +- .../UserDefinedSQLObjectsZooKeeperStorage.cpp | 3 +- src/Functions/formatQuery.cpp | 168 +++++++++--------- ...InterpreterShowCreateAccessEntityQuery.cpp | 2 +- src/Interpreters/AsynchronousMetricLog.cpp | 10 +- src/Interpreters/DDLTask.cpp | 5 +- .../IInterpreterUnionOrSelectQuery.cpp | 2 +- src/Interpreters/InterpreterCreateQuery.cpp | 10 +- src/Interpreters/InterpreterDeleteQuery.cpp | 3 +- .../InterpreterKillQueryQuery.cpp | 2 +- 
src/Interpreters/InterpreterSelectQuery.cpp | 4 +- .../JoinToSubqueryTransformVisitor.cpp | 2 +- .../MySQL/tests/gtest_create_rewritten.cpp | 2 +- src/Interpreters/SystemLog.cpp | 4 +- src/Interpreters/executeQuery.cpp | 10 +- .../getCustomKeyFilterForParallelReplicas.cpp | 2 +- src/Interpreters/loadMetadata.cpp | 4 +- .../parseColumnsListForTableFunction.cpp | 4 +- .../tests/gtest_comparison_graph.cpp | 2 +- .../tests/gtest_cycle_aliases.cpp | 10 +- .../tests/gtest_table_overrides.cpp | 4 +- src/Parsers/ExpressionListParsers.cpp | 2 +- src/Parsers/IParser.cpp | 33 ++++ src/Parsers/IParser.h | 15 +- .../KustoFunctions/IParserKQLFunction.cpp | 8 +- .../Kusto/KustoFunctions/IParserKQLFunction.h | 4 +- .../KustoFunctions/KQLCastingFunctions.cpp | 2 +- .../KustoFunctions/KQLDynamicFunctions.cpp | 10 +- .../Kusto/KustoFunctions/KQLIPFunctions.cpp | 32 ++-- .../KustoFunctions/KQLStringFunctions.cpp | 4 +- src/Parsers/Kusto/ParserKQLDistinct.cpp | 2 +- src/Parsers/Kusto/ParserKQLExtend.cpp | 4 +- src/Parsers/Kusto/ParserKQLFilter.cpp | 2 +- src/Parsers/Kusto/ParserKQLLimit.cpp | 2 +- src/Parsers/Kusto/ParserKQLMVExpand.cpp | 16 +- src/Parsers/Kusto/ParserKQLMVExpand.h | 2 +- src/Parsers/Kusto/ParserKQLMakeSeries.cpp | 20 +-- src/Parsers/Kusto/ParserKQLMakeSeries.h | 2 +- src/Parsers/Kusto/ParserKQLPrint.cpp | 2 +- src/Parsers/Kusto/ParserKQLProject.cpp | 2 +- src/Parsers/Kusto/ParserKQLQuery.cpp | 18 +- src/Parsers/Kusto/ParserKQLQuery.h | 6 +- src/Parsers/Kusto/ParserKQLSort.cpp | 2 +- src/Parsers/Kusto/ParserKQLStatement.cpp | 2 +- src/Parsers/Kusto/ParserKQLSummarize.cpp | 8 +- src/Parsers/Kusto/parseKQLQuery.cpp | 23 ++- src/Parsers/Kusto/parseKQLQuery.h | 26 +-- .../tests/gtest_alter_command_parser.cpp | 2 +- .../MySQL/tests/gtest_alter_parser.cpp | 2 +- .../MySQL/tests/gtest_column_parser.cpp | 4 +- .../MySQL/tests/gtest_constraint_parser.cpp | 10 +- .../MySQL/tests/gtest_create_parser.cpp | 8 +- .../MySQL/tests/gtest_index_parser.cpp | 50 +++--- .../tests/gtest_partition_options_parser.cpp | 26 +-- .../MySQL/tests/gtest_partition_parser.cpp | 21 ++- .../MySQL/tests/gtest_reference_parser.cpp | 21 ++- .../MySQL/tests/gtest_subpartition_parser.cpp | 5 +- .../tests/gtest_table_options_parser.cpp | 4 +- src/Parsers/PRQL/ParserPRQLQuery.cpp | 4 +- src/Parsers/PRQL/ParserPRQLQuery.h | 3 +- src/Parsers/ParserAlterQuery.cpp | 3 +- src/Parsers/ParserCreateQuery.h | 2 +- src/Parsers/QueryParameterVisitor.cpp | 2 +- src/Parsers/TokenIterator.h | 12 -- src/Parsers/examples/create_parser.cpp | 2 +- src/Parsers/examples/select_parser.cpp | 2 +- src/Parsers/fuzzers/select_parser_fuzzer.cpp | 3 +- src/Parsers/parseQuery.cpp | 28 +-- src/Parsers/parseQuery.h | 16 +- src/Parsers/tests/gtest_Parser.cpp | 13 +- src/Parsers/tests/gtest_common.cpp | 8 +- src/Parsers/tests/gtest_dictionary_parser.cpp | 18 +- src/Parsers/tests/gtest_format_hiliting.cpp | 2 +- src/Planner/PlannerJoinTree.cpp | 2 +- src/Planner/Utils.cpp | 2 +- .../Impl/ConstantExpressionTemplate.cpp | 2 +- .../Formats/Impl/MySQLDumpRowInputFormat.cpp | 3 +- .../Formats/Impl/ValuesBlockInputFormat.cpp | 4 +- .../QueryPlan/ReadFromMergeTree.cpp | 2 +- src/Server/GRPCServer.cpp | 2 +- src/Server/PostgreSQLHandler.cpp | 1 + src/Storages/ColumnsDescription.cpp | 4 +- src/Storages/ConstraintsDescription.cpp | 2 +- src/Storages/IndicesDescription.cpp | 2 +- src/Storages/KeyDescription.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 +- .../MergeTreeDataPartWriterCompact.cpp | 2 +- .../MergeTreeDataPartWriterOnDisk.cpp | 4 +- 
.../MergeTree/MergeTreeDataPartWriterWide.cpp | 2 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 2 +- .../ReplicatedMergeTreeTableMetadata.cpp | 4 +- src/Storages/MutationCommands.cpp | 2 +- .../StorageMaterializedPostgreSQL.cpp | 2 +- src/Storages/ProjectionsDescription.cpp | 2 +- .../System/StorageSystemDDLWorkerQueue.cpp | 3 +- .../System/attachInformationSchemaTables.cpp | 2 +- src/Storages/TTLDescription.cpp | 2 +- src/Storages/getStructureOfRemoteTable.cpp | 4 +- ..._transform_query_for_external_database.cpp | 4 +- src/TableFunctions/Hive/TableFunctionHive.cpp | 4 +- src/TableFunctions/TableFunctionExplain.cpp | 8 +- 138 files changed, 549 insertions(+), 466 deletions(-) create mode 100644 src/Parsers/IParser.cpp diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index 7125fdc744f..050bb495024 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -143,7 +143,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) ParserCodec codec_parser; std::string codecs_line = boost::algorithm::join(codecs, ","); - auto ast = parseQuery(codec_parser, "(" + codecs_line + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto ast = parseQuery(codec_parser, "(" + codecs_line + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); codec = CompressionCodecFactory::instance().get(ast, nullptr); } else diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index c92106e2f90..50f801f2560 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -234,7 +234,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv) size_t approx_query_length = multiple ? find_first_symbols<';'>(pos, end) - pos : end - pos; ASTPtr res = parseQueryAndMovePosition( - parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth); + parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth, cmd_settings.max_parser_backtracks); std::unique_ptr insert_query_payload = nullptr; /// If the query is INSERT ... VALUES, then we will try to parse the data. 
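
The pattern repeated across the call sites in this patch is uniform: every `parseQuery`/`tryParseQuery`/`IParser::Pos` construction gains a `max_parser_backtracks` argument next to the existing `max_parser_depth` limit. A minimal sketch of a caller after this change, assuming the signatures shown in the hunks (the helper name `parseWithLimits` and the exact header set are illustrative, not part of the patch):

``` cpp
// Sketch only, not part of the diff: the parser now takes both a
// recursion-depth limit and a backtracking limit.
#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>
#include <Core/Settings.h>

DB::ASTPtr parseWithLimits(const std::string & query, const DB::Settings & settings)
{
    const char * begin = query.data();
    const char * end = begin + query.size();

    DB::ParserQuery parser(end);
    return DB::parseQuery(
        parser, begin, end,
        "query",                          /// description used in error messages
        settings.max_query_size,
        settings.max_parser_depth,
        settings.max_parser_backtracks);  /// new limit introduced by this patch
}
```
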
diff --git a/programs/keeper-client/KeeperClient.cpp b/programs/keeper-client/KeeperClient.cpp index 595fc65e50e..8297fab5ed9 100644 --- a/programs/keeper-client/KeeperClient.cpp +++ b/programs/keeper-client/KeeperClient.cpp @@ -44,7 +44,7 @@ String KeeperClient::executeFourLetterCommand(const String & command) std::vector KeeperClient::getCompletions(const String & prefix) const { Tokens tokens(prefix.data(), prefix.data() + prefix.size(), 0, false); - IParser::Pos pos(tokens, 0); + IParser::Pos pos(tokens, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); if (pos->type != TokenType::BareWord) return registered_commands_and_four_letter_words; @@ -278,6 +278,7 @@ bool KeeperClient::processQueryText(const String & text) /* allow_multi_statements = */ true, /* max_query_size = */ 0, /* max_parser_depth = */ 0, + /* max_parser_backtracks = */ 0, /* skip_insignificant = */ false); if (!res) diff --git a/src/Access/AccessEntityIO.cpp b/src/Access/AccessEntityIO.cpp index 80bb63b04bf..b0dfd74c53b 100644 --- a/src/Access/AccessEntityIO.cpp +++ b/src/Access/AccessEntityIO.cpp @@ -62,7 +62,7 @@ AccessEntityPtr deserializeAccessEntityImpl(const String & definition) const char * end = begin + definition.size(); while (pos < end) { - queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH)); + queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS)); while (isWhitespaceASCII(*pos) || *pos == ';') ++pos; } diff --git a/src/Access/RowPolicyCache.cpp b/src/Access/RowPolicyCache.cpp index 13140099a63..c1c4928d0da 100644 --- a/src/Access/RowPolicyCache.cpp +++ b/src/Access/RowPolicyCache.cpp @@ -86,7 +86,7 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_) try { ParserExpression parser; - parsed_filters[filter_type_i] = parseQuery(parser, filter, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + parsed_filters[filter_type_i] = parseQuery(parser, filter, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); } catch (...) { diff --git a/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp index e9b2e1397ab..b4b843fc77e 100644 --- a/src/Access/UsersConfigAccessStorage.cpp +++ b/src/Access/UsersConfigAccessStorage.cpp @@ -66,7 +66,7 @@ namespace String error_message; const char * pos = string_query.data(); - auto ast = tryParseQuery(parser, pos, pos + string_query.size(), error_message, false, "", false, 0, 0); + auto ast = tryParseQuery(parser, pos, pos + string_query.size(), error_message, false, "", false, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS, true); if (!ast) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Failed to parse grant query. 
Error: {}", error_message); diff --git a/src/AggregateFunctions/parseAggregateFunctionParameters.cpp b/src/AggregateFunctions/parseAggregateFunctionParameters.cpp index db1efe224d1..593be1e0a79 100644 --- a/src/AggregateFunctions/parseAggregateFunctionParameters.cpp +++ b/src/AggregateFunctions/parseAggregateFunctionParameters.cpp @@ -81,7 +81,8 @@ void getAggregateFunctionNameAndParametersArray( ParserExpressionList params_parser(false); ASTPtr args_ast = parseQuery(params_parser, parameters_str.data(), parameters_str.data() + parameters_str.size(), - "parameters of aggregate function in " + error_context, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + "parameters of aggregate function in " + error_context, + 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); if (args_ast->children.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Incorrect list of parameters to aggregate function {}", diff --git a/src/Backups/BackupInfo.cpp b/src/Backups/BackupInfo.cpp index 2bff400d4fe..461f613ecd2 100644 --- a/src/Backups/BackupInfo.cpp +++ b/src/Backups/BackupInfo.cpp @@ -25,7 +25,7 @@ String BackupInfo::toString() const BackupInfo BackupInfo::fromString(const String & str) { ParserIdentifierWithOptionalParameters parser; - ASTPtr ast = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr ast = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); return fromAST(*ast); } diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index 87c143f0fe2..e20e8eb66c6 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -424,7 +424,7 @@ void RestorerFromBackup::findTableInBackupImpl(const QualifiedTableName & table_ readStringUntilEOF(create_query_str, *read_buffer); read_buffer.reset(); ParserCreateQuery create_parser; - ASTPtr create_table_query = parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr create_table_query = parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); applyCustomStoragePolicy(create_table_query); renameDatabaseAndTableNameInCreateQuery(create_table_query, renaming_map, context->getGlobalContext()); String create_table_query_str = serializeAST(*create_table_query); @@ -534,7 +534,7 @@ void RestorerFromBackup::findDatabaseInBackupImpl(const String & database_name_i readStringUntilEOF(create_query_str, *read_buffer); read_buffer.reset(); ParserCreateQuery create_parser; - ASTPtr create_database_query = parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr create_database_query = parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); renameDatabaseAndTableNameInCreateQuery(create_database_query, renaming_map, context->getGlobalContext()); String create_database_query_str = serializeAST(*create_database_query); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 48962880b8f..d561a64895b 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -345,7 +345,7 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_mu if (dialect == Dialect::kusto) parser = std::make_unique(end, global_context->getSettings().allow_settings_after_format_in_insert); else if (dialect == Dialect::prql) - parser = std::make_unique(max_length, settings.max_parser_depth); + parser = std::make_unique(max_length, settings.max_parser_depth, 
settings.max_parser_backtracks); else parser = std::make_unique(end, global_context->getSettings().allow_settings_after_format_in_insert); @@ -353,9 +353,9 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_mu { String message; if (dialect == Dialect::kusto) - res = tryParseKQLQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth); + res = tryParseKQLQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks, true); else - res = tryParseQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth); + res = tryParseQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks, true); if (!res) { @@ -366,9 +366,9 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_mu else { if (dialect == Dialect::kusto) - res = parseKQLQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth); + res = parseKQLQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks); else - res = parseQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth); + res = parseQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks); } if (is_interactive) @@ -385,12 +385,12 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_mu /// Consumes trailing semicolons and tries to consume the same-line trailing comment. -void ClientBase::adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth) +void ClientBase::adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth, uint32_t max_parser_backtracks) { // We have to skip the trailing semicolon that might be left // after VALUES parsing or just after a normal semicolon-terminated query. Tokens after_query_tokens(this_query_end, all_queries_end); - IParser::Pos after_query_iterator(after_query_tokens, max_parser_depth); + IParser::Pos after_query_iterator(after_query_tokens, max_parser_depth, max_parser_backtracks); while (after_query_iterator.isValid() && after_query_iterator->type == TokenType::Semicolon) { this_query_end = after_query_iterator->end; @@ -1984,6 +1984,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( return MultiQueryProcessingStage::QUERIES_END; unsigned max_parser_depth = static_cast(global_context->getSettingsRef().max_parser_depth); + unsigned max_parser_backtracks = static_cast(global_context->getSettingsRef().max_parser_backtracks); // If there are only comments left until the end of file, we just // stop. The parser can't handle this situation because it always @@ -1994,7 +1995,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( // and it makes more sense to treat them as such. 
{ Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, max_parser_depth); + IParser::Pos token_iterator(tokens, max_parser_depth, max_parser_backtracks); if (!token_iterator.isValid()) return MultiQueryProcessingStage::QUERIES_END; } @@ -2015,7 +2016,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( if (ignore_error) { Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, max_parser_depth); + IParser::Pos token_iterator(tokens, max_parser_depth, max_parser_backtracks); while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid()) ++token_iterator; this_query_begin = token_iterator->end; @@ -2055,7 +2056,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( // after we have processed the query. But even this guess is // beneficial so that we see proper trailing comments in "echo" and // server log. - adjustQueryEnd(this_query_end, all_queries_end, max_parser_depth); + adjustQueryEnd(this_query_end, all_queries_end, max_parser_depth, max_parser_backtracks); return MultiQueryProcessingStage::EXECUTE_QUERY; } @@ -2251,7 +2252,8 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) this_query_end = insert_ast->end; adjustQueryEnd( this_query_end, all_queries_end, - static_cast(global_context->getSettingsRef().max_parser_depth)); + static_cast(global_context->getSettingsRef().max_parser_depth), + static_cast(global_context->getSettingsRef().max_parser_backtracks)); } // Report error. diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index dd08e7c059b..7a9e9666e67 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -94,7 +94,7 @@ protected: void processParsedSingleQuery(const String & full_query, const String & query_to_execute, ASTPtr parsed_query, std::optional echo_query_ = {}, bool report_error = false); - static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth); + static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth, uint32_t max_parser_backtracks); ASTPtr parseQuery(const char *& pos, const char * end, bool allow_multi_statements) const; static void setupSignalHandler(); diff --git a/src/Client/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp index 0a7cb1b36db..7be01686258 100644 --- a/src/Client/QueryFuzzer.cpp +++ b/src/Client/QueryFuzzer.cpp @@ -569,7 +569,8 @@ void QueryFuzzer::fuzzColumnDeclaration(ASTColumnDeclaration & column) auto data_type = fuzzDataType(DataTypeFactory::instance().get(column.type)); ParserDataType parser; - column.type = parseQuery(parser, data_type->getName(), DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH); + column.type = parseQuery(parser, data_type->getName(), + DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); } } @@ -821,7 +822,8 @@ static ASTPtr tryParseInsertQuery(const String & full_query) ParserInsertQuery parser(end, false); String message; - return tryParseQuery(parser, pos, end, message, false, "", false, DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH); + return tryParseQuery(parser, pos, end, message, false, "", false, + DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS, true); } ASTs QueryFuzzer::getInsertQueriesForFuzzedTables(const String & full_query) diff --git a/src/Common/NamedCollections/NamedCollectionUtils.cpp 
b/src/Common/NamedCollections/NamedCollectionUtils.cpp index fe0f42467c7..9b569390b3c 100644 --- a/src/Common/NamedCollections/NamedCollectionUtils.cpp +++ b/src/Common/NamedCollections/NamedCollectionUtils.cpp @@ -302,7 +302,7 @@ private: readStringUntilEOF(query, in); ParserCreateNamedCollectionQuery parser; - auto ast = parseQuery(parser, query, "in file " + path, 0, settings.max_parser_depth); + auto ast = parseQuery(parser, query, "in file " + path, 0, settings.max_parser_depth, settings.max_parser_backtracks); const auto & create_query = ast->as(); return create_query; } diff --git a/src/Compression/tests/gtest_compressionCodec.cpp b/src/Compression/tests/gtest_compressionCodec.cpp index 24f16a55c25..16573e035e0 100644 --- a/src/Compression/tests/gtest_compressionCodec.cpp +++ b/src/Compression/tests/gtest_compressionCodec.cpp @@ -442,7 +442,7 @@ CompressionCodecPtr makeCodec(const std::string & codec_string, const DataTypePt { const std::string codec_statement = "(" + codec_string + ")"; Tokens tokens(codec_statement.begin().base(), codec_statement.end().base()); - IParser::Pos token_iterator(tokens, 0); + IParser::Pos token_iterator(tokens, 0, 0); Expected expected; ASTPtr codec_ast; diff --git a/src/Core/Defines.h b/src/Core/Defines.h index cc6f49aa361..a8dd26519c2 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -63,6 +63,8 @@ static constexpr auto DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC = 120; /// Default limit on recursion depth of recursive descend parser. static constexpr auto DBMS_DEFAULT_MAX_PARSER_DEPTH = 1000; +/// Default limit on the amount of backtracking of recursive descend parser. +static constexpr auto DBMS_DEFAULT_MAX_PARSER_BACKTRACKS = 1000000; /// Default limit on query size. static constexpr auto DBMS_DEFAULT_MAX_QUERY_SIZE = 262144; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 48f6b4d621c..e6adb00137f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -607,6 +607,7 @@ class IColumn; M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \ M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). 
If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \ + M(UInt64, max_parser_backtracks, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS, "Maximum parser backtracking (how many times it tries different alternatives in the recursive descend parsing process).", 0) \ M(Bool, allow_settings_after_format_in_insert, false, "Allow SETTINGS after FORMAT, but note, that this is not always safe (note: this is a compatibility setting).", 0) \ M(Seconds, periodic_live_view_refresh, 60, "Interval after which periodically refreshed live view is forced to refresh.", 0) \ M(Bool, transform_null_in, false, "If enabled, NULL values will be matched with 'IN' operator as if they are considered equal.", 0) \ diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index 4914f97a6fb..8e2b2915c2a 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -95,6 +95,7 @@ static std::map sett {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication is dependent materialized view cannot work together with async inserts."}, {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, + {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, }}, {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index d154b386ace..844384f3c95 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -56,13 +56,14 @@ DataTypePtr DataTypeFactory::getImpl(const String & full_name) const { String out_err; const char * start = full_name.data(); - ast = tryParseQuery(parser, start, start + full_name.size(), out_err, false, "data type", false, DBMS_DEFAULT_MAX_QUERY_SIZE, data_type_max_parse_depth); + ast = tryParseQuery(parser, start, start + full_name.size(), out_err, false, "data type", false, + DBMS_DEFAULT_MAX_QUERY_SIZE, data_type_max_parse_depth, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS, true); if (!ast) return nullptr; } else { - ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", false, data_type_max_parse_depth); + ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", false, data_type_max_parse_depth, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); } return getImpl(ast); diff --git a/src/Databases/DDLDependencyVisitor.cpp b/src/Databases/DDLDependencyVisitor.cpp index cb85119e3b0..75a01a6190f 100644 --- a/src/Databases/DDLDependencyVisitor.cpp +++ b/src/Databases/DDLDependencyVisitor.cpp @@ -444,8 +444,9 @@ namespace ParserSelectWithUnionQuery parser; String description = fmt::format("Query for ClickHouse dictionary {}", data.table_name); String fixed_query = removeWhereConditionPlaceholder(query); + const Settings & settings = data.context->getSettingsRef(); ASTPtr select = parseQuery(parser, 
fixed_query, description, - data.context->getSettingsRef().max_query_size, data.context->getSettingsRef().max_parser_depth); + settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); DDLDependencyVisitor::Visitor visitor{data}; visitor.visit(select); diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index 9a65c7a46ef..76fdb4fa961 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -115,7 +115,7 @@ ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const String & table_name, Co const char * pos = query.data(); std::string error_message; auto ast = tryParseQuery(parser, pos, pos + query.size(), error_message, - /* hilite = */ false, "", /* allow_multi_statements = */ false, 0, settings.max_parser_depth); + /* hilite = */ false, "", /* allow_multi_statements = */ false, 0, settings.max_parser_depth, settings.max_parser_backtracks, true); if (!ast && throw_on_error) throw Exception::createDeprecated(error_message, ErrorCodes::SYNTAX_ERROR); @@ -134,7 +134,7 @@ ASTPtr DatabaseDictionary::getCreateDatabaseQuery() const } auto settings = getContext()->getSettingsRef(); ParserCreateQuery parser; - return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); + return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth, settings.max_parser_backtracks); } void DatabaseDictionary::shutdown() diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 5af1e1ae0d2..05af0acf978 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -187,7 +187,7 @@ ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem('{}')", backQuoteIfNeed(getDatabaseName()), path); ParserCreateQuery parser; - ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); + ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth, settings.max_parser_backtracks); if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) { diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp index 3a1e6b16ccf..2688ff2443c 100644 --- a/src/Databases/DatabaseHDFS.cpp +++ b/src/Databases/DatabaseHDFS.cpp @@ -183,7 +183,7 @@ ASTPtr DatabaseHDFS::getCreateDatabaseQuery() const ParserCreateQuery parser; const String query = fmt::format("CREATE DATABASE {} ENGINE = HDFS('{}')", backQuoteIfNeed(getDatabaseName()), source); - ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); + ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth, settings.max_parser_backtracks); if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) { diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index fcb073644c5..dcfc1916450 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -526,7 +526,7 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const /// If database.sql doesn't exist, then engine is Ordinary String query = "CREATE DATABASE " + backQuoteIfNeed(getDatabaseName()) + " ENGINE = Ordinary"; ParserCreateQuery parser; - ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 
0, settings.max_parser_depth); + ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth, settings.max_parser_backtracks); } if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) @@ -707,7 +707,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata( const char * pos = query.data(); std::string error_message; auto ast = tryParseQuery(parser, pos, pos + query.size(), error_message, /* hilite = */ false, - "in file " + metadata_file_path, /* allow_multi_statements = */ false, 0, settings.max_parser_depth); + "in file " + metadata_file_path, /* allow_multi_statements = */ false, 0, settings.max_parser_depth, settings.max_parser_backtracks, true); if (!ast && throw_on_error) throw Exception::createDeprecated(error_message, ErrorCodes::SYNTAX_ERROR); @@ -765,12 +765,14 @@ ASTPtr DatabaseOnDisk::getCreateQueryFromStorage(const String & table_name, cons auto ast_storage = std::make_shared(); ast_storage->set(ast_storage->engine, ast_engine); - unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); - auto create_table_query = DB::getCreateQueryFromStorage(storage, - ast_storage, - false, - max_parser_depth, - throw_on_error); + const Settings & settings = getContext()->getSettingsRef(); + auto create_table_query = DB::getCreateQueryFromStorage( + storage, + ast_storage, + false, + static_cast(settings.max_parser_depth), + static_cast(settings.max_parser_backtracks), + throw_on_error); create_table_query->set(create_table_query->as()->comment, std::make_shared("SYSTEM TABLE is built on the fly.")); diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index bc552b9c927..161dd3d3f60 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -469,7 +469,7 @@ void DatabaseOrdinary::alterTable(ContextPtr local_context, const StorageID & ta statement.data() + statement.size(), "in file " + table_metadata_path, 0, - local_context->getSettingsRef().max_parser_depth); + local_context->getSettingsRef().max_parser_depth, local_context->getSettingsRef().max_parser_backtracks); applyMetadataChangesToCreateQuery(ast, metadata); diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 9cf19a251f7..3b6a712510d 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -812,7 +812,8 @@ static UUID getTableUUIDIfReplicated(const String & metadata, ContextPtr context ParserCreateQuery parser; auto size = context->getSettingsRef().max_query_size; auto depth = context->getSettingsRef().max_parser_depth; - ASTPtr query = parseQuery(parser, metadata, size, depth); + auto backtracks = context->getSettingsRef().max_parser_backtracks; + ASTPtr query = parseQuery(parser, metadata, size, depth, backtracks); const ASTCreateQuery & create = query->as(); if (!create.storage || !create.storage->engine) return UUIDHelpers::Nil; @@ -1234,7 +1235,7 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node { ParserCreateQuery parser; String description = "in ZooKeeper " + zookeeper_path + "/metadata/" + node_name; - auto ast = parseQuery(parser, query, description, 0, getContext()->getSettingsRef().max_parser_depth); + auto ast = parseQuery(parser, query, description, 0, getContext()->getSettingsRef().max_parser_depth, getContext()->getSettingsRef().max_parser_backtracks); auto & create = ast->as(); if (create.uuid == UUIDHelpers::Nil || create.getTable() != 
TABLE_WITH_UUID_NAME_PLACEHOLDER || create.database) @@ -1559,7 +1560,7 @@ DatabaseReplicated::getTablesForBackup(const FilterByNameFunction & filter, cons for (const auto & [table_name, metadata] : snapshot) { ParserCreateQuery parser; - auto create_table_query = parseQuery(parser, metadata, 0, getContext()->getSettingsRef().max_parser_depth); + auto create_table_query = parseQuery(parser, metadata, 0, getContext()->getSettingsRef().max_parser_depth, getContext()->getSettingsRef().max_parser_backtracks); auto & create = create_table_query->as(); create.attach = false; diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index d2ca5a05ea4..159a5242dbe 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -191,7 +191,7 @@ ASTPtr DatabaseS3::getCreateDatabaseQuery() const creation_args += fmt::format(", '{}', '{}'", config.access_key_id.value(), config.secret_access_key.value()); const String query = fmt::format("CREATE DATABASE {} ENGINE = S3({})", backQuoteIfNeed(getDatabaseName()), creation_args); - ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); + ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth, settings.max_parser_backtracks); if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) { diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 963cf0064df..f8d6ad69ba8 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -108,7 +108,8 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo } -ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, uint32_t max_parser_depth, bool throw_on_error) +ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, + uint32_t max_parser_depth, uint32_t max_parser_backtracks, bool throw_on_error) { auto table_id = storage->getStorageID(); auto metadata_ptr = storage->getInMemoryMetadataPtr(); @@ -148,7 +149,7 @@ ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_ Expected expected; expected.max_parsed_pos = string_end; Tokens tokens(type_name.c_str(), string_end); - IParser::Pos pos(tokens, max_parser_depth); + IParser::Pos pos(tokens, max_parser_depth, max_parser_backtracks); ParserDataType parser; if (!parser.parse(pos, ast_type, expected)) { diff --git a/src/Databases/DatabasesCommon.h b/src/Databases/DatabasesCommon.h index 4e9d967c11a..81a3c55a435 100644 --- a/src/Databases/DatabasesCommon.h +++ b/src/Databases/DatabasesCommon.h @@ -13,7 +13,8 @@ namespace DB { void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemoryMetadata & metadata); -ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, uint32_t max_parser_depth, bool throw_on_error); +ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, + uint32_t max_parser_depth, uint32_t max_parser_backtracks, bool throw_on_error); /// Cleans a CREATE QUERY from temporary flags like "IF NOT EXISTS", "OR REPLACE", "AS SELECT" (for non-views), etc. 
void cleanupObjectDefinitionFromTemporaryFlags(ASTCreateQuery & query); diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp index 96a5c3a18ce..d9b0f7f9ac7 100644 --- a/src/Databases/MySQL/DatabaseMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMySQL.cpp @@ -174,12 +174,14 @@ ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, Context ast_storage->settings = nullptr; } - unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); - auto create_table_query = DB::getCreateQueryFromStorage(storage, - table_storage_define, - true, - max_parser_depth, - throw_on_error); + const Settings & settings = getContext()->getSettingsRef(); + auto create_table_query = DB::getCreateQueryFromStorage( + storage, + table_storage_define, + true, + static_cast(settings.max_parser_depth), + static_cast(settings.max_parser_backtracks), + throw_on_error); return create_table_query; } diff --git a/src/Databases/MySQL/tryConvertStringLiterals.cpp b/src/Databases/MySQL/tryConvertStringLiterals.cpp index ab392b301e8..ac65d510f67 100644 --- a/src/Databases/MySQL/tryConvertStringLiterals.cpp +++ b/src/Databases/MySQL/tryConvertStringLiterals.cpp @@ -61,7 +61,7 @@ static bool tryReadCharset( bool tryConvertStringLiterals(String & query) { Tokens tokens(query.data(), query.data() + query.size()); - IParser::Pos pos(tokens, 0); + IParser::Pos pos(tokens, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); Expected expected; String rewritten_query; rewritten_query.reserve(query.size()); diff --git a/src/Databases/MySQL/tryParseTableIDFromDDL.cpp b/src/Databases/MySQL/tryParseTableIDFromDDL.cpp index a01eb311450..4fe0f44c767 100644 --- a/src/Databases/MySQL/tryParseTableIDFromDDL.cpp +++ b/src/Databases/MySQL/tryParseTableIDFromDDL.cpp @@ -10,7 +10,7 @@ StorageID tryParseTableIDFromDDL(const String & query, const String & default_da { bool is_ddl = false; Tokens tokens(query.data(), query.data() + query.size()); - IParser::Pos pos(tokens, 0); + IParser::Pos pos(tokens, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); Expected expected; if (ParserKeyword("CREATE TEMPORARY TABLE").ignore(pos, expected) || ParserKeyword("CREATE TABLE").ignore(pos, expected)) { diff --git a/src/Databases/MySQL/tryQuoteUnrecognizedTokens.cpp b/src/Databases/MySQL/tryQuoteUnrecognizedTokens.cpp index c5a366698e6..9ecc81c693f 100644 --- a/src/Databases/MySQL/tryQuoteUnrecognizedTokens.cpp +++ b/src/Databases/MySQL/tryQuoteUnrecognizedTokens.cpp @@ -37,7 +37,7 @@ static void quoteLiteral( bool tryQuoteUnrecognizedTokens(String & query) { Tokens tokens(query.data(), query.data() + query.size()); - IParser::Pos pos(tokens, 0); + IParser::Pos pos(tokens, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); Expected expected; String rewritten_query; const char * copy_from = query.data(); diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index b3d5288cdf7..b7a82fd9d0f 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -194,10 +194,10 @@ ASTPtr DatabaseSQLite::getCreateTableQueryImpl(const String & table_name, Contex /// Add table_name to engine arguments storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 1, std::make_shared(table_id.table_name)); - unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); + const Settings & settings = 
getContext()->getSettingsRef(); + auto create_table_query = DB::getCreateQueryFromStorage(storage, table_storage_define, true, - max_parser_depth, - throw_on_error); + static_cast(settings.max_parser_depth), static_cast(settings.max_parser_backtracks), throw_on_error); return create_table_query; } diff --git a/src/Dictionaries/tests/gtest_dictionary_configuration.cpp b/src/Dictionaries/tests/gtest_dictionary_configuration.cpp index 989ce5c8f18..08aad663a8c 100644 --- a/src/Dictionaries/tests/gtest_dictionary_configuration.cpp +++ b/src/Dictionaries/tests/gtest_dictionary_configuration.cpp @@ -48,7 +48,7 @@ TEST(ConvertDictionaryAST, SimpleDictConfiguration) " COMMENT 'hello world!'"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create, getContext().context); @@ -119,7 +119,7 @@ TEST(ConvertDictionaryAST, TrickyAttributes) " SOURCE(CLICKHOUSE(HOST 'localhost'))"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create, getContext().context); @@ -164,7 +164,7 @@ TEST(ConvertDictionaryAST, ComplexKeyAndLayoutWithParams) " LIFETIME(MIN 1 MAX 10)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create, getContext().context); @@ -215,7 +215,7 @@ TEST(ConvertDictionaryAST, ComplexSource) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create, getContext().context); /// source diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index cb574551d26..0bba7a1f424 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -1054,7 +1054,7 @@ namespace { if (depth > settings.max_parser_depth) throw Exception(ErrorCodes::TOO_DEEP_RECURSION, - "Maximum parse depth ({}) exceeded. Consider raising max_parser_depth setting.", settings.max_parser_depth); + "Maximum parse depth ({}) exceeded. 
Consider raising max_parser_depth setting.", settings.max_parser_depth); assertChar('{', buf); skipWhitespaceIfAny(buf); diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 3efa40df9be..37db514fd1f 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -123,7 +123,7 @@ public: class Executor { public: - static ColumnPtr run(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, uint32_t parse_depth, const ContextPtr & context) + static ColumnPtr run(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, uint32_t parse_depth, uint32_t parse_backtracks, const ContextPtr & context) { MutableColumnPtr to{result_type->createColumn()}; to->reserve(input_rows_count); @@ -161,7 +161,7 @@ public: /// Tokenize the query Tokens tokens(query.data(), query.data() + query.size()); /// Max depth 0 indicates that depth is not limited - IParser::Pos token_iterator(tokens, parse_depth); + IParser::Pos token_iterator(tokens, parse_depth, parse_backtracks); /// Parse query and create AST tree Expected expected; @@ -232,16 +232,17 @@ public: /// 3. Parser(Tokens, ASTPtr) -> complete AST /// 4. Execute functions: call getNextItem on generator and handle each item unsigned parse_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); + unsigned parse_backtracks = static_cast(getContext()->getSettingsRef().max_parser_backtracks); #if USE_SIMDJSON if (getContext()->getSettingsRef().allow_simdjson) return FunctionSQLJSONHelpers::Executor< Name, Impl>, - SimdJSONParser>::run(arguments, result_type, input_rows_count, parse_depth, getContext()); + SimdJSONParser>::run(arguments, result_type, input_rows_count, parse_depth, parse_backtracks, getContext()); #endif return FunctionSQLJSONHelpers:: Executor>, DummyJSONParser>::run( - arguments, result_type, input_rows_count, parse_depth, getContext()); + arguments, result_type, input_rows_count, parse_depth, parse_backtracks, getContext()); } }; diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp index 3ec5393fa6f..b7c7e5847bd 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp @@ -128,7 +128,7 @@ restoreUserDefinedSQLObjects(RestorerFromBackup & restorer, const String & data_ statement_def.data() + statement_def.size(), "in file " + filepath + " from backup " + backup->getNameForLogging(), 0, - context->getSettingsRef().max_parser_depth); + context->getSettingsRef().max_parser_depth, context->getSettingsRef().max_parser_backtracks); break; } } diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp index 34946db7d9e..b083c540083 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp @@ -92,7 +92,8 @@ ASTPtr UserDefinedSQLObjectsDiskStorage::tryLoadObject(UserDefinedSQLObjectType object_create_query.data() + object_create_query.size(), "", 0, - global_context->getSettingsRef().max_parser_depth); + global_context->getSettingsRef().max_parser_depth, + global_context->getSettingsRef().max_parser_backtracks); return ast; } } diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp index 
c43b223ffeb..4ec34c15efc 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp @@ -314,7 +314,8 @@ ASTPtr UserDefinedSQLObjectsZooKeeperStorage::parseObjectData(const String & obj object_data.data() + object_data.size(), "", 0, - global_context->getSettingsRef().max_parser_depth); + global_context->getSettingsRef().max_parser_depth, + global_context->getSettingsRef().max_parser_backtracks); return ast; } } diff --git a/src/Functions/formatQuery.cpp b/src/Functions/formatQuery.cpp index 92403d2e88e..d7addcc284e 100644 --- a/src/Functions/formatQuery.cpp +++ b/src/Functions/formatQuery.cpp @@ -17,6 +17,9 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; } +namespace +{ + enum class OutputFormatting { SingleLine, @@ -29,21 +32,16 @@ enum class ErrorHandling Null }; -template class FunctionFormatQuery : public IFunction { public: - static constexpr auto name = Name::name; - static FunctionPtr create(ContextPtr context) - { - const auto & settings = context->getSettings(); - return std::make_shared(settings.max_query_size, settings.max_parser_depth); - } - - FunctionFormatQuery(size_t max_query_size_, size_t max_parser_depth_) - : max_query_size(max_query_size_) - , max_parser_depth(max_parser_depth_) + FunctionFormatQuery(ContextPtr context, String name_, OutputFormatting output_formatting_, ErrorHandling error_handling_) + : name(name_), output_formatting(output_formatting_), error_handling(error_handling_) { + const Settings & settings = context->getSettings(); + max_query_size = settings.max_query_size; + max_parser_depth = settings.max_parser_depth; + max_parser_backtracks = settings.max_parser_backtracks; } String getName() const override { return name; } @@ -59,7 +57,7 @@ public: validateFunctionArgumentTypes(*this, arguments, args); DataTypePtr string_type = std::make_shared(); - if constexpr (error_handling == ErrorHandling::Null) + if (error_handling == ErrorHandling::Null) return std::make_shared(string_type); else return string_type; @@ -70,7 +68,7 @@ public: const ColumnPtr col_query = arguments[0].column; ColumnUInt8::MutablePtr col_null_map; - if constexpr (error_handling == ErrorHandling::Null) + if (error_handling == ErrorHandling::Null) col_null_map = ColumnUInt8::create(input_rows_count, 0); if (const ColumnString * col_query_string = checkAndGetColumn(col_query.get())) @@ -78,7 +76,7 @@ public: auto col_res = ColumnString::create(); formatVector(col_query_string->getChars(), col_query_string->getOffsets(), col_res->getChars(), col_res->getOffsets(), col_null_map); - if constexpr (error_handling == ErrorHandling::Null) + if (error_handling == ErrorHandling::Null) return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); else return col_res; @@ -113,11 +111,11 @@ private: try { - ast = parseQuery(parser, begin, end, /*query_description*/ {}, max_query_size, max_parser_depth); + ast = parseQuery(parser, begin, end, /*query_description*/ {}, max_query_size, max_parser_depth, max_parser_backtracks); } catch (...) 
{ - if constexpr (error_handling == ErrorHandling::Null) + if (error_handling == ErrorHandling::Null) { const size_t res_data_new_size = res_data_size + 1; if (res_data_new_size > res_data.size()) @@ -135,7 +133,6 @@ private: } else { - static_assert(error_handling == ErrorHandling::Exception); throw; } } @@ -160,92 +157,91 @@ private: res_data.resize(res_data_size); } - const size_t max_query_size; - const size_t max_parser_depth; + String name; + OutputFormatting output_formatting; + ErrorHandling error_handling; + + size_t max_query_size; + size_t max_parser_depth; + size_t max_parser_backtracks; }; -struct NameFormatQuery -{ - static constexpr auto name = "formatQuery"; -}; - -struct NameFormatQueryOrNull -{ - static constexpr auto name = "formatQueryOrNull"; -}; - -struct NameFormatQuerySingleLine -{ - static constexpr auto name = "formatQuerySingleLine"; -}; - -struct NameFormatQuerySingleLineOrNull -{ - static constexpr auto name = "formatQuerySingleLineOrNull"; -}; +} REGISTER_FUNCTION(formatQuery) { - factory.registerFunction>(FunctionDocumentation{ - .description = "Returns a formatted, possibly multi-line, version of the given SQL query. Throws in case of a parsing error.\n[example:multiline]", - .syntax = "formatQuery(query)", - .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, - .returned_value = "The formatted query. [String](../../sql-reference/data-types/string.md).", - .examples{ - {"multiline", - "SELECT formatQuery('select a, b FRom tab WHERE a > 3 and b < 3');", - "SELECT\n" - " a,\n" - " b\n" - "FROM tab\n" - "WHERE (a > 3) AND (b < 3)"}}, - .categories{"Other"}}); + factory.registerFunction( + "formatQuery", + [](ContextPtr context) { return std::make_shared(context, "formatQuery", OutputFormatting::MultiLine, ErrorHandling::Exception); }, + FunctionDocumentation{ + .description = "Returns a formatted, possibly multi-line, version of the given SQL query. Throws in case of a parsing error.\n[example:multiline]", + .syntax = "formatQuery(query)", + .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, + .returned_value = "The formatted query. [String](../../sql-reference/data-types/string.md).", + .examples{ + {"multiline", + "SELECT formatQuery('select a, b FRom tab WHERE a > 3 and b < 3');", + "SELECT\n" + " a,\n" + " b\n" + "FROM tab\n" + "WHERE (a > 3) AND (b < 3)"}}, + .categories{"Other"}}); } REGISTER_FUNCTION(formatQueryOrNull) { - factory.registerFunction>(FunctionDocumentation{ - .description = "Returns a formatted, possibly multi-line, version of the given SQL query. Returns NULL in case of a parsing error.\n[example:multiline]", - .syntax = "formatQueryOrNull(query)", - .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, - .returned_value = "The formatted query. [String](../../sql-reference/data-types/string.md).", - .examples{ - {"multiline", - "SELECT formatQuery('select a, b FRom tab WHERE a > 3 and b < 3');", - "SELECT\n" - " a,\n" - " b\n" - "FROM tab\n" - "WHERE (a > 3) AND (b < 3)"}}, - .categories{"Other"}}); + factory.registerFunction( + "formatQueryOrNull", + [](ContextPtr context) { return std::make_shared(context, "formatQueryOrNull", OutputFormatting::MultiLine, ErrorHandling::Null); }, + FunctionDocumentation{ + .description = "Returns a formatted, possibly multi-line, version of the given SQL query. 
Returns NULL in case of a parsing error.\n[example:multiline]", + .syntax = "formatQueryOrNull(query)", + .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, + .returned_value = "The formatted query. [String](../../sql-reference/data-types/string.md).", + .examples{ + {"multiline", + "SELECT formatQuery('select a, b FRom tab WHERE a > 3 and b < 3');", + "SELECT\n" + " a,\n" + " b\n" + "FROM tab\n" + "WHERE (a > 3) AND (b < 3)"}}, + .categories{"Other"}}); } REGISTER_FUNCTION(formatQuerySingleLine) { - factory.registerFunction>(FunctionDocumentation{ - .description = "Like formatQuery() but the returned formatted string contains no line breaks. Throws in case of a parsing error.\n[example:multiline]", - .syntax = "formatQuerySingleLine(query)", - .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, - .returned_value = "The formatted query. [String](../../sql-reference/data-types/string.md).", - .examples{ - {"multiline", - "SELECT formatQuerySingleLine('select a, b FRom tab WHERE a > 3 and b < 3');", - "SELECT a, b FROM tab WHERE (a > 3) AND (b < 3)"}}, - .categories{"Other"}}); + factory.registerFunction( + "formatQuerySingleLine", + [](ContextPtr context) { return std::make_shared(context, "formatQuerySingleLine", OutputFormatting::SingleLine, ErrorHandling::Exception); }, + FunctionDocumentation{ + .description = "Like formatQuery() but the returned formatted string contains no line breaks. Throws in case of a parsing error.\n[example:multiline]", + .syntax = "formatQuerySingleLine(query)", + .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, + .returned_value = "The formatted query. [String](../../sql-reference/data-types/string.md).", + .examples{ + {"multiline", + "SELECT formatQuerySingleLine('select a, b FRom tab WHERE a > 3 and b < 3');", + "SELECT a, b FROM tab WHERE (a > 3) AND (b < 3)"}}, + .categories{"Other"}}); } REGISTER_FUNCTION(formatQuerySingleLineOrNull) { - factory.registerFunction>(FunctionDocumentation{ - .description = "Like formatQuery() but the returned formatted string contains no line breaks. Returns NULL in case of a parsing error.\n[example:multiline]", - .syntax = "formatQuerySingleLineOrNull(query)", - .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, - .returned_value = "The formatted query. [String](../../sql-reference/data-types/string.md).", - .examples{ - {"multiline", - "SELECT formatQuerySingleLine('select a, b FRom tab WHERE a > 3 and b < 3');", - "SELECT a, b FROM tab WHERE (a > 3) AND (b < 3)"}}, - .categories{"Other"}}); + factory.registerFunction( + "formatQuerySingleLineOrNull", + [](ContextPtr context) { return std::make_shared(context, "formatQuerySingleLineOrNull", OutputFormatting::SingleLine, ErrorHandling::Null); }, + FunctionDocumentation{ + .description = "Like formatQuery() but the returned formatted string contains no line breaks. Returns NULL in case of a parsing error.\n[example:multiline]", + .syntax = "formatQuerySingleLineOrNull(query)", + .arguments = {{"query", "The SQL query to be formatted. [String](../../sql-reference/data-types/string.md)"}}, + .returned_value = "The formatted query. 
[String](../../sql-reference/data-types/string.md).", + .examples{ + {"multiline", + "SELECT formatQuerySingleLine('select a, b FRom tab WHERE a > 3 and b < 3');", + "SELECT a, b FROM tab WHERE (a > 3) AND (b < 3)"}}, + .categories{"Other"}}); } } diff --git a/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp index a55588baeaa..1147d74c146 100644 --- a/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp @@ -206,7 +206,7 @@ namespace if (!filter.empty()) { ParserExpression parser; - ASTPtr expr = parseQuery(parser, filter, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr expr = parseQuery(parser, filter, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); query->filters.emplace_back(type, std::move(expr)); } } diff --git a/src/Interpreters/AsynchronousMetricLog.cpp b/src/Interpreters/AsynchronousMetricLog.cpp index 5cf7f951eec..dc67bd91550 100644 --- a/src/Interpreters/AsynchronousMetricLog.cpp +++ b/src/Interpreters/AsynchronousMetricLog.cpp @@ -21,31 +21,31 @@ ColumnsDescription AsynchronousMetricLogElement::getColumnsDescription() { "hostname", std::make_shared(std::make_shared()), - parseQuery(codec_parser, "(ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH), + parseQuery(codec_parser, "(ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS), "Hostname of the server executing the query." }, { "event_date", std::make_shared(), - parseQuery(codec_parser, "(Delta(2), ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH), + parseQuery(codec_parser, "(Delta(2), ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS), "Event date." }, { "event_time", std::make_shared(), - parseQuery(codec_parser, "(Delta(4), ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH), + parseQuery(codec_parser, "(Delta(4), ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS), "Event time." }, { "metric", std::make_shared(std::make_shared()), - parseQuery(codec_parser, "(ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH), + parseQuery(codec_parser, "(ZSTD(1))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS), "Metric name." }, { "value", std::make_shared(), - parseQuery(codec_parser, "(ZSTD(3))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH), + parseQuery(codec_parser, "(ZSTD(3))", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS), "Metric value." 
} }; diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index fe2baea6b4e..e10f3ecfbc9 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -154,7 +154,8 @@ void DDLLogEntry::parse(const String & data) rb >> "settings: " >> settings_str >> "\n"; ParserSetQuery parser{true}; constexpr UInt64 max_depth = 16; - ASTPtr settings_ast = parseQuery(parser, settings_str, Context::getGlobalContextInstance()->getSettingsRef().max_query_size, max_depth); + constexpr UInt64 max_backtracks = DBMS_DEFAULT_MAX_PARSER_BACKTRACKS; + ASTPtr settings_ast = parseQuery(parser, settings_str, Context::getGlobalContextInstance()->getSettingsRef().max_query_size, max_depth, max_backtracks); settings.emplace(std::move(settings_ast->as()->changes)); } } @@ -197,7 +198,7 @@ void DDLTaskBase::parseQueryFromEntry(ContextPtr context) ParserQuery parser_query(end, settings.allow_settings_after_format_in_insert); String description; - query = parseQuery(parser_query, begin, end, description, 0, settings.max_parser_depth); + query = parseQuery(parser_query, begin, end, description, 0, settings.max_parser_depth, settings.max_parser_backtracks); } void DDLTaskBase::formatRewrittenQuery(ContextPtr context) diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index 60110916760..fed29b410db 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -96,7 +96,7 @@ static ASTPtr parseAdditionalPostFilter(const Context & context) ParserExpression parser; return parseQuery( parser, filter.data(), filter.data() + filter.size(), - "additional filter", settings.max_query_size, settings.max_parser_depth); + "additional filter", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } static ActionsDAGPtr makeAdditionalPostFilter(ASTPtr & ast, ContextPtr context, const Block & header) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index edd7452c130..2a08e8458a4 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -381,7 +381,7 @@ ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns) String type_name = column.type->getName(); const char * pos = type_name.data(); const char * end = pos + type_name.size(); - column_declaration->type = parseQuery(type_parser, pos, end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + column_declaration->type = parseQuery(type_parser, pos, end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); columns_list->children.emplace_back(column_declaration); } @@ -401,7 +401,7 @@ ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns, String type_name = alias_column.type->getName(); const char * type_pos = type_name.data(); const char * type_end = type_pos + type_name.size(); - column_declaration->type = parseQuery(type_parser, type_pos, type_end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + column_declaration->type = parseQuery(type_parser, type_pos, type_end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); column_declaration->default_specifier = "ALIAS"; @@ -409,7 +409,7 @@ ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns, const char * alias_pos = alias.data(); const char * alias_end = alias_pos + alias.size(); ParserExpression 
expression_parser; - column_declaration->default_expression = parseQuery(expression_parser, alias_pos, alias_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + column_declaration->default_expression = parseQuery(expression_parser, alias_pos, alias_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); column_declaration->children.push_back(column_declaration->default_expression); columns_list->children.emplace_back(column_declaration); @@ -433,7 +433,7 @@ ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns) String type_name = column.type->getName(); const char * type_name_pos = type_name.data(); const char * type_name_end = type_name_pos + type_name.size(); - column_declaration->type = parseQuery(type_parser, type_name_pos, type_name_end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + column_declaration->type = parseQuery(type_parser, type_name_pos, type_name_end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); if (column.default_desc.expression) { @@ -1852,10 +1852,12 @@ void InterpreterCreateQuery::addColumnsDescriptionToCreateQueryIfNecessary(ASTCr auto ast_storage = std::make_shared(); unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); + unsigned max_parser_backtracks = static_cast(getContext()->getSettingsRef().max_parser_backtracks); auto query_from_storage = DB::getCreateQueryFromStorage(storage, ast_storage, false, max_parser_depth, + max_parser_backtracks, true); auto & create_query_from_storage = query_from_storage->as(); diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index 97ae9649ae8..8fb0dabb5b5 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -97,7 +97,8 @@ BlockIO InterpreterDeleteQuery::execute() alter_query.data() + alter_query.size(), "ALTER query", 0, - DBMS_DEFAULT_MAX_PARSER_DEPTH); + DBMS_DEFAULT_MAX_PARSER_DEPTH, + DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); auto context = Context::createCopy(getContext()); context->setSetting("mutations_sync", 2); /// Lightweight delete is always synchronous diff --git a/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp index 86196270ed1..26dae6a1df3 100644 --- a/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -281,7 +281,7 @@ BlockIO InterpreterKillQueryQuery::execute() const auto with_round_bracket = alter_command.front() == '('; ParserAlterCommand parser{with_round_bracket}; auto command_ast - = parseQuery(parser, alter_command, 0, getContext()->getSettingsRef().max_parser_depth); + = parseQuery(parser, alter_command, 0, getContext()->getSettingsRef().max_parser_depth, getContext()->getSettingsRef().max_parser_backtracks); required_access_rights = InterpreterAlterQuery::getRequiredAccessForCommand( command_ast->as(), table_id.database_name, table_id.table_name); if (!access->isGranted(required_access_rights)) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index a314492c5b0..07f4e94680c 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -160,7 +160,7 @@ FilterDAGInfoPtr generateFilterActions( { ParserExpression expr_parser; /// We should add back quotes around column name as it can contain dots. 
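Stepping back to the `InterpreterCreateQuery::formatColumns` hunks above: internal round trips of trusted strings (type names, ALIAS expressions) keep using the compile-time defaults, now for both budgets. A minimal sketch of that call shape, assuming the usual ClickHouse parser headers and that `DBMS_DEFAULT_MAX_PARSER_BACKTRACKS` is defined next to `DBMS_DEFAULT_MAX_PARSER_DEPTH` (the helper below is illustrative, not part of the patch):

``` cpp
#include <string>

#include <Core/Defines.h>
#include <Parsers/ParserDataType.h>
#include <Parsers/parseQuery.h>

/// Round-trip a serialized type name back into an AST, the way formatColumns does.
/// The input is produced by the server itself, so the default depth and
/// backtracking limits are used instead of per-query settings.
DB::ASTPtr parseTypeName(const std::string & type_name)
{
    DB::ParserDataType type_parser;
    const char * pos = type_name.data();
    const char * end = pos + type_name.size();
    return DB::parseQuery(
        type_parser, pos, end, "data type", /*max_query_size*/ 0,
        DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);
}
```

Test code goes one step further and simply passes literal zeros, which both limits treat as "unlimited".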
- expr_list->children.push_back(parseQuery(expr_parser, backQuoteIfNeed(column_str), 0, context->getSettingsRef().max_parser_depth)); + expr_list->children.push_back(parseQuery(expr_parser, backQuoteIfNeed(column_str), 0, context->getSettingsRef().max_parser_depth, context->getSettingsRef().max_parser_backtracks)); } select_ast->setExpression(ASTSelectQuery::Expression::TABLES, std::make_shared()); @@ -331,7 +331,7 @@ ASTPtr parseAdditionalFilterConditionForTable( const auto & settings = context.getSettingsRef(); return parseQuery( parser, filter.data(), filter.data() + filter.size(), - "additional filter", settings.max_query_size, settings.max_parser_depth); + "additional filter", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } } diff --git a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp index 6251a9604e1..5c4ae528fc1 100644 --- a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp +++ b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp @@ -43,7 +43,7 @@ ASTPtr makeSubqueryTemplate(const String & table_alias) String query_template = "(select * from _t)"; if (!table_alias.empty()) query_template += " as " + table_alias; - ASTPtr subquery_template = parseQuery(parser, query_template, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr subquery_template = parseQuery(parser, query_template, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); if (!subquery_template) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot parse subquery template"); return subquery_template; diff --git a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp index 9f6e9b930fd..6d6077a0295 100644 --- a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp +++ b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp @@ -19,7 +19,7 @@ using namespace DB; static inline ASTPtr tryRewrittenCreateQuery(const String & query, ContextPtr context) { ParserExternalDDLQuery external_ddl_parser; - ASTPtr ast = parseQuery(external_ddl_parser, "EXTERNAL DDL FROM MySQL(test_database, test_database) " + query, 0, 0); + ASTPtr ast = parseQuery(external_ddl_parser, "EXTERNAL DDL FROM MySQL(test_database, test_database) " + query, 0, 0, 0); return MySQLInterpreter::InterpreterCreateImpl::getRewrittenQueries( *ast->as()->external_ddl->as(), diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index a74b5c67726..e4cbbb8f5f7 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -216,7 +216,7 @@ std::shared_ptr createSystemLog( /// Validate engine definition syntax to prevent some configuration errors. ParserStorageWithComment storage_parser; auto storage_ast = parseQuery(storage_parser, log_settings.engine.data(), log_settings.engine.data() + log_settings.engine.size(), - "Storage to create table for " + config_prefix, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + "Storage to create table for " + config_prefix, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); auto & storage_with_comment = storage_ast->as(); /// Add comment to AST. So it will be saved when the table will be renamed. 
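Where a query context is available, the same call grows the settings-driven way: whatever used to forward `max_parser_depth` from `Settings` now forwards `max_parser_backtracks` as well (see the `executeQuery` and DDL hunks that follow). A hypothetical helper showing that shape, assuming the usual ClickHouse parser headers:

``` cpp
#include <string>

#include <Core/Settings.h>
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/parseQuery.h>

/// Parse a user-supplied expression, honoring the per-query parser budgets.
DB::ASTPtr parseUserExpression(const std::string & expr, const DB::Settings & settings)
{
    DB::ParserExpression parser;
    return DB::parseQuery(
        parser, expr.data(), expr.data() + expr.size(),
        "user expression", settings.max_query_size,
        settings.max_parser_depth, settings.max_parser_backtracks);
}
```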
@@ -647,7 +647,7 @@ ASTPtr SystemLog::getCreateTableQuery() ASTPtr storage_with_comment_ast = parseQuery( storage_parser, storage_def.data(), storage_def.data() + storage_def.size(), - "Storage to create table for " + LogElement::name(), 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + "Storage to create table for " + LogElement::name(), 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); StorageWithComment & storage_with_comment = storage_with_comment_ast->as(); diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 88021038ebb..7dd46534fdf 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -746,18 +746,18 @@ static std::tuple executeQueryImpl( { ParserKQLStatement parser(end, settings.allow_settings_after_format_in_insert); /// TODO: parser should fail early when max_query_size limit is reached. - ast = parseKQLQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth); + ast = parseKQLQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } else if (settings.dialect == Dialect::prql && !internal) { - ParserPRQLQuery parser(max_query_size, settings.max_parser_depth); - ast = parseQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth); + ParserPRQLQuery parser(max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); + ast = parseQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } else { ParserQuery parser(end, settings.allow_settings_after_format_in_insert); /// TODO: parser should fail early when max_query_size limit is reached. - ast = parseQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth); + ast = parseQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); #ifndef NDEBUG /// Verify that AST formatting is consistent: @@ -774,7 +774,7 @@ static std::tuple executeQueryImpl( ast2 = parseQuery(parser, formatted1.data(), formatted1.data() + formatted1.size(), - "", new_max_query_size, settings.max_parser_depth); + "", new_max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } catch (const Exception & e) { diff --git a/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp b/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp index 1295a4d5a75..d78b6ab0c4d 100644 --- a/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp +++ b/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp @@ -122,7 +122,7 @@ ASTPtr parseCustomKeyForTable(const String & custom_key, const Context & context const auto & settings = context.getSettingsRef(); return parseQuery( parser, custom_key.data(), custom_key.data() + custom_key.size(), - "parallel replicas custom key", settings.max_query_size, settings.max_parser_depth); + "parallel replicas custom key", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } } diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp index 2723eb37350..226472175b3 100644 --- a/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -55,9 +55,11 @@ static void executeCreateQuery( bool create, bool has_force_restore_data_flag) { + const Settings & settings = context->getSettingsRef(); ParserCreateQuery parser; ASTPtr ast = parseQuery( - parser, query.data(), query.data() + query.size(), "in file " + file_name, 0, context->getSettingsRef().max_parser_depth); + 
parser, query.data(), query.data() + query.size(), "in file " + file_name, + 0, settings.max_parser_depth, settings.max_parser_backtracks); auto & ast_create_query = ast->as(); ast_create_query.setDatabase(database); diff --git a/src/Interpreters/parseColumnsListForTableFunction.cpp b/src/Interpreters/parseColumnsListForTableFunction.cpp index 78b72022a9a..30a41c090d5 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.cpp +++ b/src/Interpreters/parseColumnsListForTableFunction.cpp @@ -115,7 +115,7 @@ ColumnsDescription parseColumnsListFromString(const std::string & structure, con ParserColumnDeclarationList parser(true, true); const Settings & settings = context->getSettingsRef(); - ASTPtr columns_list_raw = parseQuery(parser, structure, "columns declaration list", settings.max_query_size, settings.max_parser_depth); + ASTPtr columns_list_raw = parseQuery(parser, structure, "columns declaration list", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); auto * columns_list = dynamic_cast(columns_list_raw.get()); if (!columns_list) @@ -136,7 +136,7 @@ bool tryParseColumnsListFromString(const std::string & structure, ColumnsDescrip const char * start = structure.data(); const char * end = structure.data() + structure.size(); ASTPtr columns_list_raw = tryParseQuery( - parser, start, end, error, false, "columns declaration list", false, settings.max_query_size, settings.max_parser_depth); + parser, start, end, error, false, "columns declaration list", false, settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks, true); if (!columns_list_raw) return false; diff --git a/src/Interpreters/tests/gtest_comparison_graph.cpp b/src/Interpreters/tests/gtest_comparison_graph.cpp index 96a78241c8e..ac24a8de368 100644 --- a/src/Interpreters/tests/gtest_comparison_graph.cpp +++ b/src/Interpreters/tests/gtest_comparison_graph.cpp @@ -12,7 +12,7 @@ using namespace DB; static ComparisonGraph getGraph(const String & query) { ParserExpressionList parser(false); - ASTPtr ast = parseQuery(parser, query, 0, 0); + ASTPtr ast = parseQuery(parser, query, 0, 0, 0); return ComparisonGraph(ast->children); } diff --git a/src/Interpreters/tests/gtest_cycle_aliases.cpp b/src/Interpreters/tests/gtest_cycle_aliases.cpp index 2bdeac90f8f..5ff3fbe1c2d 100644 --- a/src/Interpreters/tests/gtest_cycle_aliases.cpp +++ b/src/Interpreters/tests/gtest_cycle_aliases.cpp @@ -14,10 +14,10 @@ TEST(QueryNormalizer, SimpleLoopAlias) { String query = "a as a"; ParserExpressionList parser(false); - ASTPtr ast = parseQuery(parser, query, 0, 0); + ASTPtr ast = parseQuery(parser, query, 0, 0, 0); Aliases aliases; - aliases["a"] = parseQuery(parser, "a as a", 0, 0)->children[0]; + aliases["a"] = parseQuery(parser, "a as a", 0, 0, 0)->children[0]; Settings settings; QueryNormalizer::Data normalizer_data(aliases, {}, false, settings, false); @@ -28,11 +28,11 @@ TEST(QueryNormalizer, SimpleCycleAlias) { String query = "a as b, b as a"; ParserExpressionList parser(false); - ASTPtr ast = parseQuery(parser, query, 0, 0); + ASTPtr ast = parseQuery(parser, query, 0, 0, 0); Aliases aliases; - aliases["a"] = parseQuery(parser, "b as a", 0, 0)->children[0]; - aliases["b"] = parseQuery(parser, "a as b", 0, 0)->children[0]; + aliases["a"] = parseQuery(parser, "b as a", 0, 0, 0)->children[0]; + aliases["b"] = parseQuery(parser, "a as b", 0, 0, 0)->children[0]; Settings settings; QueryNormalizer::Data normalizer_data(aliases, {}, false, settings, true); diff --git 
a/src/Interpreters/tests/gtest_table_overrides.cpp b/src/Interpreters/tests/gtest_table_overrides.cpp index 779bc7a53a4..09aa2e1f37f 100644 --- a/src/Interpreters/tests/gtest_table_overrides.cpp +++ b/src/Interpreters/tests/gtest_table_overrides.cpp @@ -34,11 +34,11 @@ TEST_P(TableOverrideTest, applyOverrides) const auto & [database_query, table_query, expected_query] = GetParam(); ParserCreateQuery parser; ASTPtr database_ast; - ASSERT_NO_THROW(database_ast = parseQuery(parser, database_query, 0, 0)); + ASSERT_NO_THROW(database_ast = parseQuery(parser, database_query, 0, 0, 0)); auto * database = database_ast->as(); ASSERT_NE(nullptr, database); ASTPtr table_ast; - ASSERT_NO_THROW(table_ast = parseQuery(parser, table_query, 0, 0)); + ASSERT_NO_THROW(table_ast = parseQuery(parser, table_query, 0, 0, 0)); auto * table = table_ast->as(); ASSERT_NE(nullptr, table); auto table_name = table->table->as()->name(); diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 6d267a7d215..1e7d0158878 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -1918,7 +1918,7 @@ public: && string_literal->as().value.tryGet(literal)) { Tokens tokens(literal.data(), literal.data() + literal.size()); - IParser::Pos token_pos(tokens, 0); + IParser::Pos token_pos(tokens, pos.max_depth, pos.max_backtracks); Expected token_expected; ASTPtr expr; diff --git a/src/Parsers/IParser.cpp b/src/Parsers/IParser.cpp new file mode 100644 index 00000000000..d1e9ace89b6 --- /dev/null +++ b/src/Parsers/IParser.cpp @@ -0,0 +1,30 @@ +#include +#include + +namespace DB +{ + +IParser::Pos & IParser::Pos::operator=(const IParser::Pos & rhs) +{ + depth = rhs.depth; + max_depth = rhs.max_depth; + + if (rhs.backtracks > backtracks) + backtracks = rhs.backtracks; + + max_backtracks = rhs.max_backtracks; + + if (rhs < *this) + { + ++backtracks; + if (max_backtracks && backtracks > max_backtracks) + throw Exception(ErrorCodes::TOO_DEEP_RECURSION, "Maximum amount of backtracking ({}) exceeded in the parser. " + "Consider raising max_parser_backtracks parameter.", max_backtracks); + } + + TokenIterator::operator=(rhs); + + return *this; +} + +} diff --git a/src/Parsers/IParser.h b/src/Parsers/IParser.h index 198ec0346ff..291f8ee7d44 100644 --- a/src/Parsers/IParser.h +++ b/src/Parsers/IParser.h @@ -62,11 +62,18 @@ public: uint32_t depth = 0; uint32_t max_depth = 0; - Pos(Tokens & tokens_, uint32_t max_depth_) : TokenIterator(tokens_), max_depth(max_depth_) + uint32_t backtracks = 0; + uint32_t max_backtracks = 0; + + Pos(Tokens & tokens_, uint32_t max_depth_, uint32_t max_backtracks_) + : TokenIterator(tokens_), max_depth(max_depth_), max_backtracks(max_backtracks_) { } - Pos(TokenIterator token_iterator_, uint32_t max_depth_) : TokenIterator(token_iterator_), max_depth(max_depth_) { } + Pos(TokenIterator token_iterator_, uint32_t max_depth_, uint32_t max_backtracks_) + : TokenIterator(token_iterator_), max_depth(max_depth_), max_backtracks(max_backtracks_) + { + } ALWAYS_INLINE void increaseDepth() { @@ -97,6 +104,10 @@ public: throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error in parser: incorrect calculation of parse depth"); --depth; } + + Pos(const Pos & rhs) = default; + + Pos & operator=(const Pos & rhs); }; /** Get the text of this parser parses. 
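The enforcement added in `IParser::Pos::operator=` above rests on one observation: a recursive-descent parser backtracks exactly when it assigns an earlier position over a later one. Below is a self-contained toy model of that budget, deliberately not ClickHouse code, just the counting logic:

``` cpp
#include <cstdint>
#include <cstdio>
#include <stdexcept>

/// Toy stand-in for IParser::Pos: only the backtracking bookkeeping is modelled.
struct PositionModel
{
    size_t index = 0;            /// current token index
    uint32_t backtracks = 0;     /// how many times we moved backwards so far
    uint32_t max_backtracks = 0; /// 0 means "unlimited"

    void assign(const PositionModel & rhs)
    {
        if (rhs.backtracks > backtracks)
            backtracks = rhs.backtracks;   /// keep the largest counter seen
        max_backtracks = rhs.max_backtracks;
        if (rhs.index < index)             /// rewinding == one unit of backtracking
        {
            ++backtracks;
            if (max_backtracks && backtracks > max_backtracks)
                throw std::runtime_error("maximum amount of backtracking exceeded");
        }
        index = rhs.index;
    }
};

int main()
{
    PositionModel saved;
    saved.max_backtracks = 2;

    PositionModel pos = saved;
    for (int attempt = 1; attempt <= 3; ++attempt)
    {
        pos.index = 10;        /// the parser advanced while trying an alternative
        try
        {
            pos.assign(saved); /// rewind to retry: 1st and 2nd succeed, 3rd throws
            std::printf("rewind %d ok, backtracks = %u\n", attempt, pos.backtracks);
        }
        catch (const std::exception & e)
        {
            std::printf("rewind %d rejected: %s\n", attempt, e.what());
        }
    }
    return 0;
}
```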
*/ diff --git a/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.cpp b/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.cpp index 152c29e5941..1d77007a37c 100644 --- a/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.cpp +++ b/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.cpp @@ -279,13 +279,13 @@ String IParserKQLFunction::getKQLFunctionName(IParser::Pos & pos) } String IParserKQLFunction::kqlCallToExpression( - const std::string_view function_name, const std::initializer_list params, const uint32_t max_depth) + const std::string_view function_name, const std::initializer_list params, uint32_t max_depth, uint32_t max_backtracks) { - return kqlCallToExpression(function_name, std::span(params), max_depth); + return kqlCallToExpression(function_name, std::span(params), max_depth, max_backtracks); } String IParserKQLFunction::kqlCallToExpression( - const std::string_view function_name, const std::span params, const uint32_t max_depth) + const std::string_view function_name, const std::span params, uint32_t max_depth, uint32_t max_backtracks) { const auto params_str = std::accumulate( std::cbegin(params), @@ -302,7 +302,7 @@ String IParserKQLFunction::kqlCallToExpression( const auto kql_call = std::format("{}({})", function_name, params_str); DB::Tokens call_tokens(kql_call.c_str(), kql_call.c_str() + kql_call.length()); - DB::IParser::Pos tokens_pos(call_tokens, max_depth); + DB::IParser::Pos tokens_pos(call_tokens, max_depth, max_backtracks); return DB::IParserKQLFunction::getExpression(tokens_pos); } diff --git a/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.h b/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.h index 147436551f9..f5069e80745 100644 --- a/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.h +++ b/src/Parsers/Kusto/KustoFunctions/IParserKQLFunction.h @@ -77,8 +77,8 @@ public: static std::optional getOptionalArgument(const String & function_name, DB::IParser::Pos & pos, ArgumentState argument_state = ArgumentState::Parsed); static String - kqlCallToExpression(std::string_view function_name, std::initializer_list params, uint32_t max_depth); - static String kqlCallToExpression(std::string_view function_name, std::span params, uint32_t max_depth); + kqlCallToExpression(std::string_view function_name, std::initializer_list params, uint32_t max_depth, uint32_t max_backtracks); + static String kqlCallToExpression(std::string_view function_name, std::span params, uint32_t max_depth, uint32_t max_backtracks); static String escapeSingleQuotes(const String & input); protected: diff --git a/src/Parsers/Kusto/KustoFunctions/KQLCastingFunctions.cpp b/src/Parsers/Kusto/KustoFunctions/KQLCastingFunctions.cpp index b0eec16f56f..87841e295ba 100644 --- a/src/Parsers/Kusto/KustoFunctions/KQLCastingFunctions.cpp +++ b/src/Parsers/Kusto/KustoFunctions/KQLCastingFunctions.cpp @@ -99,7 +99,7 @@ bool ToTimeSpan::convertImpl(String & out, IParser::Pos & pos) ++pos; try { - auto result = kqlCallToExpression("time", {arg}, pos.max_depth); + auto result = kqlCallToExpression("time", {arg}, pos.max_depth, pos.max_backtracks); out = std::format("{}", result); } catch (...) 
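The KQL translator is the heaviest consumer of these limits: it repeatedly rewrites a fragment into SQL text, re-tokenizes it and parses it with a fresh position, which is why every helper now threads `max_backtracks` through next to `max_depth`. A sketch of that recurring pattern, assuming the ClickHouse parser headers (the function name is illustrative only):

``` cpp
#include <string>

#include <Parsers/ExpressionListParsers.h>
#include <Parsers/TokenIterator.h>

/// Parse a rewritten expression string with a nested position that inherits
/// both budgets (recursion depth and backtracking) from the enclosing one.
bool parseRewrittenExpression(const std::string & expr, const DB::IParser::Pos & outer_pos, DB::ASTPtr & node, DB::Expected & expected)
{
    DB::Tokens tokens(expr.data(), expr.data() + expr.size());
    DB::IParser::Pos new_pos(tokens, outer_pos.max_depth, outer_pos.max_backtracks);
    return DB::ParserNotEmptyExpressionList(/*allow_alias_without_as_keyword*/ false).parse(new_pos, node, expected);
}
```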
diff --git a/src/Parsers/Kusto/KustoFunctions/KQLDynamicFunctions.cpp b/src/Parsers/Kusto/KustoFunctions/KQLDynamicFunctions.cpp index 924ac9f6490..e90be363e4b 100644 --- a/src/Parsers/Kusto/KustoFunctions/KQLDynamicFunctions.cpp +++ b/src/Parsers/Kusto/KustoFunctions/KQLDynamicFunctions.cpp @@ -99,7 +99,7 @@ bool ArrayRotateRight::convertImpl(String & out, IParser::Pos & pos) const auto array = getArgument(function_name, pos, ArgumentState::Raw); const auto count = getArgument(function_name, pos, ArgumentState::Raw); - out = kqlCallToExpression("array_rotate_left", {array, "-1 * " + count}, pos.max_depth); + out = kqlCallToExpression("array_rotate_left", {array, "-1 * " + count}, pos.max_depth, pos.max_backtracks); return true; } @@ -140,7 +140,7 @@ bool ArrayShiftRight::convertImpl(String & out, IParser::Pos & pos) "array_shift_left", fill ? std::initializer_list{array, negated_count, *fill} : std::initializer_list{array, negated_count}, - pos.max_depth); + pos.max_depth, pos.max_backtracks); return true; } @@ -233,8 +233,8 @@ bool JaccardIndex::convertImpl(String & out, IParser::Pos & pos) const auto rhs = getArgument(function_name, pos, ArgumentState::Raw); out = std::format( "divide(length({0}), length({1}))", - kqlCallToExpression("set_intersect", {lhs, rhs}, pos.max_depth), - kqlCallToExpression("set_union", {lhs, rhs}, pos.max_depth)); + kqlCallToExpression("set_intersect", {lhs, rhs}, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("set_union", {lhs, rhs}, pos.max_depth, pos.max_backtracks)); return true; } @@ -292,7 +292,7 @@ bool SetDifference::convertImpl(String & out, IParser::Pos & pos) while (auto next_array = getOptionalArgument(function_name, pos, ArgumentState::Raw)) arrays.push_back(*next_array); - return kqlCallToExpression("set_union", std::vector(arrays.cbegin(), arrays.cend()), pos.max_depth); + return kqlCallToExpression("set_union", std::vector(arrays.cbegin(), arrays.cend()), pos.max_depth, pos.max_backtracks); }); out = std::format("arrayFilter(x -> not has({1}, x), arrayDistinct({0}))", lhs, rhs); diff --git a/src/Parsers/Kusto/KustoFunctions/KQLIPFunctions.cpp b/src/Parsers/Kusto/KustoFunctions/KQLIPFunctions.cpp index 6f853b16fbc..06566dc54ec 100644 --- a/src/Parsers/Kusto/KustoFunctions/KQLIPFunctions.cpp +++ b/src/Parsers/Kusto/KustoFunctions/KQLIPFunctions.cpp @@ -34,10 +34,10 @@ bool Ipv4Compare::convertImpl(String & out, IParser::Pos & pos) "sign(IPv4StringToNumOrNull(toString((tupleElement(IPv4CIDRToRange(assumeNotNull(lhs_ip_{5}), " "toUInt8(min2({4}, min2(assumeNotNull(lhs_mask_{5}), assumeNotNull(rhs_mask_{5})))) as mask_{5}), 1))))" " - IPv4StringToNumOrNull(toString((tupleElement(IPv4CIDRToRange(assumeNotNull(rhs_ip_{5}), mask_{5}), 1))))))", - kqlCallToExpression("parse_ipv4", {lhs}, pos.max_depth), - kqlCallToExpression("ipv4_netmask_suffix", {lhs}, pos.max_depth), - kqlCallToExpression("parse_ipv4", {rhs}, pos.max_depth), - kqlCallToExpression("ipv4_netmask_suffix", {rhs}, pos.max_depth), + kqlCallToExpression("parse_ipv4", {lhs}, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("ipv4_netmask_suffix", {lhs}, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("parse_ipv4", {rhs}, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("ipv4_netmask_suffix", {rhs}, pos.max_depth, pos.max_backtracks), mask ? 
*mask : "32", generateUniqueIdentifier()); return true; @@ -56,8 +56,8 @@ bool Ipv4IsInRange::convertImpl(String & out, IParser::Pos & pos) "or isNull({1} as range_start_ip_{3}) or isNull({2} as range_mask_{3}), null, " "bitXor(range_start_ip_{3}, bitAnd(ip_{3}, bitNot(toUInt32(intExp2(toInt32(32 - range_mask_{3})) - 1)))) = 0) ", ip_address, - kqlCallToExpression("parse_ipv4", {ip_range}, pos.max_depth), - kqlCallToExpression("ipv4_netmask_suffix", {ip_range}, pos.max_depth), + kqlCallToExpression("parse_ipv4", {ip_range}, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("ipv4_netmask_suffix", {ip_range}, pos.max_depth, pos.max_backtracks), generateUniqueIdentifier()); return true; } @@ -71,7 +71,7 @@ bool Ipv4IsMatch::convertImpl(String & out, IParser::Pos & pos) const auto lhs = getArgument(function_name, pos, ArgumentState::Raw); const auto rhs = getArgument(function_name, pos, ArgumentState::Raw); const auto mask = getOptionalArgument(function_name, pos, ArgumentState::Raw); - out = std::format("equals({}, 0)", kqlCallToExpression("ipv4_compare", {lhs, rhs, mask ? *mask : "32"}, pos.max_depth)); + out = std::format("equals({}, 0)", kqlCallToExpression("ipv4_compare", {lhs, rhs, mask ? *mask : "32"}, pos.max_depth, pos.max_backtracks)); return true; } @@ -196,7 +196,7 @@ bool Ipv6IsMatch::convertImpl(String & out, IParser::Pos & pos) const auto lhs = getArgument(function_name, pos, ArgumentState::Raw); const auto rhs = getArgument(function_name, pos, ArgumentState::Raw); const auto mask = getOptionalArgument(function_name, pos, ArgumentState::Raw); - out = std::format("equals({}, 0)", kqlCallToExpression("ipv6_compare", {lhs, rhs, mask ? *mask : "128"}, pos.max_depth)); + out = std::format("equals({}, 0)", kqlCallToExpression("ipv6_compare", {lhs, rhs, mask ? *mask : "128"}, pos.max_depth, pos.max_backtracks)); return true; } @@ -228,9 +228,9 @@ bool ParseIpv6Mask::convertImpl(String & out, IParser::Pos & pos) const auto unique_identifier = generateUniqueIdentifier(); out = std::format( "if(empty({0} as ipv4_{3}), {1}, {2})", - kqlCallToExpression("format_ipv4", {"trim_start('::', " + ip_address + ")", mask + " - 96"}, pos.max_depth), - kqlCallToExpression("parse_ipv6", {"strcat(tostring(parse_ipv6(" + ip_address + ")), '/', tostring(" + mask + "))"}, pos.max_depth), - kqlCallToExpression("parse_ipv6", {"ipv4_" + unique_identifier}, pos.max_depth), + kqlCallToExpression("format_ipv4", {"trim_start('::', " + ip_address + ")", mask + " - 96"}, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("parse_ipv6", {"strcat(tostring(parse_ipv6(" + ip_address + ")), '/', tostring(" + mask + "))"}, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("parse_ipv6", {"ipv4_" + unique_identifier}, pos.max_depth, pos.max_backtracks), unique_identifier); return true; } @@ -247,9 +247,9 @@ bool FormatIpv4::convertImpl(String & out, IParser::Pos & pos) "ifNull(if(isNotNull(toUInt32OrNull(toString({0})) as param_as_uint32_{3}) and toTypeName({0}) = 'String' or ({1}) < 0 " "or isNull(ifNull(param_as_uint32_{3}, {2}) as ip_as_number_{3}), null, " "IPv4NumToString(bitAnd(ip_as_number_{3}, bitNot(toUInt32(intExp2(toInt32(32 - ({1}))) - 1))))), '')", - ParserKQLBase::getExprFromToken(ip_address, pos.max_depth), + ParserKQLBase::getExprFromToken(ip_address, pos.max_depth, pos.max_backtracks), mask ? 
*mask : "32", - kqlCallToExpression("parse_ipv4", {"tostring(" + ip_address + ")"}, pos.max_depth), + kqlCallToExpression("parse_ipv4", {"tostring(" + ip_address + ")"}, pos.max_depth, pos.max_backtracks), generateUniqueIdentifier()); return true; } @@ -266,10 +266,10 @@ bool FormatIpv4Mask::convertImpl(String & out, IParser::Pos & pos) out = std::format( "if(empty({1} as formatted_ip_{2}) or position(toTypeName({0}), 'Int') = 0 or not {0} between 0 and 32, '', " "concat(formatted_ip_{2}, '/', toString(toInt64(min2({0}, ifNull({3} as suffix_{2}, 32))))))", - ParserKQLBase::getExprFromToken(calculated_mask, pos.max_depth), - kqlCallToExpression("format_ipv4", {ip_address, calculated_mask}, pos.max_depth), + ParserKQLBase::getExprFromToken(calculated_mask, pos.max_depth, pos.max_backtracks), + kqlCallToExpression("format_ipv4", {ip_address, calculated_mask}, pos.max_depth, pos.max_backtracks), generateUniqueIdentifier(), - kqlCallToExpression("ipv4_netmask_suffix", {"tostring(" + ip_address + ")"}, pos.max_depth)); + kqlCallToExpression("ipv4_netmask_suffix", {"tostring(" + ip_address + ")"}, pos.max_depth, pos.max_backtracks)); return true; } } diff --git a/src/Parsers/Kusto/KustoFunctions/KQLStringFunctions.cpp b/src/Parsers/Kusto/KustoFunctions/KQLStringFunctions.cpp index 82cfa68b180..18c986c2191 100644 --- a/src/Parsers/Kusto/KustoFunctions/KQLStringFunctions.cpp +++ b/src/Parsers/Kusto/KustoFunctions/KQLStringFunctions.cpp @@ -442,7 +442,7 @@ bool ParseJSON::convertImpl(String & out, IParser::Pos & pos) { --pos; auto arg = getArgument(fn_name, pos); - auto result = kqlCallToExpression("dynamic", {arg}, pos.max_depth); + auto result = kqlCallToExpression("dynamic", {arg}, pos.max_depth, pos.max_backtracks); out = std::format("{}", result); } else @@ -729,7 +729,7 @@ bool Trim::convertImpl(String & out, IParser::Pos & pos) const auto regex = getArgument(fn_name, pos, ArgumentState::Raw); const auto source = getArgument(fn_name, pos, ArgumentState::Raw); - out = kqlCallToExpression("trim_start", {regex, std::format("trim_end({0}, {1})", regex, source)}, pos.max_depth); + out = kqlCallToExpression("trim_start", {regex, std::format("trim_end({0}, {1})", regex, source)}, pos.max_depth, pos.max_backtracks); return true; } diff --git a/src/Parsers/Kusto/ParserKQLDistinct.cpp b/src/Parsers/Kusto/ParserKQLDistinct.cpp index 2de4d2c28e7..3ec823a61b5 100644 --- a/src/Parsers/Kusto/ParserKQLDistinct.cpp +++ b/src/Parsers/Kusto/ParserKQLDistinct.cpp @@ -12,7 +12,7 @@ bool ParserKQLDistinct::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) expr = getExprFromToken(pos); Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos new_pos(tokens, pos.max_depth); + IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks); if (!ParserNotEmptyExpressionList(false).parse(new_pos, select_expression_list, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLExtend.cpp b/src/Parsers/Kusto/ParserKQLExtend.cpp index b37618f69fd..41ce296bd25 100644 --- a/src/Parsers/Kusto/ParserKQLExtend.cpp +++ b/src/Parsers/Kusto/ParserKQLExtend.cpp @@ -23,7 +23,7 @@ bool ParserKQLExtend ::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) String except_str; String new_extend_str; Tokens ntokens(extend_expr.c_str(), extend_expr.c_str() + extend_expr.size()); - IParser::Pos npos(ntokens, pos.max_depth); + IParser::Pos npos(ntokens, pos.max_depth, pos.max_backtracks); String alias; @@ -77,7 +77,7 @@ bool ParserKQLExtend ::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) String 
expr = std::format("SELECT * {}, {} from prev", except_str, new_extend_str); Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos new_pos(tokens, pos.max_depth); + IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks); if (!ParserSelectQuery().parse(new_pos, select_query, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLFilter.cpp b/src/Parsers/Kusto/ParserKQLFilter.cpp index 74d8610ecd4..b060ce8d2c7 100644 --- a/src/Parsers/Kusto/ParserKQLFilter.cpp +++ b/src/Parsers/Kusto/ParserKQLFilter.cpp @@ -14,7 +14,7 @@ bool ParserKQLFilter::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ASTPtr where_expression; Tokens token_filter(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos pos_filter(token_filter, pos.max_depth); + IParser::Pos pos_filter(token_filter, pos.max_depth, pos.max_backtracks); if (!ParserExpressionWithOptionalAlias(false).parse(pos_filter, where_expression, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLLimit.cpp b/src/Parsers/Kusto/ParserKQLLimit.cpp index 910f0e8e1a3..0eb460757b1 100644 --- a/src/Parsers/Kusto/ParserKQLLimit.cpp +++ b/src/Parsers/Kusto/ParserKQLLimit.cpp @@ -14,7 +14,7 @@ bool ParserKQLLimit::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) auto expr = getExprFromToken(pos); Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos new_pos(tokens, pos.max_depth); + IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks); if (!ParserExpressionWithOptionalAlias(false).parse(new_pos, limit_length, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLMVExpand.cpp b/src/Parsers/Kusto/ParserKQLMVExpand.cpp index 7d242dffaf7..d174e9b5911 100644 --- a/src/Parsers/Kusto/ParserKQLMVExpand.cpp +++ b/src/Parsers/Kusto/ParserKQLMVExpand.cpp @@ -69,7 +69,7 @@ bool ParserKQLMVExpand::parseColumnArrayExprs(ColumnArrayExprs & column_array_ex auto add_columns = [&] { - column_array_expr = getExprFromToken(String(expr_begin_pos->begin, expr_end_pos->end), pos.max_depth); + column_array_expr = getExprFromToken(String(expr_begin_pos->begin, expr_end_pos->end), pos.max_depth, pos.max_backtracks); if (alias.empty()) { @@ -189,7 +189,7 @@ bool ParserKQLMVExpand::parserMVExpand(KQLMVExpand & kql_mv_expand, Pos & pos, E return true; } -bool ParserKQLMVExpand::genQuery(KQLMVExpand & kql_mv_expand, ASTPtr & select_node, int32_t max_depth) +bool ParserKQLMVExpand::genQuery(KQLMVExpand & kql_mv_expand, ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks) { String expand_str; String cast_type_column_remove, cast_type_column_rename; @@ -253,7 +253,7 @@ bool ParserKQLMVExpand::genQuery(KQLMVExpand & kql_mv_expand, ASTPtr & select_no if (cast_type_column_remove.empty()) { query = std::format("Select {} {} From {} {}", columns, extra_columns, input, expand_str); - if (!parseSQLQueryByString(std::make_unique(), query, sub_query_node, max_depth)) + if (!parseSQLQueryByString(std::make_unique(), query, sub_query_node, max_depth, max_backtracks)) return false; if (!setSubQuerySource(sub_query_node, select_node, false, false)) return false; @@ -262,14 +262,14 @@ bool ParserKQLMVExpand::genQuery(KQLMVExpand & kql_mv_expand, ASTPtr & select_no else { query = std::format("(Select {} {} From {} {})", columns, extra_columns, input, expand_str); - if (!parseSQLQueryByString(std::make_unique(), query, sub_query_node, max_depth)) + if (!parseSQLQueryByString(std::make_unique(), query, sub_query_node, max_depth, max_backtracks)) return false; if 
(!setSubQuerySource(sub_query_node, select_node, true, false)) return false; select_node = std::move(sub_query_node); auto rename_query = std::format("(Select * {}, {} From {})", cast_type_column_remove, cast_type_column_rename, "query"); - if (!parseSQLQueryByString(std::make_unique(), rename_query, sub_query_node, max_depth)) + if (!parseSQLQueryByString(std::make_unique(), rename_query, sub_query_node, max_depth, max_backtracks)) return false; if (!setSubQuerySource(sub_query_node, select_node, true, true)) return false; @@ -277,7 +277,7 @@ bool ParserKQLMVExpand::genQuery(KQLMVExpand & kql_mv_expand, ASTPtr & select_no select_node = std::move(sub_query_node); query = std::format("Select * {}, {} from {}", cast_type_column_restore, cast_type_column_restore_name, "rename_query"); - if (!parseSQLQueryByString(std::make_unique(), query, sub_query_node, max_depth)) + if (!parseSQLQueryByString(std::make_unique(), query, sub_query_node, max_depth, max_backtracks)) return false; sub_query_node->as()->setExpression(ASTSelectQuery::Expression::TABLES, std::move(select_node)); select_node = std::move(sub_query_node); @@ -294,12 +294,12 @@ bool ParserKQLMVExpand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) KQLMVExpand kql_mv_expand; if (!parserMVExpand(kql_mv_expand, pos, expected)) return false; - if (!genQuery(kql_mv_expand, node, pos.max_depth)) + if (!genQuery(kql_mv_expand, node, pos.max_depth, pos.max_backtracks)) return false; const String setting_str = "enable_unaligned_array_join = 1"; Tokens token_settings(setting_str.c_str(), setting_str.c_str() + setting_str.size()); - IParser::Pos pos_settings(token_settings, pos.max_depth); + IParser::Pos pos_settings(token_settings, pos.max_depth, pos.max_backtracks); if (!ParserSetQuery(true).parse(pos_settings, setting, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLMVExpand.h b/src/Parsers/Kusto/ParserKQLMVExpand.h index 61f206bb00d..068aee53f58 100644 --- a/src/Parsers/Kusto/ParserKQLMVExpand.h +++ b/src/Parsers/Kusto/ParserKQLMVExpand.h @@ -33,7 +33,7 @@ protected: static bool parseColumnArrayExprs(ColumnArrayExprs & column_array_exprs, Pos & pos, Expected & expected); static bool parserMVExpand(KQLMVExpand & kql_mv_expand, Pos & pos, Expected & expected); - static bool genQuery(KQLMVExpand & kql_mv_expand, ASTPtr & select_node, int32_t max_depth); + static bool genQuery(KQLMVExpand & kql_mv_expand, ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks); const char * getName() const override { return "KQL mv-expand"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; diff --git a/src/Parsers/Kusto/ParserKQLMakeSeries.cpp b/src/Parsers/Kusto/ParserKQLMakeSeries.cpp index e89423e2fc9..4759efc0025 100644 --- a/src/Parsers/Kusto/ParserKQLMakeSeries.cpp +++ b/src/Parsers/Kusto/ParserKQLMakeSeries.cpp @@ -142,7 +142,7 @@ bool ParserKQLMakeSeries ::parseFromToStepClause(FromToStepClause & from_to_step || ParserKQLDateTypeTimespan().parseConstKQLTimespan(from_to_step.step_str)) { from_to_step.is_timespan = true; - from_to_step.step = std::stod(getExprFromToken(from_to_step.step_str, pos.max_depth)); + from_to_step.step = std::stod(getExprFromToken(from_to_step.step_str, pos.max_depth, pos.max_backtracks)); } else from_to_step.step = std::stod(from_to_step.step_str); @@ -150,7 +150,7 @@ bool ParserKQLMakeSeries ::parseFromToStepClause(FromToStepClause & from_to_step return true; } -bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr & select_node, const uint32_t 
& max_depth) +bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks) { const uint64_t era_diff = 62135596800; // this magic number is the differicen is second form 0001-01-01 (Azure start time ) and 1970-01-01 (CH start time) @@ -166,15 +166,15 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr & auto step = from_to_step.step; if (!kql_make_series.from_to_step.from_str.empty()) - start_str = getExprFromToken(kql_make_series.from_to_step.from_str, max_depth); + start_str = getExprFromToken(kql_make_series.from_to_step.from_str, max_depth, max_backtracks); if (!kql_make_series.from_to_step.to_str.empty()) - end_str = getExprFromToken(from_to_step.to_str, max_depth); + end_str = getExprFromToken(from_to_step.to_str, max_depth, max_backtracks); auto date_type_cast = [&](String & src) { Tokens tokens(src.c_str(), src.c_str() + src.size()); - IParser::Pos pos(tokens, max_depth); + IParser::Pos pos(tokens, max_depth, max_backtracks); String res; while (isValidKQLPos(pos)) { @@ -201,7 +201,7 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr & { std::vector group_expression_tokens; Tokens tokens(group_expression.c_str(), group_expression.c_str() + group_expression.size()); - IParser::Pos pos(tokens, max_depth); + IParser::Pos pos(tokens, max_depth, max_backtracks); while (isValidKQLPos(pos)) { if (String(pos->begin, pos->end) == "AS") @@ -296,7 +296,7 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr & ASTPtr sub_query_node; - if (!ParserSimpleCHSubquery(select_node).parseByString(sub_sub_query, sub_query_node, max_depth)) + if (!ParserSimpleCHSubquery(select_node).parseByString(sub_sub_query, sub_query_node, max_depth, max_backtracks)) return false; select_node->as()->setExpression(ASTSelectQuery::Expression::TABLES, std::move(sub_query_node)); @@ -351,7 +351,7 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr & else main_query = std::format("{},{}", group_expression_alias, final_axis_agg_alias_list); - if (!ParserSimpleCHSubquery(select_node).parseByString(sub_query, sub_query_node, max_depth)) + if (!ParserSimpleCHSubquery(select_node).parseByString(sub_query, sub_query_node, max_depth, max_backtracks)) return false; select_node->as()->setExpression(ASTSelectQuery::Expression::TABLES, std::move(sub_query_node)); @@ -411,10 +411,10 @@ bool ParserKQLMakeSeries ::parseImpl(Pos & pos, ASTPtr & node, Expected & expect subquery_columns += ", " + column_str; } - makeSeries(kql_make_series, node, pos.max_depth); + makeSeries(kql_make_series, node, pos.max_depth, pos.max_backtracks); Tokens token_main_query(kql_make_series.main_query.c_str(), kql_make_series.main_query.c_str() + kql_make_series.main_query.size()); - IParser::Pos pos_main_query(token_main_query, pos.max_depth); + IParser::Pos pos_main_query(token_main_query, pos.max_depth, pos.max_backtracks); if (!ParserNotEmptyExpressionList(true).parse(pos_main_query, select_expression_list, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLMakeSeries.h b/src/Parsers/Kusto/ParserKQLMakeSeries.h index ef7cc4976f6..6a32e76eff3 100644 --- a/src/Parsers/Kusto/ParserKQLMakeSeries.h +++ b/src/Parsers/Kusto/ParserKQLMakeSeries.h @@ -42,7 +42,7 @@ protected: String main_query; }; - static bool makeSeries(KQLMakeSeries & kql_make_series, ASTPtr & select_node, const uint32_t & max_depth); + static bool makeSeries(KQLMakeSeries & kql_make_series, 
ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks); static bool parseAggregationColumns(AggregationColumns & aggregation_columns, Pos & pos); static bool parseFromToStepClause(FromToStepClause & from_to_step, Pos & pos); diff --git a/src/Parsers/Kusto/ParserKQLPrint.cpp b/src/Parsers/Kusto/ParserKQLPrint.cpp index bd9980ea96d..37483439f14 100644 --- a/src/Parsers/Kusto/ParserKQLPrint.cpp +++ b/src/Parsers/Kusto/ParserKQLPrint.cpp @@ -10,7 +10,7 @@ bool ParserKQLPrint::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) const String expr = getExprFromToken(pos); Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos new_pos(tokens, pos.max_depth); + IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks); if (!ParserNotEmptyExpressionList(true).parse(new_pos, select_expression_list, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLProject.cpp b/src/Parsers/Kusto/ParserKQLProject.cpp index fdc458b7707..eab9ee082c5 100644 --- a/src/Parsers/Kusto/ParserKQLProject.cpp +++ b/src/Parsers/Kusto/ParserKQLProject.cpp @@ -12,7 +12,7 @@ bool ParserKQLProject ::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) expr = getExprFromToken(pos); Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos new_pos(tokens, pos.max_depth); + IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks); if (!ParserNotEmptyExpressionList(false).parse(new_pos, select_expression_list, expected)) return false; diff --git a/src/Parsers/Kusto/ParserKQLQuery.cpp b/src/Parsers/Kusto/ParserKQLQuery.cpp index a54a2b0eda9..6fd9c95ec6f 100644 --- a/src/Parsers/Kusto/ParserKQLQuery.cpp +++ b/src/Parsers/Kusto/ParserKQLQuery.cpp @@ -33,20 +33,20 @@ namespace ErrorCodes extern const int SYNTAX_ERROR; } -bool ParserKQLBase::parseByString(const String expr, ASTPtr & node, const uint32_t max_depth) +bool ParserKQLBase::parseByString(String expr, ASTPtr & node, uint32_t max_depth, uint32_t max_backtracks) { Expected expected; Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos pos(tokens, max_depth); + IParser::Pos pos(tokens, max_depth, max_backtracks); return parse(pos, node, expected); } -bool ParserKQLBase::parseSQLQueryByString(ParserPtr && parser, String & query, ASTPtr & select_node, int32_t max_depth) +bool ParserKQLBase::parseSQLQueryByString(ParserPtr && parser, String & query, ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks) { Expected expected; Tokens token_subquery(query.c_str(), query.c_str() + query.size()); - IParser::Pos pos_subquery(token_subquery, max_depth); + IParser::Pos pos_subquery(token_subquery, max_depth, max_backtracks); if (!parser->parse(pos_subquery, select_node, expected)) return false; return true; @@ -121,10 +121,10 @@ bool ParserKQLBase::setSubQuerySource(ASTPtr & select_query, ASTPtr & source, bo return true; } -String ParserKQLBase::getExprFromToken(const String & text, const uint32_t max_depth) +String ParserKQLBase::getExprFromToken(const String & text, uint32_t max_depth, uint32_t max_backtracks) { Tokens tokens(text.c_str(), text.c_str() + text.size()); - IParser::Pos pos(tokens, max_depth); + IParser::Pos pos(tokens, max_depth, max_backtracks); return getExprFromToken(pos); } @@ -523,7 +523,7 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) String sub_query = std::format("({})", String(operation_pos.front().second->begin, last_pos->end)); Tokens token_subquery(sub_query.c_str(), sub_query.c_str() + sub_query.size()); - IParser::Pos 
pos_subquery(token_subquery, pos.max_depth); + IParser::Pos pos_subquery(token_subquery, pos.max_depth, pos.max_backtracks); if (!ParserKQLSubquery().parse(pos_subquery, tables, expected)) return false; @@ -544,7 +544,7 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (oprator) { Tokens token_clause(op_calsue.c_str(), op_calsue.c_str() + op_calsue.size()); - IParser::Pos pos_clause(token_clause, pos.max_depth); + IParser::Pos pos_clause(token_clause, pos.max_depth, pos.max_backtracks); if (!oprator->parse(pos_clause, node, expected)) return false; } @@ -577,7 +577,7 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { auto expr = String("*"); Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos new_pos(tokens, pos.max_depth); + IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks); if (!std::make_unique()->parse(new_pos, node, expected)) return false; } diff --git a/src/Parsers/Kusto/ParserKQLQuery.h b/src/Parsers/Kusto/ParserKQLQuery.h index a594f43ceec..e003ee3ee8b 100644 --- a/src/Parsers/Kusto/ParserKQLQuery.h +++ b/src/Parsers/Kusto/ParserKQLQuery.h @@ -9,11 +9,11 @@ class ParserKQLBase : public IParserBase { public: static String getExprFromToken(Pos & pos); - static String getExprFromToken(const String & text, uint32_t max_depth); + static String getExprFromToken(const String & text, uint32_t max_depth, uint32_t max_backtracks); static String getExprFromPipe(Pos & pos); static bool setSubQuerySource(ASTPtr & select_query, ASTPtr & source, bool dest_is_subquery, bool src_is_subquery); - static bool parseSQLQueryByString(ParserPtr && parser, String & query, ASTPtr & select_node, int32_t max_depth); - bool parseByString(String expr, ASTPtr & node, uint32_t max_depth); + static bool parseSQLQueryByString(ParserPtr && parser, String & query, ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks); + bool parseByString(String expr, ASTPtr & node, uint32_t max_depth, uint32_t max_backtracks); }; class ParserKQLQuery : public IParserBase diff --git a/src/Parsers/Kusto/ParserKQLSort.cpp b/src/Parsers/Kusto/ParserKQLSort.cpp index 7e5ac2b17e7..852ba50698d 100644 --- a/src/Parsers/Kusto/ParserKQLSort.cpp +++ b/src/Parsers/Kusto/ParserKQLSort.cpp @@ -19,7 +19,7 @@ bool ParserKQLSort::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) auto expr = getExprFromToken(pos); Tokens tokens(expr.c_str(), expr.c_str() + expr.size()); - IParser::Pos new_pos(tokens, pos.max_depth); + IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks); auto pos_backup = new_pos; if (!order_list.parse(pos_backup, order_expression_list, expected)) diff --git a/src/Parsers/Kusto/ParserKQLStatement.cpp b/src/Parsers/Kusto/ParserKQLStatement.cpp index 668696fa9dc..fbf2110e664 100644 --- a/src/Parsers/Kusto/ParserKQLStatement.cpp +++ b/src/Parsers/Kusto/ParserKQLStatement.cpp @@ -95,7 +95,7 @@ bool ParserKQLTableFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expe } ++pos; Tokens token_kql(kql_statement.c_str(), kql_statement.c_str() + kql_statement.size()); - IParser::Pos pos_kql(token_kql, pos.max_depth); + IParser::Pos pos_kql(token_kql, pos.max_depth, pos.max_backtracks); if (kql_p.parse(pos_kql, select, expected)) { diff --git a/src/Parsers/Kusto/ParserKQLSummarize.cpp b/src/Parsers/Kusto/ParserKQLSummarize.cpp index a45717930bb..47d706d0b4b 100644 --- a/src/Parsers/Kusto/ParserKQLSummarize.cpp +++ b/src/Parsers/Kusto/ParserKQLSummarize.cpp @@ -192,10 +192,10 @@ bool 
ParserKQLSummarize::parseImpl(Pos & pos, ASTPtr & node, Expected & expected expr_columns = expr_columns + "," + expr_aggregation; } - String converted_columns = getExprFromToken(expr_columns, pos.max_depth); + String converted_columns = getExprFromToken(expr_columns, pos.max_depth, pos.max_backtracks); Tokens token_converted_columns(converted_columns.c_str(), converted_columns.c_str() + converted_columns.size()); - IParser::Pos pos_converted_columns(token_converted_columns, pos.max_depth); + IParser::Pos pos_converted_columns(token_converted_columns, pos.max_depth, pos.max_backtracks); if (!ParserNotEmptyExpressionList(true).parse(pos_converted_columns, select_expression_list, expected)) return false; @@ -204,10 +204,10 @@ bool ParserKQLSummarize::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (groupby) { - String converted_groupby = getExprFromToken(expr_groupby, pos.max_depth); + String converted_groupby = getExprFromToken(expr_groupby, pos.max_depth, pos.max_backtracks); Tokens token_converted_groupby(converted_groupby.c_str(), converted_groupby.c_str() + converted_groupby.size()); - IParser::Pos postoken_converted_groupby(token_converted_groupby, pos.max_depth); + IParser::Pos postoken_converted_groupby(token_converted_groupby, pos.max_depth, pos.max_backtracks); if (!ParserNotEmptyExpressionList(false).parse(postoken_converted_groupby, group_expression_list, expected)) return false; diff --git a/src/Parsers/Kusto/parseKQLQuery.cpp b/src/Parsers/Kusto/parseKQLQuery.cpp index bcc04ef7001..34a009873f8 100644 --- a/src/Parsers/Kusto/parseKQLQuery.cpp +++ b/src/Parsers/Kusto/parseKQLQuery.cpp @@ -322,12 +322,13 @@ ASTPtr tryParseKQLQuery( bool allow_multi_statements, size_t max_query_size, size_t max_parser_depth, + size_t max_parser_backtracks, bool skip_insignificant) { const char * query_begin = _out_query_end; Tokens tokens(query_begin, all_queries_end, max_query_size, skip_insignificant); /// NOTE: consider use UInt32 for max_parser_depth setting. 
- IParser::Pos token_iterator(tokens, static_cast(max_parser_depth)); + IParser::Pos token_iterator(tokens, static_cast(max_parser_depth), static_cast(max_parser_backtracks)); if (token_iterator->isEnd() || token_iterator->type == TokenType::Semicolon) @@ -441,10 +442,11 @@ ASTPtr parseKQLQueryAndMovePosition( const std::string & query_description, bool allow_multi_statements, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { std::string error_message; - ASTPtr res = tryParseKQLQuery(parser, pos, end, error_message, false, query_description, allow_multi_statements, max_query_size, max_parser_depth); + ASTPtr res = tryParseKQLQuery(parser, pos, end, error_message, false, query_description, allow_multi_statements, max_query_size, max_parser_depth, max_parser_backtracks); if (res) return res; @@ -458,9 +460,10 @@ ASTPtr parseKQLQuery( const char * end, const std::string & query_description, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { - return parseKQLQueryAndMovePosition(parser, begin, end, query_description, false, max_query_size, max_parser_depth); + return parseKQLQueryAndMovePosition(parser, begin, end, query_description, false, max_query_size, max_parser_depth, max_parser_backtracks); } ASTPtr parseKQLQuery( @@ -468,18 +471,20 @@ ASTPtr parseKQLQuery( const std::string & query, const std::string & query_description, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { - return parseKQLQuery(parser, query.data(), query.data() + query.size(), query_description, max_query_size, max_parser_depth); + return parseKQLQuery(parser, query.data(), query.data() + query.size(), query_description, max_query_size, max_parser_depth, max_parser_backtracks); } ASTPtr parseKQLQuery( IParser & parser, const std::string & query, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { - return parseKQLQuery(parser, query.data(), query.data() + query.size(), parser.getName(), max_query_size, max_parser_depth); + return parseKQLQuery(parser, query.data(), query.data() + query.size(), parser.getName(), max_query_size, max_parser_depth, max_parser_backtracks); } } diff --git a/src/Parsers/Kusto/parseKQLQuery.h b/src/Parsers/Kusto/parseKQLQuery.h index fca017e70fe..9e52ba56307 100644 --- a/src/Parsers/Kusto/parseKQLQuery.h +++ b/src/Parsers/Kusto/parseKQLQuery.h @@ -3,6 +3,7 @@ #include #include #include + namespace DB { @@ -10,10 +11,6 @@ namespace DB * Used in syntax error message. */ -} -namespace DB -{ - class IParser; /// Parse query or set 'out_error_message'. @@ -24,11 +21,11 @@ ASTPtr tryParseKQLQuery( std::string & out_error_message, bool hilite, const std::string & description, - bool allow_multi_statements, /// If false, check for non-space characters after semicolon and set error message if any. - size_t max_query_size, /// If (end - pos) > max_query_size and query is longer than max_query_size then throws "Max query size exceeded". - /// Disabled if zero. Is used in order to check query size if buffer can contains data for INSERT query. + bool allow_multi_statements, + size_t max_query_size, size_t max_parser_depth, - bool skip_insignificant = true); /// If true, lexer will skip all insignificant tokens (e.g. whitespaces) + size_t max_parser_backtracks, + bool skip_insignificant = true); /// Parse query or throw an exception with error message. 
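(Illustrative sketch, not part of the patch: the hunks above and below thread a new `max_parser_backtracks` limit through the KQL and SQL parser entry points, next to the existing `max_parser_depth`. Assuming the `DBMS_DEFAULT_MAX_PARSER_DEPTH` / `DBMS_DEFAULT_MAX_PARSER_BACKTRACKS` constants referenced elsewhere in this patch and the usual `Parsers/` headers, an updated call site would look roughly like this:)

``` cpp
// Sketch only; headers and constants are assumptions based on names appearing in this patch.
#include <Core/Defines.h>          // assumed to define DBMS_DEFAULT_MAX_PARSER_DEPTH / _BACKTRACKS
#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>

#include <string>

DB::ASTPtr parseOneQuery(const std::string & query)
{
    const char * end = query.data() + query.size();
    DB::ParserQuery parser(end);

    /// New signature: a backtrack budget is passed alongside the recursion depth limit,
    /// so runaway backtracking can be cut off independently of nesting depth.
    return DB::parseQuery(
        parser, query.data(), end, "example query",
        /*max_query_size*/ 0,
        /*max_parser_depth*/ DBMS_DEFAULT_MAX_PARSER_DEPTH,
        /*max_parser_backtracks*/ DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);
}
```

(The KQL helpers changed in this patch, such as `getExprFromToken` and `parseByString`, forward the same pair of limits when they construct nested `IParser::Pos` iterators.)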
@@ -39,7 +36,8 @@ ASTPtr parseKQLQueryAndMovePosition( const std::string & description, bool allow_multi_statements, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); ASTPtr parseKQLQuery( IParser & parser, @@ -47,18 +45,22 @@ ASTPtr parseKQLQuery( const char * end, const std::string & description, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); ASTPtr parseKQLQuery( IParser & parser, const std::string & query, const std::string & query_description, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); ASTPtr parseKQLQuery( IParser & parser, const std::string & query, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); + } diff --git a/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp b/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp index d406cdbd3b9..4db96646e16 100644 --- a/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp @@ -11,7 +11,7 @@ using namespace DB::MySQLParser; static inline ASTPtr tryParserQuery(IParser & parser, const String & query) { - return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, 0); + return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, 0, 0); } TEST(ParserAlterCommand, AddAlterCommand) diff --git a/src/Parsers/MySQL/tests/gtest_alter_parser.cpp b/src/Parsers/MySQL/tests/gtest_alter_parser.cpp index 4ebbe332710..2b12d7bdcf1 100644 --- a/src/Parsers/MySQL/tests/gtest_alter_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_alter_parser.cpp @@ -9,7 +9,7 @@ using namespace DB::MySQLParser; static inline ASTPtr tryParserQuery(IParser & parser, const String & query) { - return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, 0); + return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, 0, 0); } TEST(ParserAlterQuery, AlterQuery) diff --git a/src/Parsers/MySQL/tests/gtest_column_parser.cpp b/src/Parsers/MySQL/tests/gtest_column_parser.cpp index b1c7c778bea..21c37e4ee2e 100644 --- a/src/Parsers/MySQL/tests/gtest_column_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_column_parser.cpp @@ -17,7 +17,7 @@ TEST(ParserColumn, AllNonGeneratedColumnOption) String input = "col_01 VARCHAR(100) NOT NULL DEFAULT NULL AUTO_INCREMENT UNIQUE KEY PRIMARY KEY COMMENT 'column comment' COLLATE utf8 " "COLUMN_FORMAT FIXED STORAGE MEMORY REFERENCES tbl_name (col_01) CHECK 1"; - ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0, 0); EXPECT_EQ(ast->as()->name, "col_01"); EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); @@ -42,7 +42,7 @@ TEST(ParserColumn, AllGeneratedColumnOption) String input = "col_01 VARCHAR(100) NULL UNIQUE KEY PRIMARY KEY COMMENT 'column comment' COLLATE utf8 " "REFERENCES tbl_name (col_01) CHECK 1 GENERATED ALWAYS AS (1) STORED"; - ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0, 0); EXPECT_EQ(ast->as()->name, "col_01"); EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); 
EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); diff --git a/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp b/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp index 9c9124c9f58..a06f2ade24a 100644 --- a/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp @@ -14,7 +14,7 @@ TEST(ParserConstraint, CheckConstraint) ParserDeclareConstraint p_constraint; String constraint_01 = "CONSTRAINT symbol_name CHECK col_01 = 1"; - ASTPtr ast_constraint_01 = parseQuery(p_constraint, constraint_01.data(), constraint_01.data() + constraint_01.size(), "", 0, 0); + ASTPtr ast_constraint_01 = parseQuery(p_constraint, constraint_01.data(), constraint_01.data() + constraint_01.size(), "", 0, 0, 0); EXPECT_EQ(ast_constraint_01->as()->constraint_name, "symbol_name"); auto * check_expression_01 = ast_constraint_01->as()->check_expression->as(); EXPECT_EQ(check_expression_01->name, "equals"); @@ -22,7 +22,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(check_expression_01->arguments->children[1]->as()->value.safeGet(), 1); String constraint_02 = "CONSTRAINT CHECK col_01 = 1"; - ASTPtr ast_constraint_02 = parseQuery(p_constraint, constraint_02.data(), constraint_02.data() + constraint_02.size(), "", 0, 0); + ASTPtr ast_constraint_02 = parseQuery(p_constraint, constraint_02.data(), constraint_02.data() + constraint_02.size(), "", 0, 0, 0); EXPECT_EQ(ast_constraint_02->as()->constraint_name, ""); auto * check_expression_02 = ast_constraint_02->as()->check_expression->as(); EXPECT_EQ(check_expression_02->name, "equals"); @@ -30,7 +30,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(check_expression_02->arguments->children[1]->as()->value.safeGet(), 1); String constraint_03 = "CHECK col_01 = 1"; - ASTPtr ast_constraint_03 = parseQuery(p_constraint, constraint_03.data(), constraint_03.data() + constraint_03.size(), "", 0, 0); + ASTPtr ast_constraint_03 = parseQuery(p_constraint, constraint_03.data(), constraint_03.data() + constraint_03.size(), "", 0, 0, 0); EXPECT_EQ(ast_constraint_03->as()->constraint_name, ""); auto * check_expression_03 = ast_constraint_03->as()->check_expression->as(); EXPECT_EQ(check_expression_03->name, "equals"); @@ -38,7 +38,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(check_expression_03->arguments->children[1]->as()->value.safeGet(), 1); String constraint_04 = "CONSTRAINT CHECK col_01 = 1 ENFORCED"; - ASTPtr ast_constraint_04 = parseQuery(p_constraint, constraint_04.data(), constraint_04.data() + constraint_04.size(), "", 0, 0); + ASTPtr ast_constraint_04 = parseQuery(p_constraint, constraint_04.data(), constraint_04.data() + constraint_04.size(), "", 0, 0, 0); EXPECT_TRUE(ast_constraint_04->as()->enforced); EXPECT_EQ(ast_constraint_04->as()->constraint_name, ""); auto * check_expression_04 = ast_constraint_04->as()->check_expression->as(); @@ -47,7 +47,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(check_expression_04->arguments->children[1]->as()->value.safeGet(), 1); String constraint_05 = "CONSTRAINT CHECK col_01 = 1 NOT ENFORCED"; - ASTPtr ast_constraint_05 = parseQuery(p_constraint, constraint_05.data(), constraint_05.data() + constraint_05.size(), "", 0, 0); + ASTPtr ast_constraint_05 = parseQuery(p_constraint, constraint_05.data(), constraint_05.data() + constraint_05.size(), "", 0, 0, 0); EXPECT_FALSE(ast_constraint_05->as()->enforced); EXPECT_EQ(ast_constraint_05->as()->constraint_name, ""); auto * check_expression_05 = 
ast_constraint_05->as()->check_expression->as(); diff --git a/src/Parsers/MySQL/tests/gtest_create_parser.cpp b/src/Parsers/MySQL/tests/gtest_create_parser.cpp index 2f65eb6e592..8512b88ffc1 100644 --- a/src/Parsers/MySQL/tests/gtest_create_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_create_parser.cpp @@ -15,16 +15,16 @@ TEST(CreateTableParser, LikeCreate) { ParserCreateQuery p_create_query; String like_create_01 = "CREATE TABLE IF NOT EXISTS table_name LIKE table_name_01"; - parseQuery(p_create_query, like_create_01.data(), like_create_01.data() + like_create_01.size(), "", 0, 0); + parseQuery(p_create_query, like_create_01.data(), like_create_01.data() + like_create_01.size(), "", 0, 0, 0); String like_create_02 = "CREATE TABLE IF NOT EXISTS table_name (LIKE table_name_01)"; - parseQuery(p_create_query, like_create_02.data(), like_create_02.data() + like_create_02.size(), "", 0, 0); + parseQuery(p_create_query, like_create_02.data(), like_create_02.data() + like_create_02.size(), "", 0, 0, 0); } TEST(CreateTableParser, SimpleCreate) { ParserCreateQuery p_create_query; String input = "CREATE TABLE IF NOT EXISTS table_name(col_01 VARCHAR(100), INDEX (col_01), CHECK 1) ENGINE INNODB PARTITION BY HASH(col_01)"; - ASTPtr ast = parseQuery(p_create_query, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_create_query, input.data(), input.data() + input.size(), "", 0, 0, 0); EXPECT_TRUE(ast->as()->if_not_exists); EXPECT_EQ(ast->as()->columns_list->as()->columns->children.size(), 1); EXPECT_EQ(ast->as()->columns_list->as()->indices->children.size(), 1); @@ -37,7 +37,7 @@ TEST(CreateTableParser, SS) { ParserCreateQuery p_create_query; String input = "CREATE TABLE `test_table_1` (`a` int DEFAULT NULL, `b` int DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci"; - ASTPtr ast = parseQuery(p_create_query, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_create_query, input.data(), input.data() + input.size(), "", 0, 0, 0); WriteBufferFromOStream buf(std::cerr, 4096); ast->dumpTree(buf); buf.finalize(); diff --git a/src/Parsers/MySQL/tests/gtest_index_parser.cpp b/src/Parsers/MySQL/tests/gtest_index_parser.cpp index a8be6787b2c..187bac3e090 100644 --- a/src/Parsers/MySQL/tests/gtest_index_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_index_parser.cpp @@ -14,7 +14,7 @@ TEST(ParserIndex, AllIndexOptions) String input = "INDEX (col_01, col_02(100), col_03 DESC) KEY_BLOCK_SIZE 3 USING HASH WITH PARSER parser_name COMMENT 'index comment' VISIBLE"; ParserDeclareIndex p_index; - ASTPtr ast = parseQuery(p_index, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_index, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclareIndex * declare_index = ast->as(); EXPECT_EQ(declare_index->index_columns->children[0]->as()->name(), "col_01"); @@ -33,7 +33,7 @@ TEST(ParserIndex, OptionalIndexOptions) String input = "INDEX (col_01, col_02(100), col_03 DESC) USING HASH INVISIBLE KEY_BLOCK_SIZE 3"; ParserDeclareIndex p_index; - ASTPtr ast = parseQuery(p_index, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_index, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclareIndex * declare_index = ast->as(); EXPECT_EQ(declare_index->index_columns->children[0]->as()->name(), "col_01"); @@ -50,28 +50,28 @@ TEST(ParserIndex, OrdinaryIndex) { ParserDeclareIndex p_index; String non_unique_index_01 = "KEY index_name USING HASH (col_01) INVISIBLE"; - 
parseQuery(p_index, non_unique_index_01.data(), non_unique_index_01.data() + non_unique_index_01.size(), "", 0, 0); + parseQuery(p_index, non_unique_index_01.data(), non_unique_index_01.data() + non_unique_index_01.size(), "", 0, 0, 0); String non_unique_index_02 = "INDEX index_name USING HASH (col_01) INVISIBLE"; - parseQuery(p_index, non_unique_index_02.data(), non_unique_index_02.data() + non_unique_index_02.size(), "", 0, 0); + parseQuery(p_index, non_unique_index_02.data(), non_unique_index_02.data() + non_unique_index_02.size(), "", 0, 0, 0); String fulltext_index_01 = "FULLTEXT index_name (col_01) INVISIBLE"; - parseQuery(p_index, fulltext_index_01.data(), fulltext_index_01.data() + fulltext_index_01.size(), "", 0, 0); + parseQuery(p_index, fulltext_index_01.data(), fulltext_index_01.data() + fulltext_index_01.size(), "", 0, 0, 0); String fulltext_index_02 = "FULLTEXT INDEX index_name (col_01) INVISIBLE"; - parseQuery(p_index, fulltext_index_02.data(), fulltext_index_02.data() + fulltext_index_02.size(), "", 0, 0); + parseQuery(p_index, fulltext_index_02.data(), fulltext_index_02.data() + fulltext_index_02.size(), "", 0, 0, 0); String fulltext_index_03 = "FULLTEXT KEY index_name (col_01) INVISIBLE"; - parseQuery(p_index, fulltext_index_03.data(), fulltext_index_03.data() + fulltext_index_03.size(), "", 0, 0); + parseQuery(p_index, fulltext_index_03.data(), fulltext_index_03.data() + fulltext_index_03.size(), "", 0, 0, 0); String spatial_index_01 = "SPATIAL index_name (col_01) INVISIBLE"; - parseQuery(p_index, spatial_index_01.data(), spatial_index_01.data() + spatial_index_01.size(), "", 0, 0); + parseQuery(p_index, spatial_index_01.data(), spatial_index_01.data() + spatial_index_01.size(), "", 0, 0, 0); String spatial_index_02 = "SPATIAL INDEX index_name (col_01) INVISIBLE"; - parseQuery(p_index, spatial_index_02.data(), spatial_index_02.data() + spatial_index_02.size(), "", 0, 0); + parseQuery(p_index, spatial_index_02.data(), spatial_index_02.data() + spatial_index_02.size(), "", 0, 0, 0); String spatial_index_03 = "SPATIAL KEY index_name (col_01) INVISIBLE"; - parseQuery(p_index, spatial_index_03.data(), spatial_index_03.data() + spatial_index_03.size(), "", 0, 0); + parseQuery(p_index, spatial_index_03.data(), spatial_index_03.data() + spatial_index_03.size(), "", 0, 0, 0); } TEST(ParserIndex, ConstraintIndex) @@ -79,47 +79,47 @@ TEST(ParserIndex, ConstraintIndex) ParserDeclareIndex p_index; String primary_key_01 = "PRIMARY KEY (col_01) INVISIBLE"; - parseQuery(p_index, primary_key_01.data(), primary_key_01.data() + primary_key_01.size(), "", 0, 0); + parseQuery(p_index, primary_key_01.data(), primary_key_01.data() + primary_key_01.size(), "", 0, 0, 0); String primary_key_02 = "PRIMARY KEY USING BTREE (col_01) INVISIBLE"; - parseQuery(p_index, primary_key_02.data(), primary_key_02.data() + primary_key_02.size(), "", 0, 0); + parseQuery(p_index, primary_key_02.data(), primary_key_02.data() + primary_key_02.size(), "", 0, 0, 0); String primary_key_03 = "CONSTRAINT PRIMARY KEY USING BTREE (col_01) INVISIBLE"; - parseQuery(p_index, primary_key_03.data(), primary_key_03.data() + primary_key_03.size(), "", 0, 0); + parseQuery(p_index, primary_key_03.data(), primary_key_03.data() + primary_key_03.size(), "", 0, 0, 0); String primary_key_04 = "CONSTRAINT index_name PRIMARY KEY USING BTREE (col_01) INVISIBLE"; - parseQuery(p_index, primary_key_04.data(), primary_key_04.data() + primary_key_04.size(), "", 0, 0); + parseQuery(p_index, primary_key_04.data(), primary_key_04.data() + 
primary_key_04.size(), "", 0, 0, 0); String unique_key_01 = "UNIQUE (col_01) INVISIBLE"; - parseQuery(p_index, unique_key_01.data(), unique_key_01.data() + unique_key_01.size(), "", 0, 0); + parseQuery(p_index, unique_key_01.data(), unique_key_01.data() + unique_key_01.size(), "", 0, 0, 0); String unique_key_02 = "UNIQUE INDEX (col_01) INVISIBLE"; - parseQuery(p_index, unique_key_02.data(), unique_key_02.data() + unique_key_02.size(), "", 0, 0); + parseQuery(p_index, unique_key_02.data(), unique_key_02.data() + unique_key_02.size(), "", 0, 0, 0); String unique_key_03 = "UNIQUE KEY (col_01) INVISIBLE"; - parseQuery(p_index, unique_key_03.data(), unique_key_03.data() + unique_key_03.size(), "", 0, 0); + parseQuery(p_index, unique_key_03.data(), unique_key_03.data() + unique_key_03.size(), "", 0, 0, 0); String unique_key_04 = "UNIQUE KEY index_name (col_01) INVISIBLE"; - parseQuery(p_index, unique_key_04.data(), unique_key_04.data() + unique_key_04.size(), "", 0, 0); + parseQuery(p_index, unique_key_04.data(), unique_key_04.data() + unique_key_04.size(), "", 0, 0, 0); String unique_key_05 = "UNIQUE KEY index_name USING HASH (col_01) INVISIBLE"; - parseQuery(p_index, unique_key_05.data(), unique_key_05.data() + unique_key_05.size(), "", 0, 0); + parseQuery(p_index, unique_key_05.data(), unique_key_05.data() + unique_key_05.size(), "", 0, 0, 0); String unique_key_06 = "CONSTRAINT UNIQUE KEY index_name USING HASH (col_01) INVISIBLE"; - parseQuery(p_index, unique_key_06.data(), unique_key_06.data() + unique_key_06.size(), "", 0, 0); + parseQuery(p_index, unique_key_06.data(), unique_key_06.data() + unique_key_06.size(), "", 0, 0, 0); String unique_key_07 = "CONSTRAINT index_name UNIQUE KEY index_name_1 USING HASH (col_01) INVISIBLE"; - parseQuery(p_index, unique_key_07.data(), unique_key_07.data() + unique_key_07.size(), "", 0, 0); + parseQuery(p_index, unique_key_07.data(), unique_key_07.data() + unique_key_07.size(), "", 0, 0, 0); String foreign_key_01 = "FOREIGN KEY (col_01) REFERENCES tbl_name (col_01)"; - parseQuery(p_index, foreign_key_01.data(), foreign_key_01.data() + foreign_key_01.size(), "", 0, 0); + parseQuery(p_index, foreign_key_01.data(), foreign_key_01.data() + foreign_key_01.size(), "", 0, 0, 0); String foreign_key_02 = "FOREIGN KEY index_name (col_01) REFERENCES tbl_name (col_01)"; - parseQuery(p_index, foreign_key_02.data(), foreign_key_02.data() + foreign_key_02.size(), "", 0, 0); + parseQuery(p_index, foreign_key_02.data(), foreign_key_02.data() + foreign_key_02.size(), "", 0, 0, 0); String foreign_key_03 = "CONSTRAINT FOREIGN KEY index_name (col_01) REFERENCES tbl_name (col_01)"; - parseQuery(p_index, foreign_key_03.data(), foreign_key_03.data() + foreign_key_03.size(), "", 0, 0); + parseQuery(p_index, foreign_key_03.data(), foreign_key_03.data() + foreign_key_03.size(), "", 0, 0, 0); String foreign_key_04 = "CONSTRAINT index_name FOREIGN KEY index_name_01 (col_01) REFERENCES tbl_name (col_01)"; - parseQuery(p_index, foreign_key_04.data(), foreign_key_04.data() + foreign_key_04.size(), "", 0, 0); + parseQuery(p_index, foreign_key_04.data(), foreign_key_04.data() + foreign_key_04.size(), "", 0, 0, 0); } diff --git a/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp b/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp index 01b757e5891..6ec8d73530e 100644 --- a/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp @@ -14,14 +14,14 @@ TEST(ParserPartitionOptions, HashPatitionOptions) String 
hash_partition = "PARTITION BY HASH(col_01)"; ParserDeclarePartitionOptions p_partition_options; - ASTPtr ast_01 = parseQuery(p_partition_options, hash_partition.data(), hash_partition.data() + hash_partition.size(), "", 0, 0); + ASTPtr ast_01 = parseQuery(p_partition_options, hash_partition.data(), hash_partition.data() + hash_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); EXPECT_EQ(declare_partition_options_01->partition_type, "hash"); EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String linear_hash_partition = "PARTITION BY LINEAR HASH(col_01)"; - ASTPtr ast_02 = parseQuery(p_partition_options, linear_hash_partition.data(), linear_hash_partition.data() + linear_hash_partition.size(), "", 0, 0); + ASTPtr ast_02 = parseQuery(p_partition_options, linear_hash_partition.data(), linear_hash_partition.data() + linear_hash_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "linear_hash"); @@ -33,14 +33,14 @@ TEST(ParserPartitionOptions, KeyPatitionOptions) String key_partition = "PARTITION BY KEY(col_01)"; ParserDeclarePartitionOptions p_partition_options; - ASTPtr ast_01 = parseQuery(p_partition_options, key_partition.data(), key_partition.data() + key_partition.size(), "", 0, 0); + ASTPtr ast_01 = parseQuery(p_partition_options, key_partition.data(), key_partition.data() + key_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); EXPECT_EQ(declare_partition_options_01->partition_type, "key"); EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String linear_key_partition = "PARTITION BY LINEAR KEY(col_01, col_02)"; - ASTPtr ast_02 = parseQuery(p_partition_options, linear_key_partition.data(), linear_key_partition.data() + linear_key_partition.size(), "", 0, 0); + ASTPtr ast_02 = parseQuery(p_partition_options, linear_key_partition.data(), linear_key_partition.data() + linear_key_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "linear_key"); @@ -49,7 +49,7 @@ TEST(ParserPartitionOptions, KeyPatitionOptions) EXPECT_EQ(columns_list->children[1]->as()->name(), "col_02"); String key_partition_with_algorithm = "PARTITION BY KEY ALGORITHM=1 (col_01)"; - ASTPtr ast_03 = parseQuery(p_partition_options, key_partition_with_algorithm.data(), key_partition_with_algorithm.data() + key_partition_with_algorithm.size(), "", 0, 0); + ASTPtr ast_03 = parseQuery(p_partition_options, key_partition_with_algorithm.data(), key_partition_with_algorithm.data() + key_partition_with_algorithm.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_03 = ast_03->as(); EXPECT_EQ(declare_partition_options_03->partition_type, "key_1"); @@ -61,14 +61,14 @@ TEST(ParserPartitionOptions, RangePatitionOptions) String range_partition = "PARTITION BY RANGE(col_01)"; ParserDeclarePartitionOptions p_partition_options; - ASTPtr ast_01 = parseQuery(p_partition_options, range_partition.data(), range_partition.data() + range_partition.size(), "", 0, 0); + ASTPtr ast_01 = parseQuery(p_partition_options, range_partition.data(), range_partition.data() + range_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); 
EXPECT_EQ(declare_partition_options_01->partition_type, "range"); EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String range_columns_partition = "PARTITION BY RANGE COLUMNS(col_01, col_02)"; - ASTPtr ast_02 = parseQuery(p_partition_options, range_columns_partition.data(), range_columns_partition.data() + range_columns_partition.size(), "", 0, 0); + ASTPtr ast_02 = parseQuery(p_partition_options, range_columns_partition.data(), range_columns_partition.data() + range_columns_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "range"); @@ -82,14 +82,14 @@ TEST(ParserPartitionOptions, ListPatitionOptions) String range_partition = "PARTITION BY LIST(col_01)"; ParserDeclarePartitionOptions p_partition_options; - ASTPtr ast_01 = parseQuery(p_partition_options, range_partition.data(), range_partition.data() + range_partition.size(), "", 0, 0); + ASTPtr ast_01 = parseQuery(p_partition_options, range_partition.data(), range_partition.data() + range_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); EXPECT_EQ(declare_partition_options_01->partition_type, "list"); EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String range_columns_partition = "PARTITION BY LIST COLUMNS(col_01, col_02)"; - ASTPtr ast_02 = parseQuery(p_partition_options, range_columns_partition.data(), range_columns_partition.data() + range_columns_partition.size(), "", 0, 0); + ASTPtr ast_02 = parseQuery(p_partition_options, range_columns_partition.data(), range_columns_partition.data() + range_columns_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "list"); @@ -103,7 +103,7 @@ TEST(ParserPartitionOptions, PatitionNumberOptions) String numbers_partition = "PARTITION BY KEY(col_01) PARTITIONS 2"; ParserDeclarePartitionOptions p_partition_options; - ASTPtr ast = parseQuery(p_partition_options, numbers_partition.data(), numbers_partition.data() + numbers_partition.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_partition_options, numbers_partition.data(), numbers_partition.data() + numbers_partition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); @@ -116,7 +116,7 @@ TEST(ParserPartitionOptions, PatitionWithSubpartitionOptions) String partition_with_subpartition = "PARTITION BY KEY(col_01) PARTITIONS 3 SUBPARTITION BY HASH(col_02) SUBPARTITIONS 4"; ParserDeclarePartitionOptions p_partition_options; - ASTPtr ast = parseQuery(p_partition_options, partition_with_subpartition.data(), partition_with_subpartition.data() + partition_with_subpartition.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_partition_options, partition_with_subpartition.data(), partition_with_subpartition.data() + partition_with_subpartition.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); @@ -134,7 +134,7 @@ TEST(ParserPartitionOptions, PatitionOptionsWithDeclarePartition) ParserDeclarePartitionOptions p_partition_options; ASTPtr ast = parseQuery(p_partition_options, partition_options_with_declare.data(), - partition_options_with_declare.data() + partition_options_with_declare.size(), "", 0, 0); + 
partition_options_with_declare.data() + partition_options_with_declare.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); @@ -153,7 +153,7 @@ TEST(ParserPartitionOptions, PatitionOptionsWithDeclarePartitions) ParserDeclarePartitionOptions p_partition_options; ASTPtr ast = parseQuery(p_partition_options, partition_options_with_declare.data(), - partition_options_with_declare.data() + partition_options_with_declare.size(), "", 0, 0); + partition_options_with_declare.data() + partition_options_with_declare.size(), "", 0, 0, 0); ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); diff --git a/src/Parsers/MySQL/tests/gtest_partition_parser.cpp b/src/Parsers/MySQL/tests/gtest_partition_parser.cpp index 458c7acd553..07c7c03dbb7 100644 --- a/src/Parsers/MySQL/tests/gtest_partition_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_partition_parser.cpp @@ -17,7 +17,7 @@ TEST(ParserPartition, AllPatitionOptions) " TABLESPACE table_space_name"; ParserDeclarePartition p_partition; - ASTPtr ast = parseQuery(p_partition, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_partition, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition = ast->as(); EXPECT_EQ(declare_partition->partition_name, "partition_name"); @@ -35,7 +35,7 @@ TEST(ParserPartition, OptionalPatitionOptions) { String input = "PARTITION partition_name STORAGE engine = engine_name max_rows 1000 min_rows 0 tablespace table_space_name"; ParserDeclarePartition p_partition; - ASTPtr ast = parseQuery(p_partition, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_partition, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition = ast->as(); EXPECT_EQ(declare_partition->partition_name, "partition_name"); @@ -50,7 +50,7 @@ TEST(ParserPartition, PatitionOptionsWithLessThan) { ParserDeclarePartition p_partition; String partition_01 = "PARTITION partition_01 VALUES LESS THAN (1991) STORAGE engine = engine_name"; - ASTPtr ast_partition_01 = parseQuery(p_partition, partition_01.data(), partition_01.data() + partition_01.size(), "", 0, 0); + ASTPtr ast_partition_01 = parseQuery(p_partition, partition_01.data(), partition_01.data() + partition_01.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_01 = ast_partition_01->as(); EXPECT_EQ(declare_partition_01->partition_name, "partition_01"); @@ -59,7 +59,7 @@ TEST(ParserPartition, PatitionOptionsWithLessThan) EXPECT_EQ(declare_options_01->changes["engine"]->as()->name(), "engine_name"); String partition_02 = "PARTITION partition_02 VALUES LESS THAN MAXVALUE STORAGE engine = engine_name"; - ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0); + ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_02 = ast_partition_02->as(); EXPECT_EQ(declare_partition_02->partition_name, "partition_02"); @@ -68,7 +68,7 @@ TEST(ParserPartition, PatitionOptionsWithLessThan) EXPECT_EQ(declare_options_02->changes["engine"]->as()->name(), "engine_name"); String partition_03 = "PARTITION partition_03 VALUES LESS THAN (50, MAXVALUE) STORAGE engine = engine_name"; - ASTPtr ast_partition_03 = parseQuery(p_partition, 
partition_03.data(), partition_03.data() + partition_03.size(), "", 0, 0); + ASTPtr ast_partition_03 = parseQuery(p_partition, partition_03.data(), partition_03.data() + partition_03.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_03 = ast_partition_03->as(); EXPECT_EQ(declare_partition_03->partition_name, "partition_03"); @@ -79,7 +79,7 @@ TEST(ParserPartition, PatitionOptionsWithLessThan) EXPECT_EQ(declare_options_03->changes["engine"]->as()->name(), "engine_name"); String partition_04 = "PARTITION partition_04 VALUES LESS THAN (MAXVALUE, MAXVALUE) STORAGE engine = engine_name"; - ASTPtr ast_partition_04 = parseQuery(p_partition, partition_04.data(), partition_04.data() + partition_04.size(), "", 0, 0); + ASTPtr ast_partition_04 = parseQuery(p_partition, partition_04.data(), partition_04.data() + partition_04.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_04 = ast_partition_04->as(); EXPECT_EQ(declare_partition_04->partition_name, "partition_04"); @@ -94,7 +94,7 @@ TEST(ParserPartition, PatitionOptionsWithInExpression) { ParserDeclarePartition p_partition; String partition_01 = "PARTITION partition_01 VALUES IN (NULL, 1991, MAXVALUE) STORAGE engine = engine_name"; - ASTPtr ast_partition_01 = parseQuery(p_partition, partition_01.data(), partition_01.data() + partition_01.size(), "", 0, 0); + ASTPtr ast_partition_01 = parseQuery(p_partition, partition_01.data(), partition_01.data() + partition_01.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_01 = ast_partition_01->as(); EXPECT_EQ(declare_partition_01->partition_name, "partition_01"); @@ -106,7 +106,7 @@ TEST(ParserPartition, PatitionOptionsWithInExpression) EXPECT_EQ(declare_options_01->changes["engine"]->as()->name(), "engine_name"); String partition_02 = "PARTITION partition_02 VALUES IN ((NULL, 1991), (1991, NULL), (MAXVALUE, MAXVALUE)) STORAGE engine = engine_name"; - ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0); + ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_02 = ast_partition_02->as(); EXPECT_EQ(declare_partition_02->partition_name, "partition_02"); @@ -132,18 +132,17 @@ TEST(ParserPartition, PatitionOptionsWithSubpartitions) { ParserDeclarePartition p_partition; String partition_01 = "PARTITION partition_01 VALUES IN (NULL, 1991, MAXVALUE) STORAGE engine = engine_name (SUBPARTITION s_p01)"; - ASTPtr ast_partition_01 = parseQuery(p_partition, partition_01.data(), partition_01.data() + partition_01.size(), "", 0, 0); + ASTPtr ast_partition_01 = parseQuery(p_partition, partition_01.data(), partition_01.data() + partition_01.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_01 = ast_partition_01->as(); EXPECT_EQ(declare_partition_01->partition_name, "partition_01"); EXPECT_TRUE(declare_partition_01->subpartitions->as()->children[0]->as()); String partition_02 = "PARTITION partition_02 VALUES IN (NULL, 1991, MAXVALUE) STORAGE engine = engine_name (SUBPARTITION s_p01, SUBPARTITION s_p02)"; - ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0); + ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0, 0); ASTDeclarePartition * declare_partition_02 = ast_partition_02->as(); EXPECT_EQ(declare_partition_02->partition_name, "partition_02"); 
EXPECT_TRUE(declare_partition_02->subpartitions->as()->children[0]->as()); EXPECT_TRUE(declare_partition_02->subpartitions->as()->children[1]->as()); } - diff --git a/src/Parsers/MySQL/tests/gtest_reference_parser.cpp b/src/Parsers/MySQL/tests/gtest_reference_parser.cpp index 7447f16fc7c..d5b3c9b596d 100644 --- a/src/Parsers/MySQL/tests/gtest_reference_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_reference_parser.cpp @@ -12,12 +12,12 @@ TEST(ParserReference, SimpleReference) ParserDeclareReference p_reference; String reference_01 = "REFERENCES table_name (ref_col_01)"; - ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0); + ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_01->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name(), "ref_col_01"); String reference_02 = "REFERENCES table_name (ref_col_01, ref_col_02)"; - ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0); + ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_02->as()->reference_table_name, "table_name"); ASTPtr arguments = ast_reference_02->as()->reference_expression->as()->arguments; EXPECT_EQ(arguments->children[0]->as()->name(), "ref_col_01"); @@ -28,19 +28,19 @@ TEST(ParserReference, ReferenceDifferenceKind) { ParserDeclareReference p_reference; String reference_01 = "REFERENCES table_name (ref_col_01) MATCH FULL"; - ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0); + ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_01->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_01->as()->kind, ASTDeclareReference::MATCH_FULL); String reference_02 = "REFERENCES table_name (ref_col_01) MATCH PARTIAL"; - ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0); + ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_02->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_02->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_02->as()->kind, ASTDeclareReference::MATCH_PARTIAL); String reference_03 = "REFERENCES table_name (ref_col_01) MATCH SIMPLE"; - ASTPtr ast_reference_03 = parseQuery(p_reference, reference_03.data(), reference_03.data() + reference_03.size(), "", 0, 0); + ASTPtr ast_reference_03 = parseQuery(p_reference, reference_03.data(), reference_03.data() + reference_03.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_03->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_03->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_03->as()->kind, ASTDeclareReference::MATCH_SIMPLE); @@ -50,7 +50,7 @@ TEST(ParserReference, ReferenceDifferenceOption) { ParserDeclareReference p_reference; String reference_01 = "REFERENCES table_name (ref_col_01) MATCH FULL ON DELETE RESTRICT ON UPDATE RESTRICT"; - ASTPtr 
ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0); + ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_01->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_01->as()->kind, ASTDeclareReference::MATCH_FULL); @@ -58,7 +58,7 @@ TEST(ParserReference, ReferenceDifferenceOption) EXPECT_EQ(ast_reference_01->as()->on_update_option, ASTDeclareReference::RESTRICT); String reference_02 = "REFERENCES table_name (ref_col_01) MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE"; - ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0); + ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_02->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_02->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_02->as()->kind, ASTDeclareReference::MATCH_FULL); @@ -66,7 +66,7 @@ TEST(ParserReference, ReferenceDifferenceOption) EXPECT_EQ(ast_reference_02->as()->on_update_option, ASTDeclareReference::CASCADE); String reference_03 = "REFERENCES table_name (ref_col_01) MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL"; - ASTPtr ast_reference_03 = parseQuery(p_reference, reference_03.data(), reference_03.data() + reference_03.size(), "", 0, 0); + ASTPtr ast_reference_03 = parseQuery(p_reference, reference_03.data(), reference_03.data() + reference_03.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_03->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_03->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_03->as()->kind, ASTDeclareReference::MATCH_FULL); @@ -74,7 +74,7 @@ TEST(ParserReference, ReferenceDifferenceOption) EXPECT_EQ(ast_reference_03->as()->on_update_option, ASTDeclareReference::SET_NULL); String reference_04 = "REFERENCES table_name (ref_col_01) MATCH FULL ON UPDATE NO ACTION ON DELETE NO ACTION"; - ASTPtr ast_reference_04 = parseQuery(p_reference, reference_04.data(), reference_04.data() + reference_04.size(), "", 0, 0); + ASTPtr ast_reference_04 = parseQuery(p_reference, reference_04.data(), reference_04.data() + reference_04.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_04->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_04->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_04->as()->kind, ASTDeclareReference::MATCH_FULL); @@ -82,11 +82,10 @@ TEST(ParserReference, ReferenceDifferenceOption) EXPECT_EQ(ast_reference_04->as()->on_update_option, ASTDeclareReference::NO_ACTION); String reference_05 = "REFERENCES table_name (ref_col_01) MATCH FULL ON UPDATE SET DEFAULT ON DELETE SET DEFAULT"; - ASTPtr ast_reference_05 = parseQuery(p_reference, reference_05.data(), reference_05.data() + reference_05.size(), "", 0, 0); + ASTPtr ast_reference_05 = parseQuery(p_reference, reference_05.data(), reference_05.data() + reference_05.size(), "", 0, 0, 0); EXPECT_EQ(ast_reference_05->as()->reference_table_name, "table_name"); EXPECT_EQ(ast_reference_05->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_05->as()->kind, ASTDeclareReference::MATCH_FULL); EXPECT_EQ(ast_reference_05->as()->on_delete_option, 
ASTDeclareReference::SET_DEFAULT); EXPECT_EQ(ast_reference_05->as()->on_update_option, ASTDeclareReference::SET_DEFAULT); } - diff --git a/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp b/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp index b375f73c55c..1876cd1d028 100644 --- a/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp @@ -14,7 +14,7 @@ TEST(ParserSubpartition, AllSubpatitionOptions) " DATA DIRECTORY 'data_directory' INDEX DIRECTORY 'index_directory' max_rows 1000 MIN_ROWs 0" " TABLESPACE table_space_name"; MySQLParser::ParserDeclareSubPartition p_subpartition; - ASTPtr ast = parseQuery(p_subpartition, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_subpartition, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclareSubPartition * declare_subpartition = ast->as(); EXPECT_EQ(declare_subpartition->logical_name, "subpartition_name"); @@ -32,7 +32,7 @@ TEST(ParserSubpartition, OptionalSubpatitionOptions) { String input = "SUBPARTITION subpartition_name STORAGE engine = engine_name max_rows 1000 min_rows 0 tablespace table_space_name"; MySQLParser::ParserDeclareSubPartition p_subpartition; - ASTPtr ast = parseQuery(p_subpartition, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_subpartition, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclareSubPartition * declare_subpartition = ast->as(); EXPECT_EQ(declare_subpartition->logical_name, "subpartition_name"); @@ -42,4 +42,3 @@ TEST(ParserSubpartition, OptionalSubpatitionOptions) EXPECT_EQ(declare_options->changes["max_rows"]->as()->value.safeGet(), 1000); EXPECT_EQ(declare_options->changes["tablespace"]->as()->name(), "table_space_name"); } - diff --git a/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp b/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp index 42b9279c96d..a84da7cb9d5 100644 --- a/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp @@ -18,7 +18,7 @@ TEST(ParserTableOptions, AllSubpatitionOptions) " STATS_PERSISTENT DEFAULT STATS_SAMPLE_PAGES 3 TABLESPACE tablespace_name STORAGE MEMORY UNION (table_01, table_02)"; ParserDeclareTableOptions p_table_options; - ASTPtr ast = parseQuery(p_table_options, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_table_options, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclareOptions * declare_options = ast->as(); EXPECT_EQ(declare_options->changes["auto_increment"]->as()->value.safeGet(), 1); @@ -56,7 +56,7 @@ TEST(ParserTableOptions, OptionalTableOptions) { String input = "STATS_AUTO_RECALC DEFAULT AUTO_INCREMENt = 1 "; ParserDeclareTableOptions p_table_options; - ASTPtr ast = parseQuery(p_table_options, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(p_table_options, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTDeclareOptions * declare_options = ast->as(); EXPECT_EQ(declare_options->changes["auto_increment"]->as()->value.safeGet(), 1); diff --git a/src/Parsers/PRQL/ParserPRQLQuery.cpp b/src/Parsers/PRQL/ParserPRQLQuery.cpp index b3733b727dc..fb1796714cb 100644 --- a/src/Parsers/PRQL/ParserPRQLQuery.cpp +++ b/src/Parsers/PRQL/ParserPRQLQuery.cpp @@ -69,7 +69,9 @@ bool ParserPRQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) "", false, max_query_size, - max_parser_depth); + max_parser_depth, + max_parser_backtracks, + true); if 
(!node) throw Exception( diff --git a/src/Parsers/PRQL/ParserPRQLQuery.h b/src/Parsers/PRQL/ParserPRQLQuery.h index 4fc450df6b6..88bf97f69d1 100644 --- a/src/Parsers/PRQL/ParserPRQLQuery.h +++ b/src/Parsers/PRQL/ParserPRQLQuery.h @@ -13,9 +13,10 @@ private: // These fields are not used when PRQL is disabled at build time. [[maybe_unused]] size_t max_query_size; [[maybe_unused]] size_t max_parser_depth; + [[maybe_unused]] size_t max_parser_backtracks; public: - ParserPRQLQuery(size_t max_query_size_, size_t max_parser_depth_) : max_query_size{max_query_size_}, max_parser_depth{max_parser_depth_} + ParserPRQLQuery(size_t max_query_size_, size_t max_parser_depth_, size_t max_parser_backtracks_) : max_query_size(max_query_size_), max_parser_depth(max_parser_depth_), max_parser_backtracks(max_parser_backtracks_) { } diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index b1cc7622e00..4bc95e67afb 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -865,7 +865,8 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected else if (s_modify_sql_security.ignore(pos, expected)) { /// This is a hack so we can reuse parser from create and don't have to write `MODIFY SQL SECURITY SQL SECURITY INVOKER` - pos -= 2; + --pos; + --pos; if (!sql_security_p.parse(pos, command_sql_security, expected)) return false; command->type = ASTAlterCommand::MODIFY_SQL_SECURITY; diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index 440a8bc1dc7..30bce57f9d9 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -286,7 +286,7 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E { const String type_int("INT"); Tokens tokens(type_int.data(), type_int.data() + type_int.size()); - Pos tmp_pos(tokens, 0); + Pos tmp_pos(tokens, pos.max_depth, pos.max_backtracks); Expected tmp_expected; ParserDataType().parse(tmp_pos, type, tmp_expected); } diff --git a/src/Parsers/QueryParameterVisitor.cpp b/src/Parsers/QueryParameterVisitor.cpp index b8679cc3b96..9afd9a8615c 100644 --- a/src/Parsers/QueryParameterVisitor.cpp +++ b/src/Parsers/QueryParameterVisitor.cpp @@ -43,7 +43,7 @@ NameSet analyzeReceiveQueryParams(const std::string & query) const char * query_end = query.data() + query.size(); ParserQuery parser(query_end); - ASTPtr extract_query_ast = parseQuery(parser, query_begin, query_end, "analyzeReceiveQueryParams", 0, 0); + ASTPtr extract_query_ast = parseQuery(parser, query_begin, query_end, "analyzeReceiveQueryParams", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); QueryParameterVisitor(query_params).visit(extract_query_ast); NameSet query_param_names; diff --git a/src/Parsers/TokenIterator.h b/src/Parsers/TokenIterator.h index 8cb59aa12e2..192f2f55e6a 100644 --- a/src/Parsers/TokenIterator.h +++ b/src/Parsers/TokenIterator.h @@ -62,18 +62,6 @@ public: return *this; } - ALWAYS_INLINE TokenIterator & operator-=(int value) - { - index -= value; - return *this; - } - - ALWAYS_INLINE TokenIterator & operator+=(int value) - { - index += value; - return *this; - } - ALWAYS_INLINE bool operator<(const TokenIterator & rhs) const { return index < rhs.index; } ALWAYS_INLINE bool operator<=(const TokenIterator & rhs) const { return index <= rhs.index; } ALWAYS_INLINE bool operator==(const TokenIterator & rhs) const { return index == rhs.index; } diff --git a/src/Parsers/examples/create_parser.cpp b/src/Parsers/examples/create_parser.cpp index 
c241b353b4f..b628c79435c 100644 --- a/src/Parsers/examples/create_parser.cpp +++ b/src/Parsers/examples/create_parser.cpp @@ -13,7 +13,7 @@ int main(int, char **) std::string input = "CREATE TABLE hits (URL String, UserAgentMinor2 FixedString(2), EventTime DateTime) ENGINE = Log"; ParserCreateQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); WriteBufferFromOStream out(std::cerr, 4096); formatAST(*ast, out); diff --git a/src/Parsers/examples/select_parser.cpp b/src/Parsers/examples/select_parser.cpp index 15295170c6b..3ed358121f6 100644 --- a/src/Parsers/examples/select_parser.cpp +++ b/src/Parsers/examples/select_parser.cpp @@ -23,7 +23,7 @@ try " FORMAT TabSeparated"; ParserQueryWithOutput parser(input.data() + input.size()); - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); std::cout << "Success." << std::endl; WriteBufferFromOStream out(std::cerr, 4096); diff --git a/src/Parsers/fuzzers/select_parser_fuzzer.cpp b/src/Parsers/fuzzers/select_parser_fuzzer.cpp index ae490ed4e56..aed83853c33 100644 --- a/src/Parsers/fuzzers/select_parser_fuzzer.cpp +++ b/src/Parsers/fuzzers/select_parser_fuzzer.cpp @@ -15,7 +15,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) DB::ParserQueryWithOutput parser(input.data() + input.size()); const UInt64 max_parser_depth = 1000; - DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, max_parser_depth); + const UInt64 max_parser_backtracks = 1000000; + DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, max_parser_depth, max_parser_backtracks); const UInt64 max_ast_depth = 1000; ast->checkDepth(max_ast_depth); diff --git a/src/Parsers/parseQuery.cpp b/src/Parsers/parseQuery.cpp index 8f9977c0b8d..7aad0b010a5 100644 --- a/src/Parsers/parseQuery.cpp +++ b/src/Parsers/parseQuery.cpp @@ -234,12 +234,13 @@ ASTPtr tryParseQuery( bool allow_multi_statements, size_t max_query_size, size_t max_parser_depth, + size_t max_parser_backtracks, bool skip_insignificant) { const char * query_begin = _out_query_end; Tokens tokens(query_begin, all_queries_end, max_query_size, skip_insignificant); /// NOTE: consider use UInt32 for max_parser_depth setting. 
- IParser::Pos token_iterator(tokens, static_cast(max_parser_depth)); + IParser::Pos token_iterator(tokens, static_cast(max_parser_depth), static_cast(max_parser_backtracks)); if (token_iterator->isEnd() || token_iterator->type == TokenType::Semicolon) @@ -356,10 +357,13 @@ ASTPtr parseQueryAndMovePosition( const std::string & query_description, bool allow_multi_statements, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { std::string error_message; - ASTPtr res = tryParseQuery(parser, pos, end, error_message, false, query_description, allow_multi_statements, max_query_size, max_parser_depth); + ASTPtr res = tryParseQuery( + parser, pos, end, error_message, false, query_description, allow_multi_statements, + max_query_size, max_parser_depth, max_parser_backtracks, true); if (res) return res; @@ -374,9 +378,10 @@ ASTPtr parseQuery( const char * end, const std::string & query_description, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { - return parseQueryAndMovePosition(parser, begin, end, query_description, false, max_query_size, max_parser_depth); + return parseQueryAndMovePosition(parser, begin, end, query_description, false, max_query_size, max_parser_depth, max_parser_backtracks); } @@ -385,9 +390,10 @@ ASTPtr parseQuery( const std::string & query, const std::string & query_description, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { - return parseQuery(parser, query.data(), query.data() + query.size(), query_description, max_query_size, max_parser_depth); + return parseQuery(parser, query.data(), query.data() + query.size(), query_description, max_query_size, max_parser_depth, max_parser_backtracks); } @@ -395,9 +401,10 @@ ASTPtr parseQuery( IParser & parser, const std::string & query, size_t max_query_size, - size_t max_parser_depth) + size_t max_parser_depth, + size_t max_parser_backtracks) { - return parseQuery(parser, query.data(), query.data() + query.size(), parser.getName(), max_query_size, max_parser_depth); + return parseQuery(parser, query.data(), query.data() + query.size(), parser.getName(), max_query_size, max_parser_depth, max_parser_backtracks); } @@ -406,6 +413,7 @@ std::pair splitMultipartQuery( std::vector & queries_list, size_t max_query_size, size_t max_parser_depth, + size_t max_parser_backtracks, bool allow_settings_after_format_in_insert) { ASTPtr ast; @@ -422,7 +430,7 @@ std::pair splitMultipartQuery( { begin = pos; - ast = parseQueryAndMovePosition(parser, pos, end, "", true, max_query_size, max_parser_depth); + ast = parseQueryAndMovePosition(parser, pos, end, "", true, max_query_size, max_parser_depth, max_parser_backtracks); auto * insert = ast->as(); diff --git a/src/Parsers/parseQuery.h b/src/Parsers/parseQuery.h index a087f145d2c..93c1a465267 100644 --- a/src/Parsers/parseQuery.h +++ b/src/Parsers/parseQuery.h @@ -19,7 +19,8 @@ ASTPtr tryParseQuery( size_t max_query_size, /// If (end - pos) > max_query_size and query is longer than max_query_size then throws "Max query size exceeded". /// Disabled if zero. Is used in order to check query size if buffer can contains data for INSERT query. size_t max_parser_depth, - bool skip_insignificant = true); /// If true, lexer will skip all insignificant tokens (e.g. whitespaces) + size_t max_parser_backtracks, + bool skip_insignificant); /// If true, lexer will skip all insignificant tokens (e.g. 
whitespaces) /// Parse query or throw an exception with error message. @@ -30,7 +31,8 @@ ASTPtr parseQueryAndMovePosition( const std::string & description, bool allow_multi_statements, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); ASTPtr parseQuery( IParser & parser, @@ -38,20 +40,23 @@ ASTPtr parseQuery( const char * end, const std::string & description, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); ASTPtr parseQuery( IParser & parser, const std::string & query, const std::string & query_description, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); ASTPtr parseQuery( IParser & parser, const std::string & query, size_t max_query_size, - size_t max_parser_depth); + size_t max_parser_depth, + size_t max_parser_backtracks); /** Split queries separated by ; on to list of single queries @@ -63,6 +68,7 @@ std::pair splitMultipartQuery( std::vector & queries_list, size_t max_query_size, size_t max_parser_depth, + size_t max_parser_backtracks, bool allow_settings_after_format_in_insert); } diff --git a/src/Parsers/tests/gtest_Parser.cpp b/src/Parsers/tests/gtest_Parser.cpp index 19947cd38cc..f0abc68f966 100644 --- a/src/Parsers/tests/gtest_Parser.cpp +++ b/src/Parsers/tests/gtest_Parser.cpp @@ -1,7 +1,4 @@ -#include -#include #include -#include #include #include #include @@ -10,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -54,12 +50,12 @@ TEST_P(ParserTest, parseQuery) { if (std::string(expected_ast).starts_with("throws")) { - EXPECT_THROW(parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0), DB::Exception); + EXPECT_THROW(parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0, 0), DB::Exception); } else { ASTPtr ast; - ASSERT_NO_THROW(ast = parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0)); + ASSERT_NO_THROW(ast = parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0, 0)); if (std::string("CREATE USER or ALTER USER query") != parser->getName() && std::string("ATTACH access entity query") != parser->getName()) { @@ -106,7 +102,7 @@ TEST_P(ParserTest, parseQuery) } else { - ASSERT_THROW(parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0), DB::Exception); + ASSERT_THROW(parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0, 0), DB::Exception); } } @@ -649,12 +645,13 @@ INSTANTIATE_TEST_SUITE_P(ParserKQLQuery, ParserKQLTest, static constexpr size_t kDummyMaxQuerySize = 256 * 1024; static constexpr size_t kDummyMaxParserDepth = 256; +static constexpr size_t kDummyMaxParserBacktracks = 1000000; INSTANTIATE_TEST_SUITE_P( ParserPRQL, ParserTest, ::testing::Combine( - ::testing::Values(std::make_shared(kDummyMaxQuerySize, kDummyMaxParserDepth)), + ::testing::Values(std::make_shared(kDummyMaxQuerySize, kDummyMaxParserDepth, kDummyMaxParserBacktracks)), ::testing::ValuesIn(std::initializer_list{ { "from albums\ngroup {author_id} (\n aggregate {first_published = min published}\n)\njoin a=author side:left (==author_id)\njoin p=purchases side:right (==author_id)\ngroup {a.id, p.purchase_id} (\n aggregate {avg_sell = min first_published}\n)", diff --git a/src/Parsers/tests/gtest_common.cpp b/src/Parsers/tests/gtest_common.cpp index 52d3ceb47e2..8ff9400d8a2 100644 --- a/src/Parsers/tests/gtest_common.cpp +++ b/src/Parsers/tests/gtest_common.cpp @@ -28,7 +28,7 @@ TEST_P(ParserRegexTest, parseQuery) ASSERT_TRUE(expected_ast); 
DB::ASTPtr ast; - ASSERT_NO_THROW(ast = parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0)); + ASSERT_NO_THROW(ast = parseQuery(*parser, input_text.begin(), input_text.end(), 0, 0, 0)); DB::WriteBufferFromOwnString buf; formatAST(*ast->clone(), buf, false, false); EXPECT_THAT(buf.str(), ::testing::MatchesRegex(expected_ast)); @@ -45,12 +45,12 @@ TEST_P(ParserKQLTest, parseKQLQuery) { if (std::string(expected_ast).starts_with("throws")) { - EXPECT_THROW(parseKQLQuery(*parser, input_text.begin(), input_text.end(), 0, 0), DB::Exception); + EXPECT_THROW(parseKQLQuery(*parser, input_text.begin(), input_text.end(), 0, 0, 0), DB::Exception); } else { DB::ASTPtr ast; - ASSERT_NO_THROW(ast = parseKQLQuery(*parser, input_text.begin(), input_text.end(), 0, 0)); + ASSERT_NO_THROW(ast = parseKQLQuery(*parser, input_text.begin(), input_text.end(), 0, 0, 0)); if (std::string("CREATE USER or ALTER USER query") != parser->getName() && std::string("ATTACH access entity query") != parser->getName()) { @@ -78,6 +78,6 @@ TEST_P(ParserKQLTest, parseKQLQuery) } else { - ASSERT_THROW(parseKQLQuery(*parser, input_text.begin(), input_text.end(), 0, 0), DB::Exception); + ASSERT_THROW(parseKQLQuery(*parser, input_text.begin(), input_text.end(), 0, 0, 0), DB::Exception); } } diff --git a/src/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp index c0a975f7a38..a1ba46125a7 100644 --- a/src/Parsers/tests/gtest_dictionary_parser.cpp +++ b/src/Parsers/tests/gtest_dictionary_parser.cpp @@ -40,7 +40,7 @@ TEST(ParserDictionaryDDL, SimpleDictionary) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); EXPECT_EQ(create->getTable(), "dict1"); EXPECT_EQ(create->getDatabase(), "test"); @@ -136,7 +136,7 @@ TEST(ParserDictionaryDDL, AttributesWithMultipleProperties) " SOURCE(CLICKHOUSE(HOST 'localhost'))"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); EXPECT_EQ(create->getTable(), "dict2"); EXPECT_EQ(create->getDatabase(), ""); @@ -183,7 +183,7 @@ TEST(ParserDictionaryDDL, CustomAttributePropertiesOrder) " LIFETIME(300)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); /// test attributes @@ -238,7 +238,7 @@ TEST(ParserDictionaryDDL, NestedSource) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); ASTCreateQuery * create = ast->as(); EXPECT_EQ(create->getTable(), "dict4"); EXPECT_EQ(create->getDatabase(), ""); @@ -286,7 +286,7 @@ TEST(ParserDictionaryDDL, Formatting) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); 
ASTCreateQuery * create = ast->as(); auto str = serializeAST(*create); EXPECT_EQ(str, "CREATE DICTIONARY test.dict5 (`key_column1` UInt64 DEFAULT 1 HIERARCHICAL INJECTIVE, `key_column2` String DEFAULT '', `second_column` UInt8 EXPRESSION intDiv(50, rand() % 1000), `third_column` UInt8) PRIMARY KEY key_column1, key_column2 SOURCE(MYSQL(HOST 'localhost' PORT 9000 USER 'default' REPLICA (HOST '127.0.0.1' PRIORITY 1) PASSWORD '')) LIFETIME(MIN 1 MAX 10) LAYOUT(CACHE(SIZE_IN_CELLS 50)) RANGE(MIN second_column MAX third_column)"); @@ -297,7 +297,7 @@ TEST(ParserDictionaryDDL, ParseDropQuery) String input1 = "DROP DICTIONARY test.dict1"; ParserDropQuery parser; - ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0); + ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0, 0); ASTDropQuery * drop1 = ast1->as(); EXPECT_TRUE(drop1->is_dictionary); @@ -308,7 +308,7 @@ TEST(ParserDictionaryDDL, ParseDropQuery) String input2 = "DROP DICTIONARY IF EXISTS dict2"; - ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0); + ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0, 0); ASTDropQuery * drop2 = ast2->as(); EXPECT_TRUE(drop2->is_dictionary); @@ -323,7 +323,7 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries) String input1 = "SHOW CREATE DICTIONARY test.dict1"; ParserTablePropertiesQuery parser; - ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0); + ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0, 0); ASTShowCreateDictionaryQuery * show1 = ast1->as(); EXPECT_EQ(show1->getTable(), "dict1"); @@ -332,7 +332,7 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries) String input2 = "EXISTS DICTIONARY dict2"; - ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0); + ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0, 0); ASTExistsDictionaryQuery * show2 = ast2->as(); EXPECT_EQ(show2->getTable(), "dict2"); diff --git a/src/Parsers/tests/gtest_format_hiliting.cpp b/src/Parsers/tests/gtest_format_hiliting.cpp index e87b093db9d..00e8197af1f 100644 --- a/src/Parsers/tests/gtest_format_hiliting.cpp +++ b/src/Parsers/tests/gtest_format_hiliting.cpp @@ -50,7 +50,7 @@ void compare(const String & expected, const String & query) { using namespace DB; ParserQuery parser(query.data() + query.size()); - ASTPtr ast = parseQuery(parser, query, 0, 0); + ASTPtr ast = parseQuery(parser, query, 0, 0, 0); WriteBufferFromOwnString write_buffer; IAST::FormatSettings settings(write_buffer, true, true); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 7b3fb0c5c91..8ca8f0f258b 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -538,7 +538,7 @@ FilterDAGInfo buildAdditionalFiltersIfNeeded(const StoragePtr & storage, ParserExpression parser; additional_filter_ast = parseQuery( parser, filter.data(), filter.data() + filter.size(), - "additional filter", settings.max_query_size, settings.max_parser_depth); + "additional filter", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); break; } } diff --git a/src/Planner/Utils.cpp b/src/Planner/Utils.cpp index bd0b831ee58..a04f9f502e2 100644 --- a/src/Planner/Utils.cpp +++ b/src/Planner/Utils.cpp @@ -523,7 +523,7 @@ ASTPtr parseAdditionalResultFilter(const Settings & settings) ParserExpression parser; 
auto additional_result_filter_ast = parseQuery( parser, additional_result_filter.data(), additional_result_filter.data() + additional_result_filter.size(), - "additional result filter", settings.max_query_size, settings.max_parser_depth); + "additional result filter", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); return additional_result_filter_ast; } diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp index f91f7cf536b..9d056b42101 100644 --- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp +++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp @@ -537,7 +537,7 @@ bool ConstantExpressionTemplate::parseLiteralAndAssertType( ParserArrayOfLiterals parser_array; ParserTupleOfLiterals parser_tuple; - IParser::Pos iterator(token_iterator, static_cast(settings.max_parser_depth)); + IParser::Pos iterator(token_iterator, static_cast(settings.max_parser_depth), static_cast(settings.max_parser_backtracks)); while (iterator->begin < istr.position()) ++iterator; Expected expected; diff --git a/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp b/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp index 9c7f095e661..67bdd1cf877 100644 --- a/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp @@ -274,7 +274,8 @@ static bool tryToExtractStructureFromCreateQuery(ReadBuffer & in, NamesAndTypesL String error; const char * start = create_query_str.data(); const char * end = create_query_str.data() + create_query_str.size(); - ASTPtr query = tryParseQuery(parser, start, end, error, false, "MySQL create query", false, DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr query = tryParseQuery(parser, start, end, error, false, "MySQL create query", false, + DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS, true); if (!query) return false; diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 8659dcd2318..353de76eea8 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -194,7 +194,7 @@ void ValuesBlockInputFormat::readUntilTheEndOfRowAndReTokenize(size_t current_co auto * row_end = buf->position(); buf->rollbackToCheckpoint(); tokens.emplace(buf->position(), row_end); - token_iterator.emplace(*tokens, static_cast(context->getSettingsRef().max_parser_depth)); + token_iterator.emplace(*tokens, static_cast(context->getSettingsRef().max_parser_depth), static_cast(context->getSettingsRef().max_parser_backtracks)); auto const & first = (*token_iterator).get(); if (first.isError() || first.isEnd()) { @@ -418,7 +418,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx { Expected expected; /// Keep a copy to the start of the column tokens to use if later if necessary - ti_start = IParser::Pos(*token_iterator, static_cast(settings.max_parser_depth)); + ti_start = IParser::Pos(*token_iterator, static_cast(settings.max_parser_depth), static_cast(settings.max_parser_backtracks)); parsed = parser.parse(*token_iterator, ast, expected); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 21e3cfcceab..fb92be6eed9 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ 
b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1343,7 +1343,7 @@ static void buildIndexes( { const auto & indices = settings.ignore_data_skipping_indices.toString(); Tokens tokens(indices.data(), indices.data() + indices.size(), settings.max_query_size); - IParser::Pos pos(tokens, static_cast(settings.max_parser_depth)); + IParser::Pos pos(tokens, static_cast(settings.max_parser_depth), static_cast(settings.max_parser_backtracks)); Expected expected; /// Use an unordered list rather than string vector diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index 15765f99b4b..f21991e8d58 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -885,7 +885,7 @@ namespace const char * begin = query_text.data(); const char * end = begin + query_text.size(); ParserQuery parser(end, settings.allow_settings_after_format_in_insert); - ast = parseQuery(parser, begin, end, "", settings.max_query_size, settings.max_parser_depth); + ast = parseQuery(parser, begin, end, "", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); /// Choose input format. insert_query = ast->as(); diff --git a/src/Server/PostgreSQLHandler.cpp b/src/Server/PostgreSQLHandler.cpp index c62dc8109ea..83e06628185 100644 --- a/src/Server/PostgreSQLHandler.cpp +++ b/src/Server/PostgreSQLHandler.cpp @@ -284,6 +284,7 @@ void PostgreSQLHandler::processQuery() auto parse_res = splitMultipartQuery(query->query, queries, settings.max_query_size, settings.max_parser_depth, + settings.max_parser_backtracks, settings.allow_settings_after_format_in_insert); if (!parse_res.second) throw Exception(ErrorCodes::SYNTAX_ERROR, "Cannot parse and execute the following part of query: {}", String(parse_res.first)); diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index e08dac3a332..16b89f24243 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -145,7 +145,7 @@ void ColumnDescription::readText(ReadBuffer & buf) readEscapedStringUntilEOL(modifiers, buf); ParserColumnDeclaration column_parser(/* require type */ true); - ASTPtr ast = parseQuery(column_parser, "x T " + modifiers, "column parser", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr ast = parseQuery(column_parser, "x T " + modifiers, "column parser", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); if (auto * col_ast = ast->as()) { @@ -211,7 +211,7 @@ void ColumnsDescription::setAliases(NamesAndAliases aliases) const char * alias_expression_pos = alias.expression.data(); const char * alias_expression_end = alias_expression_pos + alias.expression.size(); ParserExpression expression_parser; - description.default_desc.expression = parseQuery(expression_parser, alias_expression_pos, alias_expression_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + description.default_desc.expression = parseQuery(expression_parser, alias_expression_pos, alias_expression_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); add(std::move(description)); } diff --git a/src/Storages/ConstraintsDescription.cpp b/src/Storages/ConstraintsDescription.cpp index 219c3fd0c97..d492de2c2b2 100644 --- a/src/Storages/ConstraintsDescription.cpp +++ b/src/Storages/ConstraintsDescription.cpp @@ -45,7 +45,7 @@ ConstraintsDescription ConstraintsDescription::parse(const String & str) ConstraintsDescription res; ParserConstraintDeclarationList parser; - ASTPtr list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr 
list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); for (const auto & constraint : list->children) res.constraints.push_back(constraint); diff --git a/src/Storages/IndicesDescription.cpp b/src/Storages/IndicesDescription.cpp index c723fa4225c..14555dca63b 100644 --- a/src/Storages/IndicesDescription.cpp +++ b/src/Storages/IndicesDescription.cpp @@ -173,7 +173,7 @@ IndicesDescription IndicesDescription::parse(const String & str, const ColumnsDe return result; ParserIndexDeclarationList parser; - ASTPtr list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); for (const auto & index : list->children) result.emplace_back(IndexDescription::getIndexFromAST(index, columns, context)); diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index c407cef627d..d63b40e2b11 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -171,7 +171,7 @@ KeyDescription KeyDescription::parse(const String & str, const ColumnsDescriptio return result; ParserExpression parser; - ASTPtr ast = parseQuery(parser, "(" + str + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr ast = parseQuery(parser, "(" + str + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); FunctionNameNormalizer().visit(ast.get()); return getKeyFromAST(ast, columns, context); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 984d06e6a61..023202019e4 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -957,7 +957,7 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec() try { ParserCodec codec_parser; - auto codec_ast = parseQuery(codec_parser, codec_line.data() + buf.getPosition(), codec_line.data() + codec_line.length(), "codec parser", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto codec_ast = parseQuery(codec_parser, codec_line.data() + buf.getPosition(), codec_line.data() + codec_line.length(), "codec parser", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); default_codec = CompressionCodecFactory::instance().get(codec_ast, {}); } catch (const DB::Exception & ex) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 1721fd15b8d..fe45d0bee54 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -12,7 +12,7 @@ namespace ErrorCodes static CompressionCodecPtr getMarksCompressionCodec(const String & marks_compression_codec) { ParserCodec codec_parser; - auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(marks_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(marks_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); return CompressionCodecFactory::instance().get(ast, nullptr); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index fd83d2ebfe9..a31da5bc4fe 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -243,7 +243,7 @@ void MergeTreeDataPartWriterOnDisk::initPrimaryIndex() if (compress_primary_key) { ParserCodec 
codec_parser; - auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(settings.primary_key_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(settings.primary_key_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); CompressionCodecPtr primary_key_compression_codec = CompressionCodecFactory::instance().get(ast, nullptr); index_compressor_stream = std::make_unique(*index_file_hashing_stream, primary_key_compression_codec, settings.primary_key_compress_block_size); index_source_hashing_stream = std::make_unique(*index_compressor_stream); @@ -268,7 +268,7 @@ void MergeTreeDataPartWriterOnDisk::initStatistics() void MergeTreeDataPartWriterOnDisk::initSkipIndices() { ParserCodec codec_parser; - auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(settings.marks_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(settings.marks_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); CompressionCodecPtr marks_compression_codec = CompressionCodecFactory::instance().get(ast, nullptr); for (const auto & skip_index : skip_indices) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index d79590ded21..6a3b08d4d65 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -135,7 +135,7 @@ void MergeTreeDataPartWriterWide::addStreams( compression_codec = CompressionCodecFactory::instance().get(effective_codec_desc, nullptr, default_codec, true); ParserCodec codec_parser; - auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(settings.marks_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto ast = parseQuery(codec_parser, "(" + Poco::toUpper(settings.marks_compression_codec) + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); CompressionCodecPtr marks_compression_codec = CompressionCodecFactory::instance().get(ast, nullptr); const auto column_desc = metadata_snapshot->columns.tryGetColumnDescription(GetColumnsOptions(GetColumnsOptions::AllPhysical), column.getNameInStorage()); diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index ef679b61a79..6471f510291 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -606,7 +606,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd Strings forced_indices; { Tokens tokens(indices.data(), indices.data() + indices.size(), settings.max_query_size); - IParser::Pos pos(tokens, static_cast(settings.max_parser_depth)); + IParser::Pos pos(tokens, static_cast(settings.max_parser_depth), static_cast(settings.max_parser_backtracks)); Expected expected; if (!parseIdentifiersOrStringLiterals(pos, expected, forced_indices)) throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse force_data_skipping_indices ('{}')", indices); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp index 41188891118..0ca7a4d74d9 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp @@ -432,7 +432,7 @@ StorageInMemoryMetadata 
ReplicatedMergeTreeTableMetadata::Diff::getNewMetadata(c auto parse_key_expr = [] (const String & key_expr) { ParserNotEmptyExpressionList parser(false); - auto new_sorting_key_expr_list = parseQuery(parser, key_expr, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto new_sorting_key_expr_list = parseQuery(parser, key_expr, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); ASTPtr order_by_ast; if (new_sorting_key_expr_list->children.size() == 1) @@ -489,7 +489,7 @@ StorageInMemoryMetadata ReplicatedMergeTreeTableMetadata::Diff::getNewMetadata(c if (!new_ttl_table.empty()) { ParserTTLExpressionList parser; - auto ttl_for_table_ast = parseQuery(parser, new_ttl_table, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + auto ttl_for_table_ast = parseQuery(parser, new_ttl_table, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); new_metadata.table_ttl = TTLTableDescription::getTTLForTableFromAST( ttl_for_table_ast, new_metadata.columns, context, new_metadata.primary_key, true /* allow_suspicious; because it is replication */); } diff --git a/src/Storages/MutationCommands.cpp b/src/Storages/MutationCommands.cpp index f6ec277c270..aaf5c1b5d87 100644 --- a/src/Storages/MutationCommands.cpp +++ b/src/Storages/MutationCommands.cpp @@ -228,7 +228,7 @@ void MutationCommands::readText(ReadBuffer & in) ParserAlterCommandList p_alter_commands; auto commands_ast = parseQuery( - p_alter_commands, commands_str.data(), commands_str.data() + commands_str.length(), "mutation commands list", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + p_alter_commands, commands_str.data(), commands_str.data() + commands_str.length(), "mutation commands list", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); for (const auto & child : commands_ast->children) { diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index 64d329f74b2..f686fbda664 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -479,7 +479,7 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery( ASTPtr result; Tokens tokens(attr.attr_def.data(), attr.attr_def.data() + attr.attr_def.size()); - IParser::Pos pos(tokens, DBMS_DEFAULT_MAX_PARSER_DEPTH); + IParser::Pos pos(tokens, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); if (!expr_parser.parse(pos, result, expected)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Failed to parse default expression: {}", attr.attr_def); diff --git a/src/Storages/ProjectionsDescription.cpp b/src/Storages/ProjectionsDescription.cpp index 08ebe3a10d0..0bcbedee41a 100644 --- a/src/Storages/ProjectionsDescription.cpp +++ b/src/Storages/ProjectionsDescription.cpp @@ -341,7 +341,7 @@ ProjectionsDescription ProjectionsDescription::parse(const String & str, const C return result; ParserProjectionDeclarationList parser; - ASTPtr list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); for (const auto & projection_ast : list->children) { diff --git a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp index ac5dd6c05d0..635686780a0 100644 --- a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp +++ b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp @@ -78,7 +78,8 @@ static String clusterNameFromDDLQuery(ContextPtr context, const DDLTask & 
task) ParserQuery parser_query(end, settings.allow_settings_after_format_in_insert); ASTPtr query = parseQuery(parser_query, begin, end, description, settings.max_query_size, - settings.max_parser_depth); + settings.max_parser_depth, + settings.max_parser_backtracks); String cluster_name; if (const auto * query_on_cluster = dynamic_cast(query.get())) diff --git a/src/Storages/System/attachInformationSchemaTables.cpp b/src/Storages/System/attachInformationSchemaTables.cpp index 3482867bbf7..5afdd7a02ac 100644 --- a/src/Storages/System/attachInformationSchemaTables.cpp +++ b/src/Storages/System/attachInformationSchemaTables.cpp @@ -478,7 +478,7 @@ static void createInformationSchemaView(ContextMutablePtr context, IDatabase & d ParserCreateQuery parser; ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "Attach query from embedded resource " + metadata_resource_name, - DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH); + DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); auto & ast_create = ast->as(); assert(view_name == ast_create.getTable()); diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index a675afbdc26..3d1ce76dff1 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -425,7 +425,7 @@ TTLTableDescription TTLTableDescription::parse(const String & str, const Columns return result; ParserTTLExpressionList parser; - ASTPtr ast = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + ASTPtr ast = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); FunctionNameNormalizer().visit(ast.get()); return getTTLForTableFromAST(ast, columns, context, primary_key, context->getSettingsRef().allow_suspicious_ttl_expressions); diff --git a/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp index 32266f20923..c545367b63d 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -32,6 +32,7 @@ ColumnsDescription getStructureOfRemoteTableInShard( const ASTPtr & table_func_ptr) { String query; + const Settings & settings = context->getSettingsRef(); if (table_func_ptr) { @@ -110,7 +111,8 @@ ColumnsDescription getStructureOfRemoteTableInShard( column.default_desc.kind = columnDefaultKindFromString(kind_name); String expr_str = (*default_expr)[i].get(); column.default_desc.expression = parseQuery( - expr_parser, expr_str.data(), expr_str.data() + expr_str.size(), "default expression", 0, context->getSettingsRef().max_parser_depth); + expr_parser, expr_str.data(), expr_str.data() + expr_str.size(), "default expression", + 0, settings.max_parser_depth, settings.max_parser_backtracks); } res.add(column); diff --git a/src/Storages/tests/gtest_transform_query_for_external_database.cpp b/src/Storages/tests/gtest_transform_query_for_external_database.cpp index 475cf5a4eae..7e2d393c3d1 100644 --- a/src/Storages/tests/gtest_transform_query_for_external_database.cpp +++ b/src/Storages/tests/gtest_transform_query_for_external_database.cpp @@ -118,7 +118,7 @@ static void checkOld( const std::string & expected) { ParserSelectQuery parser; - ASTPtr ast = parseQuery(parser, query, 1000, 1000); + ASTPtr ast = parseQuery(parser, query, 1000, 1000, 1000000); SelectQueryInfo query_info; SelectQueryOptions select_options; query_info.syntax_analyzer_result @@ -161,7 +161,7 @@ static void checkNewAnalyzer( const std::string & expected) { ParserSelectQuery 
parser;
-    ASTPtr ast = parseQuery(parser, query, 1000, 1000);
+    ASTPtr ast = parseQuery(parser, query, 1000, 1000, 1000000);
 
     SelectQueryOptions select_query_options;
     auto query_tree = buildQueryTree(ast, state.context);
diff --git a/src/TableFunctions/Hive/TableFunctionHive.cpp b/src/TableFunctions/Hive/TableFunctionHive.cpp
index e840d5fc8be..80494dbe5a8 100644
--- a/src/TableFunctions/Hive/TableFunctionHive.cpp
+++ b/src/TableFunctions/Hive/TableFunctionHive.cpp
@@ -17,6 +17,7 @@
 #include
 #include
+
 namespace DB
 {
@@ -99,7 +100,8 @@ StoragePtr TableFunctionHive::executeImpl(
         "(" + partition_by_def + ")",
         "partition by declaration list",
         settings.max_query_size,
-        settings.max_parser_depth);
+        settings.max_parser_depth,
+        settings.max_parser_backtracks);
     StoragePtr storage;
     storage = std::make_shared(
         hive_metastore_url,
diff --git a/src/TableFunctions/TableFunctionExplain.cpp b/src/TableFunctions/TableFunctionExplain.cpp
index 400fc81e6d4..8607597fa67 100644
--- a/src/TableFunctions/TableFunctionExplain.cpp
+++ b/src/TableFunctions/TableFunctionExplain.cpp
@@ -63,7 +63,7 @@ std::vector TableFunctionExplain::skipAnalysisForArguments(const QueryTr
     return {};
 }
 
-void TableFunctionExplain::parseArguments(const ASTPtr & ast_function, ContextPtr /*context*/)
+void TableFunctionExplain::parseArguments(const ASTPtr & ast_function, ContextPtr context)
 {
     const auto * function = ast_function->as();
     if (!function || !function->arguments)
@@ -94,12 +94,12 @@ void TableFunctionExplain::parseArguments(const ASTPtr & ast_function, ContextPt
         const auto & settings_str = settings_arg->value.get();
         if (!settings_str.empty())
         {
-            constexpr UInt64 max_size = 4096;
-            constexpr UInt64 max_depth = 16;
+            const Settings & settings = context->getSettingsRef();
 
             /// parse_only_internals_ = true - we don't want to parse `SET` keyword
             ParserSetQuery settings_parser(/* parse_only_internals_ = */ true);
-            ASTPtr settings_ast = parseQuery(settings_parser, settings_str, max_size, max_depth);
+            ASTPtr settings_ast = parseQuery(settings_parser, settings_str,
+                settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks);
 
             explain_query->setSettings(std::move(settings_ast));
         }

From f0c9fe6bc90798d3d4e402817e14c83b831663f1 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 17 Mar 2024 19:54:20 +0100
Subject: [PATCH 371/374] Limit backtracking in parser
---
 src/Parsers/IParser.cpp | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/Parsers/IParser.cpp b/src/Parsers/IParser.cpp
index d1e9ace89b6..5679fba1a0c 100644
--- a/src/Parsers/IParser.cpp
+++ b/src/Parsers/IParser.cpp
@@ -24,9 +24,6 @@ IParser::Pos & IParser::Pos::operator=(const IParser::Pos & rhs)
 
     TokenIterator::operator=(rhs);
 
-    if (backtracks % 1000 == 0)
-        std::cerr << backtracks << "\n";
-
     return *this;
 }
 

From 65d091cc65362f9f86e1efa5a51001a67c11b03c Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 17 Mar 2024 20:16:23 +0100
Subject: [PATCH 372/374] Limit backtracking in parser
---
 src/Functions/getFuzzerData.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Functions/getFuzzerData.cpp b/src/Functions/getFuzzerData.cpp
index 5c536477401..a6f8dd1de2c 100644
--- a/src/Functions/getFuzzerData.cpp
+++ b/src/Functions/getFuzzerData.cpp
@@ -41,7 +41,7 @@ public:
         return DataTypeString().createColumnConst(input_rows_count, fuzz_data);
     }
 
-    static void update(const String & fuzz_data_)
+    [[maybe_unused]] static void update(const String & fuzz_data_)
     {
         fuzz_data = fuzz_data_;
     }

From 13797b9712994edfe37d5bcfbca9d19d174ea95b Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 17 Mar 2024 20:21:07 +0100
Subject: [PATCH 373/374] Fix style
---
 src/Common/ErrorCodes.cpp | 1 +
 src/Parsers/IParser.cpp   | 7 ++++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp
index 9f2572cbfc6..0f01036aa06 100644
--- a/src/Common/ErrorCodes.cpp
+++ b/src/Common/ErrorCodes.cpp
@@ -584,6 +584,7 @@
     M(703, INVALID_IDENTIFIER) \
     M(704, QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS) \
     M(705, TABLE_NOT_EMPTY) \
+    M(706, TOO_SLOW_PARSING) \
     \
     M(900, DISTRIBUTED_CACHE_ERROR) \
     M(901, CANNOT_USE_DISTRIBUTED_CACHE) \
     \
diff --git a/src/Parsers/IParser.cpp b/src/Parsers/IParser.cpp
index 5679fba1a0c..41981a4bb8a 100644
--- a/src/Parsers/IParser.cpp
+++ b/src/Parsers/IParser.cpp
@@ -4,6 +4,11 @@
 namespace DB
 {
 
+namespace ErrorCodes
+{
+    extern const int TOO_SLOW_PARSING;
+}
+
 IParser::Pos & IParser::Pos::operator=(const IParser::Pos & rhs)
 {
     depth = rhs.depth;
@@ -18,7 +23,7 @@ IParser::Pos & IParser::Pos::operator=(const IParser::Pos & rhs)
     {
         ++backtracks;
         if (max_backtracks && backtracks > max_backtracks)
-            throw Exception(ErrorCodes::TOO_DEEP_RECURSION, "Maximum amount of backtracking ({}) exceeded in the parser. "
+            throw Exception(ErrorCodes::TOO_SLOW_PARSING, "Maximum amount of backtracking ({}) exceeded in the parser. "
                 "Consider rising max_parser_backtracks parameter.", max_backtracks);
     }
 

From 95cfba9439b1c298fec390521510af32b6a5d66e Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 17 Mar 2024 20:26:43 +0100
Subject: [PATCH 374/374] Add a test
---
 src/Common/ErrorCodes.cpp                               | 9 ++++-----
 .../0_stateless/03012_parser_backtracking.reference     | 1 +
 tests/queries/0_stateless/03012_parser_backtracking.sh  | 7 +++++++
 3 files changed, 12 insertions(+), 5 deletions(-)
 create mode 100644 tests/queries/0_stateless/03012_parser_backtracking.reference
 create mode 100755 tests/queries/0_stateless/03012_parser_backtracking.sh

diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp
index 0f01036aa06..75ba9cff81e 100644
--- a/src/Common/ErrorCodes.cpp
+++ b/src/Common/ErrorCodes.cpp
@@ -584,11 +584,6 @@
     M(703, INVALID_IDENTIFIER) \
    M(704, QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS) \
     M(705, TABLE_NOT_EMPTY) \
-    M(706, TOO_SLOW_PARSING) \
-    \
-    M(900, DISTRIBUTED_CACHE_ERROR) \
-    M(901, CANNOT_USE_DISTRIBUTED_CACHE) \
-    \
     M(706, LIBSSH_ERROR) \
     M(707, GCP_ERROR) \
     M(708, ILLEGAL_STATISTIC) \
@@ -600,6 +595,10 @@
     M(715, CANNOT_DETECT_FORMAT) \
     M(716, CANNOT_FORGET_PARTITION) \
     M(717, EXPERIMENTAL_FEATURE_ERROR) \
+    M(718, TOO_SLOW_PARSING) \
+    \
+    M(900, DISTRIBUTED_CACHE_ERROR) \
+    M(901, CANNOT_USE_DISTRIBUTED_CACHE) \
     \
     M(999, KEEPER_EXCEPTION) \
     M(1000, POCO_EXCEPTION) \
diff --git a/tests/queries/0_stateless/03012_parser_backtracking.reference b/tests/queries/0_stateless/03012_parser_backtracking.reference
new file mode 100644
index 00000000000..84727754516
--- /dev/null
+++ b/tests/queries/0_stateless/03012_parser_backtracking.reference
@@ -0,0 +1 @@
+TOO_SLOW_PARSING
diff --git a/tests/queries/0_stateless/03012_parser_backtracking.sh b/tests/queries/0_stateless/03012_parser_backtracking.sh
new file mode 100755
index 00000000000..889753fb048
--- /dev/null
+++ b/tests/queries/0_stateless/03012_parser_backtracking.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+$CLICKHOUSE_LOCAL --query "SELECT((((((((((SELECT(((((((((SELECT((((((((((SELECT(((((((((SELECT((((((((((SELECT(((((((((SELECT 1+)))))))))))))))))))))))))))))))))))))))))))))))))))))))))" 2>&1 | grep -o -F 'TOO_SLOW_PARSING'