Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 01:51:59 +00:00)

Commit 0b6688ffe4: Merge branch 'master' into zvonand-issue-49290
contrib/CMakeLists.txt (vendored)

@@ -146,7 +146,7 @@ add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv
 add_contrib (cassandra-cmake cassandra) # requires: libuv
 if (NOT OS_DARWIN)
 add_contrib (curl-cmake curl)
-add_contrib (azure-cmake azure)
+add_contrib (azure-cmake azure) # requires: curl
 add_contrib (sentry-native-cmake sentry-native) # requires: curl
 endif()
 add_contrib (fmtlib-cmake fmtlib)
@@ -157,7 +157,7 @@ add_contrib (librdkafka-cmake librdkafka) # requires: libgsasl
 add_contrib (nats-io-cmake nats-io)
 add_contrib (isa-l-cmake isa-l)
 add_contrib (libhdfs3-cmake libhdfs3) # requires: google-protobuf, krb5, isa-l
-add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift/avro/arrow/libhdfs3
+add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arrow, libhdfs3
 add_contrib (cppkafka-cmake cppkafka)
 add_contrib (libpqxx-cmake libpqxx)
 add_contrib (libpq-cmake libpq)

@@ -1,6 +1,6 @@
 option (ENABLE_AZURE_BLOB_STORAGE "Enable Azure blob storage" ${ENABLE_LIBRARIES})

-if (NOT ENABLE_AZURE_BLOB_STORAGE OR BUILD_STANDALONE_KEEPER OR OS_FREEBSD OR (NOT ARCH_AMD64))
+if (NOT ENABLE_AZURE_BLOB_STORAGE OR BUILD_STANDALONE_KEEPER OR OS_FREEBSD)
 message(STATUS "Not using Azure blob storage")
 return()
 endif()

@@ -1,11 +1,11 @@
-if(NOT ARCH_AARCH64 AND NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_PPC64LE AND NOT ARCH_S390X)
+if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_PPC64LE AND NOT ARCH_S390X)
 option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES})
 elseif(ENABLE_HDFS)
 message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration")
 endif()

 if(NOT ENABLE_HDFS)
-message(STATUS "Not using hdfs")
+message(STATUS "Not using HDFS")
 return()
 endif()

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/ExternalDistributed
-sidebar_position: 12
+sidebar_position: 55
 sidebar_label: ExternalDistributed
 title: ExternalDistributed
 ---

@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/azureBlobStorage
+sidebar_position: 10
 sidebar_label: Azure Blob Storage
 ---

@@ -29,8 +30,8 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32)
 **Example**

 ``` sql
 CREATE TABLE test_table (key UInt64, data String)
 ENGINE = AzureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;',
 'test_container', 'test_table', 'CSV');

 INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');
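As a quick sanity check of the engine example in the hunk above, a read-back query might look like the sketch below; it is not part of the diff and assumes the `test_table` created in the example is reachable through the same azurite endpoint.

```sql
-- Read back the three rows inserted in the documentation example above.
SELECT key, data
FROM test_table
ORDER BY key;
```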
@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/deltalake
+sidebar_position: 40
 sidebar_label: DeltaLake
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/embedded-rocksdb
-sidebar_position: 9
+sidebar_position: 50
 sidebar_label: EmbeddedRocksDB
 ---

@@ -99,7 +99,7 @@ INSERT INTO test VALUES ('some key', 1, 'value', 3.2);

 ### Deletes

 Rows can be deleted using `DELETE` query or `TRUNCATE`.

 ```sql
 DELETE FROM test WHERE key LIKE 'some%' AND v1 > 1;
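The EmbeddedRocksDB hunk above notes that `TRUNCATE` is the alternative to predicate-based `DELETE`; a minimal sketch, assuming the same `test` table from the example:

```sql
-- Remove every row from the EmbeddedRocksDB table at once,
-- instead of deleting by predicate as in the DELETE example above.
TRUNCATE TABLE test;
```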
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/hdfs
-sidebar_position: 6
+sidebar_position: 80
 sidebar_label: HDFS
 ---

@@ -63,7 +63,7 @@ SELECT * FROM hdfs_engine_table LIMIT 2
 - `ALTER` and `SELECT...SAMPLE` operations.
 - Indexes.
 - [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is possible, but not recommended.

 :::note Zero-copy replication is not ready for production
 Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.
 :::

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/hive
-sidebar_position: 4
+sidebar_position: 84
 sidebar_label: Hive
 ---

@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/hudi
+sidebar_position: 86
 sidebar_label: Hudi
 ---

@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/iceberg
+sidebar_position: 90
 sidebar_label: Iceberg
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/jdbc
-sidebar_position: 3
+sidebar_position: 100
 sidebar_label: JDBC
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/kafka
-sidebar_position: 8
+sidebar_position: 110
 sidebar_label: Kafka
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/materialized-postgresql
-sidebar_position: 12
+sidebar_position: 130
 sidebar_label: MaterializedPostgreSQL
 title: MaterializedPostgreSQL
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/mongodb
-sidebar_position: 5
+sidebar_position: 135
 sidebar_label: MongoDB
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/mysql
-sidebar_position: 4
+sidebar_position: 138
 sidebar_label: MySQL
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/nats
-sidebar_position: 14
+sidebar_position: 140
 sidebar_label: NATS
 ---

@@ -83,12 +83,12 @@ You can select one of the subjects the table reads from and publish your data th
 CREATE TABLE queue (
 key UInt64,
 value UInt64
 ) ENGINE = NATS
 SETTINGS nats_url = 'localhost:4444',
 nats_subjects = 'subject1,subject2',
 nats_format = 'JSONEachRow';

 INSERT INTO queue
 SETTINGS stream_like_engine_insert_queue = 'subject2'
 VALUES (1, 1);
 ```
@@ -102,7 +102,7 @@ Example:
 key UInt64,
 value UInt64,
 date DateTime
 ) ENGINE = NATS
 SETTINGS nats_url = 'localhost:4444',
 nats_subjects = 'subject1',
 nats_format = 'JSONEachRow',
@@ -137,7 +137,7 @@ Example:
 CREATE TABLE queue (
 key UInt64,
 value UInt64
 ) ENGINE = NATS
 SETTINGS nats_url = 'localhost:4444',
 nats_subjects = 'subject1',
 nats_format = 'JSONEachRow',
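The NATS hunks above only show the queue table itself; the usual way such a stream-like table is consumed is through a materialized view into a regular table. The sketch below is not part of this diff, and the `daily` target table and `consumer` view names are illustrative assumptions.

```sql
-- Hypothetical names: 'daily' (MergeTree target) and 'consumer' (materialized view).
CREATE TABLE daily (key UInt64, value UInt64)
ENGINE = MergeTree() ORDER BY key;

-- Continuously move rows arriving on the NATS-backed 'queue' table into 'daily'.
CREATE MATERIALIZED VIEW consumer TO daily
AS SELECT key, value FROM queue;

SELECT key, value FROM daily ORDER BY key;
```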
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/odbc
-sidebar_position: 2
+sidebar_position: 150
 sidebar_label: ODBC
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/postgresql
-sidebar_position: 11
+sidebar_position: 160
 sidebar_label: PostgreSQL
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/rabbitmq
-sidebar_position: 10
+sidebar_position: 170
 sidebar_label: RabbitMQ
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/redis
-sidebar_position: 43
+sidebar_position: 175
 sidebar_label: Redis
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/s3
-sidebar_position: 7
+sidebar_position: 180
 sidebar_label: S3
 ---

@@ -8,30 +8,7 @@ sidebar_label: S3

 This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ecosystem. This engine is similar to the [HDFS](../../../engines/table-engines/special/file.md#table_engines-hdfs) engine, but provides S3-specific features.

-## Create Table {#creating-a-table}
+## Example

-``` sql
-CREATE TABLE s3_engine_table (name String, value UInt32)
-ENGINE = S3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key,] format, [compression])
-[PARTITION BY expr]
-[SETTINGS ...]
-```
-
-**Engine parameters**
-
-- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
-- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
-- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
-- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
-- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will auto-detect compression by file extension.
-
-### PARTITION BY
-
-`PARTITION BY` — Optional. In most cases you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
-
-For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
-
-**Example**
-
 ``` sql
 CREATE TABLE s3_engine_table (name String, value UInt32)
@@ -49,6 +26,135 @@ SELECT * FROM s3_engine_table LIMIT 2;
 │ two  │ 2 │
 └──────┴───────┘
 ```
+
+## Create Table {#creating-a-table}
+
+``` sql
+CREATE TABLE s3_engine_table (name String, value UInt32)
+ENGINE = S3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key,] format, [compression])
+[PARTITION BY expr]
+[SETTINGS ...]
+```
+
+### Engine parameters
+
+- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
+- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
+- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
+- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
+- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will auto-detect compression by file extension.
+
+### PARTITION BY
+
+`PARTITION BY` — Optional. In most cases you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
+
+For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+
+### Querying partitioned data
+
+This example uses the [docker compose recipe](https://github.com/ClickHouse/examples/tree/5fdc6ff72f4e5137e23ea075c88d3f44b0202490/docker-compose-recipes/recipes/ch-and-minio-S3), which integrates ClickHouse and MinIO. You should be able to reproduce the same queries using S3 by replacing the endpoint and authentication values.
+
+Notice that the S3 endpoint in the `ENGINE` configuration uses the parameter token `{_partition_id}` as part of the S3 object (filename), and that the SELECT queries select against those resulting object names (e.g., `test_3.csv`).
+
+:::note
+As shown in the example, querying from S3 tables that are partitioned is
+not directly supported at this time, but can be accomplished by querying the bucket contents with a wildcard.
+
+The primary use-case for writing
+partitioned data in S3 is to enable transferring that data into another
+ClickHouse system (for example, moving from on-prem systems to ClickHouse
+Cloud). Because ClickHouse datasets are often very large, and network
+reliability is sometimes imperfect it makes sense to transfer datasets
+in subsets, hence partitioned writes.
+:::
+
+#### Create the table
+```sql
+CREATE TABLE p
+(
+`column1` UInt32,
+`column2` UInt32,
+`column3` UInt32
+)
+ENGINE = S3(
+# highlight-next-line
+'http://minio:10000/clickhouse//test_{_partition_id}.csv',
+'minioadmin',
+'minioadminpassword',
+'CSV')
+PARTITION BY column3
+```
+
+#### Insert data
+```sql
+insert into p values (1, 2, 3), (3, 2, 1), (78, 43, 45)
+```
+
+#### Select from partition 3
+
+:::tip
+This query uses the s3 table function
+:::
+
+```sql
+SELECT *
+FROM s3('http://minio:10000/clickhouse//test_3.csv', 'minioadmin', 'minioadminpassword', 'CSV')
+```
+```response
+┌─c1─┬─c2─┬─c3─┐
+│  1 │  2 │  3 │
+└────┴────┴────┘
+```
+
+#### Select from partition 1
+```sql
+SELECT *
+FROM s3('http://minio:10000/clickhouse//test_1.csv', 'minioadmin', 'minioadminpassword', 'CSV')
+```
+```response
+┌─c1─┬─c2─┬─c3─┐
+│  3 │  2 │  1 │
+└────┴────┴────┘
+```
+
+#### Select from partition 45
+```sql
+SELECT *
+FROM s3('http://minio:10000/clickhouse//test_45.csv', 'minioadmin', 'minioadminpassword', 'CSV')
+```
+```response
+┌─c1─┬─c2─┬─c3─┐
+│ 78 │ 43 │ 45 │
+└────┴────┴────┘
+```
+
+#### Select from all partitions
+
+```sql
+SELECT *
+FROM s3('http://minio:10000/clickhouse//**', 'minioadmin', 'minioadminpassword', 'CSV')
+```
+```response
+┌─c1─┬─c2─┬─c3─┐
+│  3 │  2 │  1 │
+└────┴────┴────┘
+┌─c1─┬─c2─┬─c3─┐
+│  1 │  2 │  3 │
+└────┴────┴────┘
+┌─c1─┬─c2─┬─c3─┐
+│ 78 │ 43 │ 45 │
+└────┴────┴────┘
+```
+
+You may naturally try to `Select * from p`, but as noted above, this query will fail; use the preceding query.
+
+```sql
+SELECT * FROM p
+```
+```response
+Received exception from server (version 23.4.1):
+Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading from a partitioned S3 storage is not implemented yet. (NOT_IMPLEMENTED)
+```
+
 ## Virtual columns {#virtual-columns}

 - `_path` — Path to the file.
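Since the hunk above also lists the `_path` virtual column, one way to see which S3 object each row of the partitioned-write example came from is the sketch below; it is not part of the diff and reuses the MinIO endpoint, credentials, and wildcard from the example.

```sql
-- _path is a virtual column exposing the object each row was read from,
-- e.g. clickhouse//test_3.csv for the rows written to partition 3.
SELECT _path, *
FROM s3('http://minio:10000/clickhouse//**', 'minioadmin', 'minioadminpassword', 'CSV');
```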
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/sqlite
-sidebar_position: 7
+sidebar_position: 185
 sidebar_label: SQLite
 ---

@@ -1,9 +0,0 @@
----
-slug: /en/operations/optimizing-performance/
-sidebar_label: Optimizing Performance
-sidebar_position: 52
----
-
-# Optimizing Performance
-
-- [Sampling query profiler](../../operations/optimizing-performance/sampling-query-profiler.md)

@@ -1975,6 +1975,10 @@ The time zone is necessary for conversions between String and DateTime formats w
 <timezone>Asia/Istanbul</timezone>
 ```

+**See also**
+
+- [session_timezone](../settings/settings.md#session_timezone)
+
 ## tcp_port {#server_configuration_parameters-tcp_port}

 Port for communicating with clients over the TCP protocol.
@@ -4251,6 +4251,69 @@ Default value: `0`.
 Use this setting only for backward compatibility if your use cases depend on old syntax.
 :::

+## session_timezone {#session_timezone}
+
+Sets the implicit time zone of the current session or query.
+The implicit time zone is the time zone applied to values of type DateTime/DateTime64 which have no explicitly specified time zone.
+The setting takes precedence over the globally configured (server-level) implicit time zone.
+A value of '' (empty string) means that the implicit time zone of the current session or query is equal to the [server time zone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone).
+
+You can use functions `timeZone()` and `serverTimeZone()` to get the session time zone and server time zone.
+
+Possible values:
+
+- Any time zone name from `system.time_zones`, e.g. `Europe/Berlin`, `UTC` or `Zulu`
+
+Default value: `''`.
+
+Examples:
+
+```sql
+SELECT timeZone(), serverTimeZone() FORMAT TSV
+
+Europe/Berlin	Europe/Berlin
+```
+
+```sql
+SELECT timeZone(), serverTimeZone() SETTINGS session_timezone = 'Asia/Novosibirsk' FORMAT TSV
+
+Asia/Novosibirsk	Europe/Berlin
+```
+
+Assign session time zone 'America/Denver' to the inner DateTime without explicitly specified time zone:
+
+```sql
+SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver' FORMAT TSV
+
+1999-12-13 07:23:23.123
+```
+
+:::warning
+Not all functions that parse DateTime/DateTime64 respect `session_timezone`. This can lead to subtle errors.
+See the following example and explanation.
+:::
+
+```sql
+CREATE TABLE test_tz (`d` DateTime('UTC')) ENGINE = Memory AS SELECT toDateTime('2000-01-01 00:00:00', 'UTC');
+
+SELECT *, timeZone() FROM test_tz WHERE d = toDateTime('2000-01-01 00:00:00') SETTINGS session_timezone = 'Asia/Novosibirsk'
+0 rows in set.
+
+SELECT *, timeZone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS session_timezone = 'Asia/Novosibirsk'
+┌───────────────────d─┬─timeZone()───────┐
+│ 2000-01-01 00:00:00 │ Asia/Novosibirsk │
+└─────────────────────┴──────────────────┘
+```
+
+This happens due to different parsing pipelines:
+
+- `toDateTime()` without explicitly given time zone used in the first `SELECT` query honors setting `session_timezone` and the global time zone.
+- In the second query, a DateTime is parsed from a String, and inherits the type and time zone of the existing column `d`. Thus, setting `session_timezone` and the global time zone are not honored.
+
+**See also**
+
+- [timezone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
+
 ## final {#final}

 Automatically applies [FINAL](../../sql-reference/statements/select/from.md#final-modifier) modifier to all tables in a query, to tables where [FINAL](../../sql-reference/statements/select/from.md#final-modifier) is applicable, including joined tables and tables in sub-queries, and
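Given the parsing pitfall described in the new section, a defensive variant of the failing comparison is to give the string literal an explicit time zone instead of relying on the session setting. The sketch below is not part of the diff and reuses the `test_tz` table from the example.

```sql
-- Comparing against a value parsed with an explicit time zone does not depend on
-- session_timezone during the String -> DateTime conversion, so the row matches.
SELECT *, timeZone()
FROM test_tz
WHERE d = toDateTime('2000-01-01 00:00:00', 'UTC')
SETTINGS session_timezone = 'Asia/Novosibirsk';
```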
@@ -139,8 +139,8 @@ makeDateTime32(year, month, day, hour, minute, second[, fraction[, precision[, t

 ## timeZone

-Returns the timezone of the server.
-If the function is executed in the context of a distributed table, it generates a normal column with values relevant to each shard, otherwise it produces a constant value.
+Returns the timezone of the current session, i.e. the value of setting [session_timezone](../../operations/settings/settings.md#session_timezone).
+If the function is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard, otherwise it produces a constant value.

 **Syntax**

@@ -156,6 +156,33 @@ Alias: `timezone`.

 Type: [String](../../sql-reference/data-types/string.md).

+**See also**
+
+- [serverTimeZone](#serverTimeZone)
+
+## serverTimeZone
+
+Returns the timezone of the server, i.e. the value of setting [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone).
+If the function is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise, it produces a constant value.
+
+**Syntax**
+
+``` sql
+serverTimeZone()
+```
+
+Alias: `serverTimezone`.
+
+**Returned value**
+
+- Timezone.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**See also**
+
+- [timeZone](#timeZone)
+
 ## toTimeZone

 Converts a date or date with time to the specified time zone. Does not change the internal value (number of unix seconds) of the data, only the value's time zone attribute and the value's string representation changes.
|
|||||||
└────────────────────────────┘
|
└────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## L2SquaredDistance
|
||||||
|
|
||||||
|
Calculates the sum of the squares of the difference between the corresponding elements of two vectors.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
L2SquaredDistance(vector1, vector2)
|
||||||
|
```
|
||||||
|
|
||||||
|
Alias: `distanceL2Squared`.
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `vector1` — First vector. [Tuple](../../sql-reference/data-types/tuple.md) or [Array](../../sql-reference/data-types/array.md).
|
||||||
|
- `vector2` — Second vector. [Tuple](../../sql-reference/data-types/tuple.md) or [Array](../../sql-reference/data-types/array.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
Type: [Float](../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT L2SquaredDistance([1, 2, 3], [0, 0, 0])
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─L2SquaredDistance([1, 2, 3], [0, 0, 0])─┐
|
||||||
|
│ 14 │
|
||||||
|
└─────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## LinfDistance
|
## LinfDistance
|
||||||
|
|
||||||
Calculates the distance between two points (the values of the vectors are the coordinates) in `L_{inf}` space ([maximum norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#Maximum_norm_(special_case_of:_infinity_norm,_uniform_norm,_or_supremum_norm))).
|
Calculates the distance between two points (the values of the vectors are the coordinates) in `L_{inf}` space ([maximum norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#Maximum_norm_(special_case_of:_infinity_norm,_uniform_norm,_or_supremum_norm))).
|
||||||
|
@ -4,6 +4,8 @@ sidebar_position: 130
|
|||||||
sidebar_label: NLP (experimental)
|
sidebar_label: NLP (experimental)
|
||||||
---
|
---
|
||||||
|
|
||||||
|
# Natural Language Processing (NLP) Functions
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
|
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
|
||||||
:::
|
:::
|
||||||
|
@@ -1,5 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/azureBlobStorage
+sidebar_position: 10
 sidebar_label: azureBlobStorage
 keywords: [azure blob storage]
 ---
@@ -34,16 +35,16 @@ A table with the specified structure for reading or writing data in the specifie
 Write data into azure blob storage using the following :

 ```sql
 INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1',
 'test_container', 'test_{_partition_id}.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
 'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32') PARTITION BY column3 VALUES (1, 2, 3), (3, 2, 1), (78, 43, 3);
 ```

 And then it can be read using

 ```sql
 SELECT * FROM azureBlobStorage('http://azurite1:10000/devstoreaccount1',
 'test_container', 'test_1.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
 'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32');
 ```

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/cluster
-sidebar_position: 50
+sidebar_position: 30
 sidebar_label: cluster
 title: "cluster, clusterAllReplicas"
 ---
@@ -9,7 +9,7 @@ Allows to access all shards in an existing cluster which configured in `remote_s

 `clusterAllReplicas` function — same as `cluster`, but all replicas are queried. Each replica in a cluster is used as a separate shard/connection.

 :::note
 All available clusters are listed in the [system.clusters](../../operations/system-tables/clusters.md) table.
 :::

@@ -23,9 +23,9 @@ clusterAllReplicas('cluster_name', db, table[, sharding_key])
 ```
 **Arguments**

 - `cluster_name` – Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
 - `db.table` or `db`, `table` - Name of a database and a table.
 - `sharding_key` - A sharding key. Optional. Needs to be specified if the cluster has more than one shard.

 **Returned value**

@@ -1,6 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/deltalake
-sidebar_label: DeltaLake
+sidebar_position: 45
+sidebar_label: deltaLake
 ---

 # deltaLake Table Function

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/dictionary
-sidebar_position: 54
+sidebar_position: 47
 sidebar_label: dictionary
 title: dictionary
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-functions/executable
-sidebar_position: 55
+sidebar_position: 50
 sidebar_label: executable
 keywords: [udf, user defined function, clickhouse, executable, table, function]
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/file
-sidebar_position: 37
+sidebar_position: 60
 sidebar_label: file
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/format
-sidebar_position: 56
+sidebar_position: 65
 sidebar_label: format
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/gcs
-sidebar_position: 45
+sidebar_position: 70
 sidebar_label: gcs
 keywords: [gcs, bucket]
 ---
@@ -16,7 +16,7 @@ gcs(path [,hmac_key, hmac_secret] [,format] [,structure] [,compression])
 ```

 :::tip GCS
 The GCS Table Function integrates with Google Cloud Storage by using the GCS XML API and HMAC keys. See the [Google interoperability docs]( https://cloud.google.com/storage/docs/interoperability) for more details about the endpoint and HMAC.

 :::

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/generate
-sidebar_position: 47
+sidebar_position: 75
 sidebar_label: generateRandom
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/hdfs
-sidebar_position: 45
+sidebar_position: 80
 sidebar_label: hdfs
 ---

@@ -79,7 +79,7 @@ SELECT count(*)
 FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32')
 ```

 :::note
 If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
 :::

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/hdfsCluster
-sidebar_position: 55
+sidebar_position: 81
 sidebar_label: hdfsCluster
 ---

@@ -50,7 +50,7 @@ SELECT count(*)
 FROM hdfsCluster('cluster_simple', 'hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32')
 ```

 :::note
 If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
 :::

@@ -1,6 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/hudi
-sidebar_label: Hudi
+sidebar_position: 85
+sidebar_label: hudi
 ---

 # hudi Table Function

@@ -1,6 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/iceberg
-sidebar_label: Iceberg
+sidebar_position: 90
+sidebar_label: iceberg
 ---

 # iceberg Table Function

@@ -1,10 +1,10 @@
 ---
 slug: /en/sql-reference/table-functions/
 sidebar_label: Table Functions
-sidebar_position: 34
+sidebar_position: 1
 ---

 # Table Functions

 Table functions are methods for constructing tables.

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/input
-sidebar_position: 46
+sidebar_position: 95
 sidebar_label: input
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/jdbc
-sidebar_position: 43
+sidebar_position: 100
 sidebar_label: jdbc
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/merge
-sidebar_position: 38
+sidebar_position: 130
 sidebar_label: merge
 ---

@@ -16,7 +16,7 @@ merge('db_name', 'tables_regexp')
 **Arguments**

 - `db_name` — Possible values:
 - database name,
 - constant expression that returns a string with a database name, for example, `currentDatabase()`,
 - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/mongodb
-sidebar_position: 42
+sidebar_position: 135
 sidebar_label: mongodb
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/mysql
-sidebar_position: 42
+sidebar_position: 137
 sidebar_label: mysql
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/null
-sidebar_position: 53
+sidebar_position: 140
 sidebar_label: null function
 title: 'null'
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/numbers
-sidebar_position: 39
+sidebar_position: 145
 sidebar_label: numbers
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/odbc
-sidebar_position: 44
+sidebar_position: 150
 sidebar_label: odbc
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/postgresql
-sidebar_position: 42
+sidebar_position: 160
 sidebar_label: postgresql
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/redis
-sidebar_position: 43
+sidebar_position: 170
 sidebar_label: redis
 ---

@@ -31,7 +31,7 @@ redis(host:port, key, structure[, db_index[, password[, pool_size]]])
 - `primary` must be specified, it supports only one column in the primary key. The primary key will be serialized in binary as a Redis key.

 - columns other than the primary key will be serialized in binary as Redis value in corresponding order.

 - queries with key equals or in filtering will be optimized to multi keys lookup from Redis. If queries without filtering key full table scan will happen which is a heavy operation.

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/remote
-sidebar_position: 40
+sidebar_position: 175
 sidebar_label: remote
 ---

@@ -89,10 +89,10 @@ SELECT * FROM remote_table;
 ```

 ### Migration of tables from one system to another:
 This example uses one table from a sample dataset. The database is `imdb`, and the table is `actors`.

 #### On the source ClickHouse system (the system that currently hosts the data)
 - Verify the source database and table name (`imdb.actors`)
 ```sql
 show databases
 ```
@@ -114,9 +114,8 @@ This example uses one table from a sample dataset. The database is `imdb`, and
 `first_name` String,
 `last_name` String,
 `gender` FixedString(1))
-ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
-ORDER BY (id, first_name, last_name, gender)
-SETTINGS index_granularity = 8192
+ENGINE = MergeTree
+ORDER BY (id, first_name, last_name, gender);
 ```

 #### On the destination ClickHouse system:
@@ -132,9 +131,8 @@ This example uses one table from a sample dataset. The database is `imdb`, and
 `first_name` String,
 `last_name` String,
 `gender` FixedString(1))
-ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
-ORDER BY (id, first_name, last_name, gender)
-SETTINGS index_granularity = 8192
+ENGINE = MergeTree
+ORDER BY (id, first_name, last_name, gender);
 ```

 #### Back on the source deployment:
@@ -142,7 +140,7 @@ This example uses one table from a sample dataset. The database is `imdb`, and
 Insert into the new database and table created on the remote system. You will need the host, port, username, password, destination database, and destination table.
 ```sql
 INSERT INTO FUNCTION
-remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD', rand())
+remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD')
 SELECT * from imdb.actors
 ```

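After the `INSERT INTO FUNCTION remoteSecure(...)` shown in the last hunk, a natural follow-up (not part of this diff) is to compare row counts on both systems; a sketch using the same host and credential placeholders:

```sql
-- On the source system: local row count versus the rows now visible on the destination.
SELECT count() FROM imdb.actors;

SELECT count()
FROM remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD');
```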
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/s3
-sidebar_position: 45
+sidebar_position: 180
 sidebar_label: s3
 keywords: [s3, gcs, bucket]
 ---
@@ -33,7 +33,7 @@ For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_
 and not ~~https://storage.cloud.google.com~~.
 :::

 - `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
 - `format` — The [format](../../interfaces/formats.md#formats) of the file.
 - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
 - `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/s3Cluster
-sidebar_position: 55
+sidebar_position: 181
 sidebar_label: s3Cluster
 title: "s3Cluster Table Function"
 ---
@@ -31,18 +31,18 @@ Select the data from all the files in the `/root/data/clickhouse` and `/root/dat

 ``` sql
 SELECT * FROM s3Cluster(
 'cluster_simple',
 'http://minio1:9001/root/data/{clickhouse,database}/*',
 'minio',
 'minio123',
 'CSV',
 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))'
 ) ORDER BY (name, value, polygon);
 ```

 Count the total amount of rows in all files in the cluster `cluster_simple`:

 :::tip
 If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
 :::

@@ -1,19 +1,19 @@
 ---
 slug: /en/sql-reference/table-functions/sqlite
-sidebar_position: 55
+sidebar_position: 185
 sidebar_label: sqlite
 title: sqlite
 ---

 Allows to perform queries on a data stored in an [SQLite](../../engines/database-engines/sqlite.md) database.

 **Syntax**

 ``` sql
 sqlite('db_path', 'table_name')
 ```

 **Arguments**

 - `db_path` — Path to a file with an SQLite database. [String](../../sql-reference/data-types/string.md).
 - `table_name` — Name of a table in the SQLite database. [String](../../sql-reference/data-types/string.md).
@@ -40,6 +40,6 @@ Result:
 └───────┴──────┘
 ```

 **See Also**

 - [SQLite](../../engines/table-engines/integrations/sqlite.md) table engine
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/url
-sidebar_position: 41
+sidebar_position: 200
 sidebar_label: url
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/urlCluster
-sidebar_position: 55
+sidebar_position: 201
 sidebar_label: urlCluster
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/view
-sidebar_position: 51
+sidebar_position: 210
 sidebar_label: view
 title: view
 ---

@ -1,453 +1,6 @@
|
|||||||
agg_functions/combinators.md query-language/agg-functions/combinators.md
|
The redirects from this file were moved to the Docusaurus configuration file.
|
||||||
agg_functions/index.md query-language/agg-functions/index.md
|
If you need to add a redirect please either open a PR in
|
||||||
agg_functions/parametric_functions.md query-language/agg-functions/parametric-functions.md
|
https://github.com/clickhouse/clickhouse-docs adding the redirect to
|
||||||
agg_functions/reference.md query-language/agg-functions/reference.md
|
https://github.com/ClickHouse/clickhouse-docs/blob/main/docusaurus.config.js
|
||||||
changelog/2017.md whats-new/changelog/2017.md
|
or open an issue in the same repo and provide the old URL and new URL to have
|
||||||
changelog/2018.md whats-new/changelog/2018.md
|
the redirect added.
|
||||||
changelog/2019.md whats-new/changelog/2019.md
|
|
||||||
changelog/index.md whats-new/changelog/index.md
|
|
||||||
commercial/cloud.md https://clickhouse.com/cloud/
|
|
||||||
data_types/array.md sql-reference/data-types/array.md
|
|
||||||
data_types/boolean.md sql-reference/data-types/boolean.md
|
|
||||||
data_types/date.md sql-reference/data-types/date.md
|
|
||||||
data_types/datetime.md sql-reference/data-types/datetime.md
|
|
||||||
data_types/datetime64.md sql-reference/data-types/datetime64.md
|
|
||||||
data_types/decimal.md sql-reference/data-types/decimal.md
|
|
||||||
data_types/domains/ipv4.md sql-reference/data-types/ipv4.md
|
|
||||||
data_types/domains/ipv6.md sql-reference/data-types/ipv6.md
|
|
||||||
data_types/domains/overview.md sql-reference/data-types/domains/overview.md
|
|
||||||
data_types/enum.md sql-reference/data-types/enum.md
|
|
||||||
data_types/fixedstring.md sql-reference/data-types/fixedstring.md
|
|
||||||
data_types/float.md sql-reference/data-types/float.md
|
|
||||||
data_types/index.md sql-reference/data-types/index.md
|
|
||||||
data_types/int_uint.md sql-reference/data-types/int-uint.md
|
|
||||||
data_types/nested_data_structures/aggregatefunction.md sql-reference/data-types/aggregatefunction.md
|
|
||||||
data_types/nested_data_structures/index.md sql-reference/data-types/nested-data-structures/index.md
|
|
||||||
data_types/nested_data_structures/nested.md sql-reference/data-types/nested-data-structures/nested.md
|
|
||||||
data_types/nullable.md sql-reference/data-types/nullable.md
|
|
||||||
data_types/special_data_types/expression.md sql-reference/data-types/special-data-types/expression.md
|
|
||||||
data_types/special_data_types/index.md sql-reference/data-types/special-data-types/index.md
|
|
||||||
data_types/special_data_types/interval.md sql-reference/data-types/special-data-types/interval.md
|
|
||||||
data_types/special_data_types/nothing.md sql-reference/data-types/special-data-types/nothing.md
|
|
||||||
data_types/special_data_types/set.md sql-reference/data-types/special-data-types/set.md
|
|
||||||
data_types/string.md sql-reference/data-types/string.md
|
|
||||||
data_types/tuple.md sql-reference/data-types/tuple.md
|
|
||||||
data_types/uuid.md sql-reference/data-types/uuid.md
|
|
||||||
database_engines/index.md engines/database-engines/index.md
|
|
||||||
database_engines/lazy.md engines/database-engines/lazy.md
|
|
||||||
database_engines/mysql.md engines/database-engines/mysql.md
|
|
||||||
development/browse_code.md development/browse-code.md
|
|
||||||
development/build_cross_arm.md development/build-cross-arm.md
|
|
||||||
development/build_cross_osx.md development/build-cross-osx.md
|
|
||||||
development/build_osx.md development/build-osx.md
|
|
||||||
development/developer_instruction.md development/developer-instruction.md
|
|
||||||
dicts/external_dicts.md query-language/dicts/external-dicts.md
|
|
||||||
dicts/external_dicts_dict.md query-language/dicts/external-dicts-dict.md
|
|
||||||
dicts/external_dicts_dict_layout.md query-language/dicts/external-dicts-dict-layout.md
|
|
||||||
dicts/external_dicts_dict_lifetime.md query-language/dicts/external-dicts-dict-lifetime.md
|
|
||||||
dicts/external_dicts_dict_sources.md query-language/dicts/external-dicts-dict-sources.md
|
|
||||||
dicts/external_dicts_dict_structure.md query-language/dicts/external-dicts-dict-structure.md
|
|
||||||
dicts/index.md query-language/dicts/index.md
|
|
||||||
dicts/internal_dicts.md query-language/dicts/internal-dicts.md
|
|
||||||
engines/database_engines/index.md engines/database-engines/index.md
|
|
||||||
engines/database_engines/lazy.md engines/database-engines/lazy.md
|
|
||||||
engines/database_engines/mysql.md engines/database-engines/mysql.md
|
|
||||||
engines/table-engines/log-family/log-family.md engines/table-engines/log-family/index.md
|
|
||||||
engines/table_engines/index.md engines/table-engines/index.md
|
|
||||||
engines/table_engines/integrations/hdfs.md engines/table-engines/integrations/hdfs.md
|
|
||||||
engines/table_engines/integrations/index.md engines/table-engines/integrations/index.md
|
|
||||||
engines/table_engines/integrations/jdbc.md engines/table-engines/integrations/jdbc.md
|
|
||||||
engines/table_engines/integrations/kafka.md engines/table-engines/integrations/kafka.md
|
|
||||||
engines/table_engines/integrations/mysql.md engines/table-engines/integrations/mysql.md
|
|
||||||
engines/table_engines/integrations/odbc.md engines/table-engines/integrations/odbc.md
|
|
||||||
engines/table_engines/log_family/index.md engines/table-engines/log-family/index.md
|
|
||||||
engines/table_engines/log_family/log.md engines/table-engines/log-family/log.md
|
|
||||||
engines/table_engines/log_family/log_family.md engines/table-engines/log-family/log-family.md
|
|
||||||
engines/table_engines/log_family/stripelog.md engines/table-engines/log-family/stripelog.md
|
|
||||||
engines/table_engines/log_family/tinylog.md engines/table-engines/log-family/tinylog.md
|
|
||||||
engines/table_engines/mergetree_family/aggregatingmergetree.md engines/table-engines/mergetree-family/aggregatingmergetree.md
|
|
||||||
engines/table_engines/mergetree_family/collapsingmergetree.md engines/table-engines/mergetree-family/collapsingmergetree.md
|
|
||||||
engines/table_engines/mergetree_family/custom_partitioning_key.md engines/table-engines/mergetree-family/custom-partitioning-key.md
|
|
||||||
engines/table_engines/mergetree_family/graphitemergetree.md engines/table-engines/mergetree-family/graphitemergetree.md
|
|
||||||
engines/table_engines/mergetree_family/index.md engines/table-engines/mergetree-family/index.md
|
|
||||||
engines/table_engines/mergetree_family/mergetree.md engines/table-engines/mergetree-family/mergetree.md
|
|
||||||
engines/table_engines/mergetree_family/replacingmergetree.md engines/table-engines/mergetree-family/replacingmergetree.md
|
|
||||||
engines/table_engines/mergetree_family/replication.md engines/table-engines/mergetree-family/replication.md
|
|
||||||
engines/table_engines/mergetree_family/summingmergetree.md engines/table-engines/mergetree-family/summingmergetree.md
|
|
||||||
engines/table_engines/mergetree_family/versionedcollapsingmergetree.md engines/table-engines/mergetree-family/versionedcollapsingmergetree.md
|
|
||||||
engines/table_engines/special/buffer.md engines/table-engines/special/buffer.md
|
|
||||||
engines/table_engines/special/dictionary.md engines/table-engines/special/dictionary.md
|
|
||||||
engines/table_engines/special/distributed.md engines/table-engines/special/distributed.md
|
|
||||||
engines/table_engines/special/external_data.md engines/table-engines/special/external-data.md
|
|
||||||
engines/table_engines/special/file.md engines/table-engines/special/file.md
|
|
||||||
engines/table_engines/special/generate.md engines/table-engines/special/generate.md
|
|
||||||
engines/table_engines/special/index.md engines/table-engines/special/index.md
|
|
||||||
engines/table_engines/special/join.md engines/table-engines/special/join.md
|
|
||||||
engines/table_engines/special/materializedview.md engines/table-engines/special/materializedview.md
|
|
||||||
engines/table_engines/special/memory.md engines/table-engines/special/memory.md
|
|
||||||
engines/table_engines/special/merge.md engines/table-engines/special/merge.md
|
|
||||||
engines/table_engines/special/null.md engines/table-engines/special/null.md
|
|
||||||
engines/table_engines/special/set.md engines/table-engines/special/set.md
|
|
||||||
engines/table_engines/special/url.md engines/table-engines/special/url.md
|
|
||||||
engines/table_engines/special/view.md engines/table-engines/special/view.md
|
|
||||||
extended_roadmap.md whats-new/extended-roadmap.md
|
|
||||||
formats.md interfaces/formats.md
|
|
||||||
formats/capnproto.md interfaces/formats.md
|
|
||||||
formats/csv.md interfaces/formats.md
|
|
||||||
formats/csvwithnames.md interfaces/formats.md
|
|
||||||
formats/json.md interfaces/formats.md
|
|
||||||
formats/jsoncompact.md interfaces/formats.md
|
|
||||||
formats/jsoneachrow.md interfaces/formats.md
|
|
||||||
formats/native.md interfaces/formats.md
|
|
||||||
formats/null.md interfaces/formats.md
|
|
||||||
formats/pretty.md interfaces/formats.md
|
|
||||||
formats/prettycompact.md interfaces/formats.md
|
|
||||||
formats/prettycompactmonoblock.md interfaces/formats.md
|
|
||||||
formats/prettynoescapes.md interfaces/formats.md
|
|
||||||
formats/prettyspace.md interfaces/formats.md
|
|
||||||
formats/rowbinary.md interfaces/formats.md
|
|
||||||
formats/tabseparated.md interfaces/formats.md
|
|
||||||
formats/tabseparatedraw.md interfaces/formats.md
|
|
||||||
formats/tabseparatedwithnames.md interfaces/formats.md
|
|
||||||
formats/tabseparatedwithnamesandtypes.md interfaces/formats.md
|
|
||||||
formats/tskv.md interfaces/formats.md
|
|
||||||
formats/values.md interfaces/formats.md
|
|
||||||
formats/vertical.md interfaces/formats.md
|
|
||||||
formats/verticalraw.md interfaces/formats.md
|
|
||||||
formats/xml.md interfaces/formats.md
|
|
||||||
functions/arithmetic_functions.md query-language/functions/arithmetic-functions.md
|
|
||||||
functions/array_functions.md query-language/functions/array-functions.md
|
|
||||||
functions/array_join.md query-language/functions/array-join.md
|
|
||||||
functions/bit_functions.md query-language/functions/bit-functions.md
|
|
||||||
functions/bitmap_functions.md query-language/functions/bitmap-functions.md
|
|
||||||
functions/comparison_functions.md query-language/functions/comparison-functions.md
|
|
||||||
functions/conditional_functions.md query-language/functions/conditional-functions.md
|
|
||||||
functions/date_time_functions.md query-language/functions/date-time-functions.md
|
|
||||||
functions/encoding_functions.md query-language/functions/encoding-functions.md
|
|
||||||
functions/ext_dict_functions.md query-language/functions/ext-dict-functions.md
|
|
||||||
functions/hash_functions.md query-language/functions/hash-functions.md
|
|
||||||
functions/higher_order_functions.md query-language/functions/higher-order-functions.md
|
|
||||||
functions/in_functions.md query-language/functions/in-functions.md
|
|
||||||
functions/index.md query-language/functions/index.md
|
|
||||||
functions/ip_address_functions.md query-language/functions/ip-address-functions.md
|
|
||||||
functions/json_functions.md query-language/functions/json-functions.md
|
|
||||||
functions/logical_functions.md query-language/functions/logical-functions.md
|
|
||||||
functions/math_functions.md query-language/functions/math-functions.md
|
|
||||||
functions/other_functions.md query-language/functions/other-functions.md
|
|
||||||
functions/random_functions.md query-language/functions/random-functions.md
|
|
||||||
functions/rounding_functions.md query-language/functions/rounding-functions.md
|
|
||||||
functions/splitting_merging_functions.md query-language/functions/splitting-merging-functions.md
|
|
||||||
functions/string_functions.md query-language/functions/string-functions.md
|
|
||||||
functions/string_replace_functions.md query-language/functions/string-replace-functions.md
|
|
||||||
functions/string_search_functions.md query-language/functions/string-search-functions.md
|
|
||||||
functions/type_conversion_functions.md query-language/functions/type-conversion-functions.md
|
|
||||||
functions/url_functions.md query-language/functions/url-functions.md
|
|
||||||
functions/ym_dict_functions.md query-language/functions/ym-dict-functions.md
|
|
||||||
getting_started/example_datasets/amplab_benchmark.md getting-started/example-datasets/amplab-benchmark.md
|
|
||||||
getting_started/example_datasets/criteo.md getting-started/example-datasets/criteo.md
|
|
||||||
getting_started/example_datasets/index.md getting-started/example-datasets/index.md
|
|
||||||
getting_started/example_datasets/metrica.md getting-started/example-datasets/metrica.md
|
|
||||||
getting_started/example_datasets/nyc_taxi.md getting-started/example-datasets/nyc-taxi.md
|
|
||||||
getting_started/example_datasets/ontime.md getting-started/example-datasets/ontime.md
|
|
||||||
getting_started/example_datasets/star_schema.md getting-started/example-datasets/star-schema.md
|
|
||||||
getting_started/example_datasets/wikistat.md getting-started/example-datasets/wikistat.md
|
|
||||||
getting_started/index.md getting-started/index.md
|
|
||||||
getting_started/install.md getting-started/install.md
|
|
||||||
getting_started/playground.md getting-started/playground.md
|
|
||||||
getting_started/tutorial.md getting-started/tutorial.md
|
|
||||||
images/column_oriented.gif images/column-oriented.gif
|
|
||||||
images/row_oriented.gif images/row-oriented.gif
|
|
||||||
interfaces/http_interface.md interfaces/http.md
|
|
||||||
interfaces/third-party/client_libraries.md interfaces/third-party/client-libraries.md
|
|
||||||
interfaces/third-party_client_libraries.md interfaces/third-party/client-libraries.md
|
|
||||||
interfaces/third-party_gui.md interfaces/third-party/gui.md
|
|
||||||
interfaces/third_party/index.md interfaces/third-party/index.md
|
|
||||||
introduction/index.md
|
|
||||||
introduction/distinctive_features.md introduction/distinctive-features.md
|
|
||||||
introduction/features_considered_disadvantages.md introduction/distinctive-features.md
|
|
||||||
introduction/possible_silly_questions.md faq/general.md
|
|
||||||
introduction/ya_metrika_task.md introduction/history.md
|
|
||||||
operations/access_rights.md operations/access-rights.md
|
|
||||||
operations/configuration_files.md operations/configuration-files.md
|
|
||||||
operations/optimizing_performance/index.md operations/optimizing-performance/index.md
|
|
||||||
operations/optimizing_performance/sampling_query_profiler.md operations/optimizing-performance/sampling-query-profiler.md
|
|
||||||
operations/performance/sampling_query_profiler.md operations/optimizing-performance/sampling-query-profiler.md
|
|
||||||
operations/performance_test.md operations/performance-test.md
|
|
||||||
operations/server_configuration_parameters/index.md operations/server-configuration-parameters/index.md
|
|
||||||
operations/server_configuration_parameters/settings.md operations/server-configuration-parameters/settings.md
|
|
||||||
operations/server_settings/index.md operations/server-configuration-parameters/index.md
|
|
||||||
operations/server_settings/settings.md operations/server-configuration-parameters/settings.md
|
|
||||||
operations/settings/constraints_on_settings.md operations/settings/constraints-on-settings.md
|
|
||||||
operations/settings/permissions_for_queries.md operations/settings/permissions-for-queries.md
|
|
||||||
operations/settings/query_complexity.md operations/settings/query-complexity.md
|
|
||||||
operations/settings/settings_profiles.md operations/settings/settings-profiles.md
|
|
||||||
operations/settings/settings_users.md operations/settings/settings-users.md
|
|
||||||
operations/system_tables.md operations/system-tables.md
|
|
||||||
operations/table_engines/aggregatingmergetree.md engines/table-engines/mergetree-family/aggregatingmergetree.md
|
|
||||||
operations/table_engines/buffer.md engines/table-engines/special/buffer.md
|
|
||||||
operations/table_engines/collapsingmergetree.md engines/table-engines/mergetree-family/collapsingmergetree.md
|
|
||||||
operations/table_engines/custom_partitioning_key.md engines/table-engines/mergetree-family/custom-partitioning-key.md
|
|
||||||
operations/table_engines/dictionary.md engines/table-engines/special/dictionary.md
|
|
||||||
operations/table_engines/distributed.md engines/table-engines/special/distributed.md
|
|
||||||
operations/table_engines/external_data.md engines/table-engines/special/external-data.md
|
|
||||||
operations/table_engines/file.md engines/table-engines/special/file.md
|
|
||||||
operations/table_engines/generate.md engines/table-engines/special/generate.md
|
|
||||||
operations/table_engines/graphitemergetree.md engines/table-engines/mergetree-family/graphitemergetree.md
|
|
||||||
operations/table_engines/hdfs.md engines/table-engines/integrations/hdfs.md
|
|
||||||
operations/table_engines/index.md engines/table-engines/index.md
|
|
||||||
operations/table_engines/jdbc.md engines/table-engines/integrations/jdbc.md
|
|
||||||
operations/table_engines/join.md engines/table-engines/special/join.md
|
|
||||||
operations/table_engines/kafka.md engines/table-engines/integrations/kafka.md
|
|
||||||
operations/table_engines/log.md engines/table-engines/log-family/log.md
|
|
||||||
operations/table_engines/log_family.md engines/table-engines/log-family/log-family.md
|
|
||||||
operations/table_engines/materializedview.md engines/table-engines/special/materializedview.md
|
|
||||||
operations/table_engines/memory.md engines/table-engines/special/memory.md
|
|
||||||
operations/table_engines/merge.md engines/table-engines/special/merge.md
|
|
||||||
operations/table_engines/mergetree.md engines/table-engines/mergetree-family/mergetree.md
|
|
||||||
operations/table_engines/mysql.md engines/table-engines/integrations/mysql.md
|
|
||||||
operations/table_engines/null.md engines/table-engines/special/null.md
|
|
||||||
operations/table_engines/odbc.md engines/table-engines/integrations/odbc.md
|
|
||||||
operations/table_engines/replacingmergetree.md engines/table-engines/mergetree-family/replacingmergetree.md
|
|
||||||
operations/table_engines/replication.md engines/table-engines/mergetree-family/replication.md
|
|
||||||
operations/table_engines/set.md engines/table-engines/special/set.md
|
|
||||||
operations/table_engines/stripelog.md engines/table-engines/log-family/stripelog.md
|
|
||||||
operations/table_engines/summingmergetree.md engines/table-engines/mergetree-family/summingmergetree.md
|
|
||||||
operations/table_engines/tinylog.md engines/table-engines/log-family/tinylog.md
|
|
||||||
operations/table_engines/url.md engines/table-engines/special/url.md
|
|
||||||
operations/table_engines/versionedcollapsingmergetree.md engines/table-engines/mergetree-family/versionedcollapsingmergetree.md
|
|
||||||
operations/table_engines/view.md engines/table-engines/special/view.md
|
|
||||||
operations/utils/clickhouse-benchmark.md operations/utilities/clickhouse-benchmark.md
|
|
||||||
operations/utils/clickhouse-copier.md operations/utilities/clickhouse-copier.md
|
|
||||||
operations/utils/clickhouse-local.md operations/utilities/clickhouse-local.md
|
|
||||||
operations/utils/index.md operations/utilities/index.md
|
|
||||||
query_language/agg_functions/combinators.md sql-reference/aggregate-functions/combinators.md
|
|
||||||
query_language/agg_functions/index.md sql-reference/aggregate-functions/index.md
|
|
||||||
query_language/agg_functions/parametric_functions.md sql-reference/aggregate-functions/parametric-functions.md
|
|
||||||
query_language/agg_functions/reference.md sql-reference/aggregate-functions/reference.md
|
|
||||||
query_language/alter.md sql-reference/statements/alter.md
|
|
||||||
query_language/create.md sql-reference/statements/create.md
|
|
||||||
query_language/dicts/external_dicts.md sql-reference/dictionaries/external-dictionaries/external-dicts.md
|
|
||||||
query_language/dicts/external_dicts_dict.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md
|
|
||||||
query_language/dicts/external_dicts_dict_hierarchical.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md
|
|
||||||
query_language/dicts/external_dicts_dict_layout.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md
|
|
||||||
query_language/dicts/external_dicts_dict_lifetime.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
|
|
||||||
query_language/dicts/external_dicts_dict_sources.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
|
|
||||||
query_language/dicts/external_dicts_dict_structure.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md
|
|
||||||
query_language/dicts/index.md sql-reference/dictionaries/index.md
|
|
||||||
query_language/dicts/internal_dicts.md sql-reference/dictionaries/internal-dicts.md
|
|
||||||
query_language/functions/arithmetic_functions.md sql-reference/functions/arithmetic-functions.md
|
|
||||||
query_language/functions/array_functions.md sql-reference/functions/array-functions.md
|
|
||||||
query_language/functions/array_join.md sql-reference/functions/array-join.md
|
|
||||||
query_language/functions/bit_functions.md sql-reference/functions/bit-functions.md
|
|
||||||
query_language/functions/bitmap_functions.md sql-reference/functions/bitmap-functions.md
|
|
||||||
query_language/functions/comparison_functions.md sql-reference/functions/comparison-functions.md
|
|
||||||
query_language/functions/conditional_functions.md sql-reference/functions/conditional-functions.md
|
|
||||||
query_language/functions/date_time_functions.md sql-reference/functions/date-time-functions.md
|
|
||||||
query_language/functions/encoding_functions.md sql-reference/functions/encoding-functions.md
|
|
||||||
query_language/functions/ext_dict_functions.md sql-reference/functions/ext-dict-functions.md
|
|
||||||
query_language/functions/functions_for_nulls.md sql-reference/functions/functions-for-nulls.md
|
|
||||||
query_language/functions/geo.md sql-reference/functions/geo.md
|
|
||||||
query_language/functions/hash_functions.md sql-reference/functions/hash-functions.md
|
|
||||||
query_language/functions/higher_order_functions.md sql-reference/functions/higher-order-functions.md
|
|
||||||
query_language/functions/in_functions.md sql-reference/functions/in-functions.md
|
|
||||||
query_language/functions/index.md sql-reference/functions/index.md
|
|
||||||
query_language/functions/introspection.md sql-reference/functions/introspection.md
|
|
||||||
query_language/functions/ip_address_functions.md sql-reference/functions/ip-address-functions.md
|
|
||||||
query_language/functions/json_functions.md sql-reference/functions/json-functions.md
|
|
||||||
query_language/functions/logical_functions.md sql-reference/functions/logical-functions.md
|
|
||||||
query_language/functions/machine_learning_functions.md sql-reference/functions/machine-learning-functions.md
|
|
||||||
query_language/functions/math_functions.md sql-reference/functions/math-functions.md
|
|
||||||
query_language/functions/other_functions.md sql-reference/functions/other-functions.md
|
|
||||||
query_language/functions/random_functions.md sql-reference/functions/random-functions.md
|
|
||||||
query_language/functions/rounding_functions.md sql-reference/functions/rounding-functions.md
|
|
||||||
query_language/functions/splitting_merging_functions.md sql-reference/functions/splitting-merging-functions.md
|
|
||||||
query_language/functions/string_functions.md sql-reference/functions/string-functions.md
|
|
||||||
query_language/functions/string_replace_functions.md sql-reference/functions/string-replace-functions.md
|
|
||||||
query_language/functions/string_search_functions.md sql-reference/functions/string-search-functions.md
|
|
||||||
query_language/functions/type_conversion_functions.md sql-reference/functions/type-conversion-functions.md
|
|
||||||
query_language/functions/url_functions.md sql-reference/functions/url-functions.md
|
|
||||||
query_language/functions/uuid_functions.md sql-reference/functions/uuid-functions.md
|
|
||||||
query_language/functions/ym_dict_functions.md sql-reference/functions/ym-dict-functions.md
|
|
||||||
query_language/index.md sql-reference/index.md
|
|
||||||
query_language/insert_into.md sql-reference/statements/insert-into.md
|
|
||||||
query_language/misc.md sql-reference/statements/misc.md
|
|
||||||
query_language/operators.md sql-reference/operators.md
|
|
||||||
query_language/queries.md query-language.md
|
|
||||||
query_language/select.md sql-reference/statements/select.md
|
|
||||||
query_language/show.md sql-reference/statements/show.md
|
|
||||||
query_language/syntax.md sql-reference/syntax.md
|
|
||||||
query_language/system.md sql-reference/statements/system.md
|
|
||||||
query_language/table_functions/file.md sql-reference/table-functions/file.md
|
|
||||||
query_language/table_functions/generate.md sql-reference/table-functions/generate.md
|
|
||||||
query_language/table_functions/hdfs.md sql-reference/table-functions/hdfs.md
|
|
||||||
query_language/table_functions/index.md sql-reference/table-functions/index.md
|
|
||||||
query_language/table_functions/input.md sql-reference/table-functions/input.md
|
|
||||||
query_language/table_functions/jdbc.md sql-reference/table-functions/jdbc.md
|
|
||||||
query_language/table_functions/merge.md sql-reference/table-functions/merge.md
|
|
||||||
query_language/table_functions/mysql.md sql-reference/table-functions/mysql.md
|
|
||||||
query_language/table_functions/numbers.md sql-reference/table-functions/numbers.md
|
|
||||||
query_language/table_functions/odbc.md sql-reference/table-functions/odbc.md
|
|
||||||
query_language/table_functions/remote.md sql-reference/table-functions/remote.md
|
|
||||||
query_language/table_functions/url.md sql-reference/table-functions/url.md
|
|
||||||
roadmap.md whats-new/roadmap.md
|
|
||||||
security_changelog.md whats-new/security-changelog.md
|
|
||||||
sql-reference/data-types/domains/overview.md sql-reference/data-types/domains/index.md
|
|
||||||
sql_reference/aggregate_functions/combinators.md sql-reference/aggregate-functions/combinators.md
|
|
||||||
sql_reference/aggregate_functions/index.md sql-reference/aggregate-functions/index.md
|
|
||||||
sql_reference/aggregate_functions/parametric_functions.md sql-reference/aggregate-functions/parametric-functions.md
|
|
||||||
sql_reference/aggregate_functions/reference.md sql-reference/aggregate-functions/reference.md
|
|
||||||
sql_reference/ansi.md sql-reference/ansi.md
|
|
||||||
sql_reference/data_types/aggregatefunction.md sql-reference/data-types/aggregatefunction.md
|
|
||||||
sql_reference/data_types/array.md sql-reference/data-types/array.md
|
|
||||||
sql_reference/data_types/boolean.md sql-reference/data-types/boolean.md
|
|
||||||
sql_reference/data_types/date.md sql-reference/data-types/date.md
|
|
||||||
sql_reference/data_types/datetime.md sql-reference/data-types/datetime.md
|
|
||||||
sql_reference/data_types/datetime64.md sql-reference/data-types/datetime64.md
|
|
||||||
sql_reference/data_types/decimal.md sql-reference/data-types/decimal.md
|
|
||||||
sql_reference/data_types/domains/index.md sql-reference/data-types/domains/index.md
|
|
||||||
sql_reference/data_types/domains/ipv4.md sql-reference/data-types/ipv4.md
|
|
||||||
sql_reference/data_types/domains/ipv6.md sql-reference/data-types/ipv6.md
|
|
||||||
sql_reference/data_types/domains/overview.md sql-reference/data-types/domains/overview.md
|
|
||||||
sql_reference/data_types/enum.md sql-reference/data-types/enum.md
|
|
||||||
sql_reference/data_types/fixedstring.md sql-reference/data-types/fixedstring.md
|
|
||||||
sql_reference/data_types/float.md sql-reference/data-types/float.md
|
|
||||||
sql_reference/data_types/index.md sql-reference/data-types/index.md
|
|
||||||
sql_reference/data_types/int_uint.md sql-reference/data-types/int-uint.md
|
|
||||||
sql_reference/data_types/nested_data_structures/index.md sql-reference/data-types/nested-data-structures/index.md
|
|
||||||
sql_reference/data_types/nested_data_structures/nested.md sql-reference/data-types/nested-data-structures/nested.md
|
|
||||||
sql_reference/data_types/nullable.md sql-reference/data-types/nullable.md
|
|
||||||
sql_reference/data_types/simpleaggregatefunction.md sql-reference/data-types/simpleaggregatefunction.md
|
|
||||||
sql_reference/data_types/special_data_types/expression.md sql-reference/data-types/special-data-types/expression.md
|
|
||||||
sql_reference/data_types/special_data_types/index.md sql-reference/data-types/special-data-types/index.md
|
|
||||||
sql_reference/data_types/special_data_types/interval.md sql-reference/data-types/special-data-types/interval.md
|
|
||||||
sql_reference/data_types/special_data_types/nothing.md sql-reference/data-types/special-data-types/nothing.md
|
|
||||||
sql_reference/data_types/special_data_types/set.md sql-reference/data-types/special-data-types/set.md
|
|
||||||
sql_reference/data_types/string.md sql-reference/data-types/string.md
|
|
||||||
sql_reference/data_types/tuple.md sql-reference/data-types/tuple.md
|
|
||||||
sql_reference/data_types/uuid.md sql-reference/data-types/uuid.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/external_dicts.md sql-reference/dictionaries/external-dictionaries/external-dicts.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md
|
|
||||||
sql_reference/dictionaries/external_dictionaries/index.md sql-reference/dictionaries/external-dictionaries/index.md
|
|
||||||
sql_reference/dictionaries/index.md sql-reference/dictionaries/index.md
|
|
||||||
sql_reference/dictionaries/internal_dicts.md sql-reference/dictionaries/internal-dicts.md
|
|
||||||
sql_reference/functions/arithmetic_functions.md sql-reference/functions/arithmetic-functions.md
|
|
||||||
sql_reference/functions/array_functions.md sql-reference/functions/array-functions.md
|
|
||||||
sql_reference/functions/array_join.md sql-reference/functions/array-join.md
|
|
||||||
sql_reference/functions/bit_functions.md sql-reference/functions/bit-functions.md
|
|
||||||
sql_reference/functions/bitmap_functions.md sql-reference/functions/bitmap-functions.md
|
|
||||||
sql_reference/functions/comparison_functions.md sql-reference/functions/comparison-functions.md
|
|
||||||
sql_reference/functions/conditional_functions.md sql-reference/functions/conditional-functions.md
|
|
||||||
sql_reference/functions/date_time_functions.md sql-reference/functions/date-time-functions.md
|
|
||||||
sql_reference/functions/encoding_functions.md sql-reference/functions/encoding-functions.md
|
|
||||||
sql_reference/functions/ext_dict_functions.md sql-reference/functions/ext-dict-functions.md
|
|
||||||
sql_reference/functions/functions_for_nulls.md sql-reference/functions/functions-for-nulls.md
|
|
||||||
sql_reference/functions/geo.md sql-reference/functions/geo.md
|
|
||||||
sql_reference/functions/hash_functions.md sql-reference/functions/hash-functions.md
|
|
||||||
sql_reference/functions/higher_order_functions.md sql-reference/functions/higher-order-functions.md
|
|
||||||
sql_reference/functions/in_functions.md sql-reference/functions/in-functions.md
|
|
||||||
sql_reference/functions/index.md sql-reference/functions/index.md
|
|
||||||
sql_reference/functions/introspection.md sql-reference/functions/introspection.md
|
|
||||||
sql_reference/functions/ip_address_functions.md sql-reference/functions/ip-address-functions.md
|
|
||||||
sql_reference/functions/json_functions.md sql-reference/functions/json-functions.md
|
|
||||||
sql_reference/functions/logical_functions.md sql-reference/functions/logical-functions.md
|
|
||||||
sql_reference/functions/machine_learning_functions.md sql-reference/functions/machine-learning-functions.md
|
|
||||||
sql_reference/functions/math_functions.md sql-reference/functions/math-functions.md
|
|
||||||
sql_reference/functions/other_functions.md sql-reference/functions/other-functions.md
|
|
||||||
sql_reference/functions/random_functions.md sql-reference/functions/random-functions.md
|
|
||||||
sql_reference/functions/rounding_functions.md sql-reference/functions/rounding-functions.md
|
|
||||||
sql_reference/functions/splitting_merging_functions.md sql-reference/functions/splitting-merging-functions.md
|
|
||||||
sql_reference/functions/string_functions.md sql-reference/functions/string-functions.md
|
|
||||||
sql_reference/functions/string_replace_functions.md sql-reference/functions/string-replace-functions.md
|
|
||||||
sql_reference/functions/string_search_functions.md sql-reference/functions/string-search-functions.md
|
|
||||||
sql_reference/functions/type_conversion_functions.md sql-reference/functions/type-conversion-functions.md
|
|
||||||
sql_reference/functions/url_functions.md sql-reference/functions/url-functions.md
|
|
||||||
sql_reference/functions/uuid_functions.md sql-reference/functions/uuid-functions.md
|
|
||||||
sql_reference/functions/ym_dict_functions.md sql-reference/functions/ym-dict-functions.md
|
|
||||||
sql_reference/index.md sql-reference/index.md
|
|
||||||
sql_reference/operators.md sql-reference/operators.md
|
|
||||||
sql_reference/statements/alter.md sql-reference/statements/alter.md
|
|
||||||
sql_reference/statements/create.md sql-reference/statements/create.md
|
|
||||||
sql_reference/statements/index.md sql-reference/statements/index.md
|
|
||||||
sql_reference/statements/insert_into.md sql-reference/statements/insert-into.md
|
|
||||||
sql_reference/statements/misc.md sql-reference/statements/misc.md
|
|
||||||
sql_reference/statements/select.md sql-reference/statements/select.md
|
|
||||||
sql_reference/statements/show.md sql-reference/statements/show.md
|
|
||||||
sql_reference/statements/system.md sql-reference/statements/system.md
|
|
||||||
sql_reference/syntax.md sql-reference/syntax.md
|
|
||||||
sql_reference/table_functions/file.md sql-reference/table-functions/file.md
|
|
||||||
sql_reference/table_functions/generate.md sql-reference/table-functions/generate.md
|
|
||||||
sql_reference/table_functions/hdfs.md sql-reference/table-functions/hdfs.md
|
|
||||||
sql_reference/table_functions/index.md sql-reference/table-functions/index.md
|
|
||||||
sql_reference/table_functions/input.md sql-reference/table-functions/input.md
|
|
||||||
sql_reference/table_functions/jdbc.md sql-reference/table-functions/jdbc.md
|
|
||||||
sql_reference/table_functions/merge.md sql-reference/table-functions/merge.md
|
|
||||||
sql_reference/table_functions/mysql.md sql-reference/table-functions/mysql.md
|
|
||||||
sql_reference/table_functions/numbers.md sql-reference/table-functions/numbers.md
|
|
||||||
sql_reference/table_functions/odbc.md sql-reference/table-functions/odbc.md
|
|
||||||
sql_reference/table_functions/remote.md sql-reference/table-functions/remote.md
|
|
||||||
sql_reference/table_functions/url.md sql-reference/table-functions/url.md
|
|
||||||
system_tables.md operations/system-tables.md
|
|
||||||
system_tables/system.asynchronous_metrics.md operations/system-tables.md
|
|
||||||
system_tables/system.clusters.md operations/system-tables.md
|
|
||||||
system_tables/system.columns.md operations/system-tables.md
|
|
||||||
system_tables/system.databases.md operations/system-tables.md
|
|
||||||
system_tables/system.dictionaries.md operations/system-tables.md
|
|
||||||
system_tables/system.events.md operations/system-tables.md
|
|
||||||
system_tables/system.functions.md operations/system-tables.md
|
|
||||||
system_tables/system.merges.md operations/system-tables.md
|
|
||||||
system_tables/system.metrics.md operations/system-tables.md
|
|
||||||
system_tables/system.numbers.md operations/system-tables.md
|
|
||||||
system_tables/system.numbers_mt.md operations/system-tables.md
|
|
||||||
system_tables/system.one.md operations/system-tables.md
|
|
||||||
system_tables/system.parts.md operations/system-tables.md
|
|
||||||
system_tables/system.processes.md operations/system-tables.md
|
|
||||||
system_tables/system.replicas.md operations/system-tables.md
|
|
||||||
system_tables/system.settings.md operations/system-tables.md
|
|
||||||
system_tables/system.tables.md operations/system-tables.md
|
|
||||||
system_tables/system.zookeeper.md operations/system-tables.md
|
|
||||||
table_engines.md operations/table-engines.md
|
|
||||||
table_engines/aggregatingmergetree.md operations/table-engines/aggregatingmergetree.md
|
|
||||||
table_engines/buffer.md operations/table-engines/buffer.md
|
|
||||||
table_engines/collapsingmergetree.md operations/table-engines/collapsingmergetree.md
|
|
||||||
table_engines/custom_partitioning_key.md operations/table-engines/custom-partitioning-key.md
|
|
||||||
table_engines/dictionary.md operations/table-engines/dictionary.md
|
|
||||||
table_engines/distributed.md operations/table-engines/distributed.md
|
|
||||||
table_engines/external_data.md operations/table-engines/external-data.md
|
|
||||||
table_engines/file.md operations/table-engines/file.md
|
|
||||||
table_engines/graphitemergetree.md operations/table-engines/graphitemergetree.md
|
|
||||||
table_engines/index.md operations/table-engines/index.md
|
|
||||||
table_engines/join.md operations/table-engines/join.md
|
|
||||||
table_engines/kafka.md operations/table-engines/kafka.md
|
|
||||||
table_engines/log.md operations/table-engines/log.md
|
|
||||||
table_engines/materializedview.md operations/table-engines/materializedview.md
|
|
||||||
table_engines/memory.md operations/table-engines/memory.md
|
|
||||||
table_engines/merge.md operations/table-engines/merge.md
|
|
||||||
table_engines/mergetree.md operations/table-engines/mergetree.md
|
|
||||||
table_engines/mysql.md operations/table-engines/mysql.md
|
|
||||||
table_engines/null.md operations/table-engines/null.md
|
|
||||||
table_engines/replacingmergetree.md operations/table-engines/replacingmergetree.md
|
|
||||||
table_engines/replication.md operations/table-engines/replication.md
|
|
||||||
table_engines/set.md operations/table-engines/set.md
|
|
||||||
table_engines/summingmergetree.md operations/table-engines/summingmergetree.md
|
|
||||||
table_engines/tinylog.md operations/table-engines/tinylog.md
|
|
||||||
table_engines/view.md operations/table-engines/view.md
|
|
||||||
table_functions/file.md query-language/table-functions/file.md
|
|
||||||
table_functions/index.md query-language/table-functions/index.md
|
|
||||||
table_functions/merge.md query-language/table-functions/merge.md
|
|
||||||
table_functions/numbers.md query-language/table-functions/numbers.md
|
|
||||||
table_functions/remote.md query-language/table-functions/remote.md
|
|
||||||
utils.md operations/utils.md
|
|
||||||
utils/clickhouse-copier.md operations/utils/clickhouse-copier.md
|
|
||||||
utils/clickhouse-local.md operations/utils/clickhouse-local.md
|
|
||||||
whats_new/changelog/2017.md whats-new/changelog/2017.md
|
|
||||||
whats_new/changelog/2018.md whats-new/changelog/2018.md
|
|
||||||
whats_new/changelog/2019.md whats-new/changelog/2019.md
|
|
||||||
whats_new/changelog/index.md whats-new/changelog/index.md
|
|
||||||
whats_new/index.md whats-new/index.md
|
|
||||||
whats_new/roadmap.md whats-new/roadmap.md
|
|
||||||
whats_new/security_changelog.md whats-new/security-changelog.md
|
|
||||||
|
@@ -1355,6 +1355,10 @@ Parameters:
<timezone>Europe/Moscow</timezone>
```

+**See also**
+
+- [session_timezone](../settings/settings.md#session_timezone)
+
## tcp_port {#server_configuration_parameters-tcp_port}

The port for communicating with clients over the TCP protocol.
@@ -4127,6 +4127,63 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca
Default value: `false`

+## session_timezone {#session_timezone}
+
+Sets the default timezone (session_timezone) of the current session instead of the [server timezone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone). That is, all DateTime/DateTime64 values for which the timezone is not set explicitly are interpreted as belonging to the specified zone.
+With the setting value `''` (empty string), the session timezone matches the server timezone.
+
+The functions `timeZone()` and `serverTimezone()` return the timezone of the current session and of the server, respectively.
+
+Examples:
+```sql
+SELECT timeZone(), serverTimezone() FORMAT TSV
+
+Europe/Berlin Europe/Berlin
+```
+
+```sql
+SELECT timeZone(), serverTimezone() SETTINGS session_timezone = 'Asia/Novosibirsk' FORMAT TSV
+
+Asia/Novosibirsk Europe/Berlin
+```
+
+```sql
+SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver' FORMAT TSV
+
+1999-12-13 07:23:23.123
+```
+
+Possible values:
+
+- Any zone from `system.time_zones`, for example `Europe/Berlin`, `UTC` or `Zulu`
+
+Default value: `''`.
+
+:::warning
+Sometimes, when `DateTime` and `DateTime64` values are formed, the `session_timezone` setting may be ignored.
+This can lead to confusion. See the example and the explanation below.
+:::
+
+```sql
+CREATE TABLE test_tz (`d` DateTime('UTC')) ENGINE = Memory AS SELECT toDateTime('2000-01-01 00:00:00', 'UTC');
+
+SELECT *, timezone() FROM test_tz WHERE d = toDateTime('2000-01-01 00:00:00') SETTINGS session_timezone = 'Asia/Novosibirsk'
+0 rows in set.
+
+SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS session_timezone = 'Asia/Novosibirsk'
+┌───────────────────d─┬─timezone()───────┐
+│ 2000-01-01 00:00:00 │ Asia/Novosibirsk │
+└─────────────────────┴──────────────────┘
+```
+
+This happens because the value used in the comparison originates differently:
+- In the first query, the `toDateTime()` function creates the `DateTime` value taking the `session_timezone` setting from the query context into account;
+- In the second query, the `DateTime` value is formed from the string implicitly, inheriting the type of column `d` (including its timezone), so the `session_timezone` setting is ignored.
+
+**See also**
+
+- [timezone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
+
## rename_files_after_processing

- **Type:** String
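One way to keep such comparisons unambiguous, sketched against the `test_tz` table from the example above, is to state the literal's timezone explicitly; the explicit `'UTC'` argument is the only addition:

```sql
-- The literal's timezone is explicit, so the result no longer depends on session_timezone.
SELECT *, timezone()
FROM test_tz
WHERE d = toDateTime('2000-01-01 00:00:00', 'UTC')
SETTINGS session_timezone = 'Asia/Novosibirsk';
```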
@@ -26,7 +26,8 @@ SELECT

## timeZone {#timezone}

-Returns the server timezone.
+Returns the timezone that is treated as the default for the current session: the value of the [session_timezone](../../operations/settings/settings.md#session_timezone) setting, if it is set.

If the function is called in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise, a constant is returned.

**Syntax**

@@ -43,6 +44,33 @@ timeZone()

Type: [String](../../sql-reference/data-types/string.md).

+**See also**
+
+- [serverTimeZone](#servertimezone)
+
+## serverTimeZone {#servertimezone}
+
+Returns the default timezone of the server, including the one set via [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone).
+If the function is called in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise, a constant is returned.
+
+**Syntax**
+
+``` sql
+serverTimeZone()
+```
+
+Aliases: `serverTimezone`.
+
+**Returned value**
+
+- The timezone.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**See also**
+
+- [timeZone](#timezone)
+
## toTimeZone {#totimezone}

Converts a date or a date with time to the specified timezone. The timezone is an attribute of the `Date` and `DateTime` types. The internal value (the number of seconds) of a table field or of a result column does not change; what changes is the field type and, accordingly, its text representation.
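A short usage sketch of the two functions documented above; the session timezone value is illustrative:

```sql
-- timeZone() follows the session setting, serverTimeZone() reports the server default.
SELECT timeZone(), serverTimeZone()
SETTINGS session_timezone = 'America/Denver';
```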
@@ -5,7 +5,7 @@ sidebar_label: "Функции для работы с внешними слов
---

:::note "Attention"
For dictionaries created with [DDL queries](../../sql-reference/statements/create/dictionary.md), the `dict_name` parameter must contain the full name of the dictionary together with the database, for example: `<database>.<dict_name>`. If the database is not specified, the current one is used.
:::

# Functions for working with external dictionaries {#ext_dict_functions}
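To illustrate the note, a hedged sketch of a dictionary lookup that uses the fully qualified name; the database, dictionary, attribute and key value are hypothetical:

```sql
-- 'db.dict_name': a DDL-created dictionary addressed together with its database.
SELECT dictGet('db.dict_name', 'attr_name', toUInt64(1));
```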
@@ -4,7 +4,9 @@
#include <map>
#include <iostream>
#include <iomanip>
+#include <memory>
#include <optional>
+#include <Common/ThreadStatus.h>
#include <Common/scope_guard_safe.h>
#include <boost/program_options.hpp>
#include <boost/algorithm/string/replace.hpp>
@@ -307,7 +309,7 @@ int Client::main(const std::vector<std::string> & /*args*/)
try
{
UseSSL use_ssl;
-MainThreadStatus::getInstance();
+auto & thread_status = MainThreadStatus::getInstance();
setupSignalHandler();

std::cout << std::fixed << std::setprecision(3);
@@ -320,6 +322,14 @@ try
processConfig();
initTtyBuffer(toProgressOption(config().getString("progress", "default")));

+{
+// All that just to set DB::CurrentThread::get().getGlobalContext()
+// which is required for client timezone (pushed from server) to work.
+auto thread_group = std::make_shared<ThreadGroup>();
+const_cast<ContextWeakPtr&>(thread_group->global_context) = global_context;
+thread_status.attachToGroup(thread_group, false);
+}
+
/// Includes delayed_interactive.
if (is_interactive)
{
@@ -44,7 +44,7 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self)
time_t timestamp = Poco::Timestamp().epochTime();
auto curr_pid = Poco::Process::id();

-process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid);
+process_id = std::to_string(DateLUT::serverTimezoneInstance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid);
host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id;
process_path = fs::weakly_canonical(fs::path(base_dir) / ("clickhouse-copier_" + process_id));
fs::create_directories(process_path);
@@ -306,8 +306,8 @@ try

/// Initialize DateLUT early, to not interfere with running time of first query.
LOG_DEBUG(log, "Initializing DateLUT.");
-DateLUT::instance();
-LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone());
+DateLUT::serverTimezoneInstance();
+LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::serverTimezoneInstance().getTimeZone());

/// Don't want to use DNS cache
DNSResolver::instance().setDisableCacheFlag();
@@ -491,7 +491,7 @@ private:
const DateLUTImpl & date_lut;

public:
-explicit DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::instance()) {}
+explicit DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::serverTimezoneInstance()) {}

void train(const IColumn &) override {}
void finalize() override {}
@@ -960,8 +960,8 @@ try

/// Initialize DateLUT early, to not interfere with running time of first query.
LOG_DEBUG(log, "Initializing DateLUT.");
-DateLUT::instance();
-LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone());
+DateLUT::serverTimezoneInstance();
+LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::serverTimezoneInstance().getTimeZone());

/// Storage with temporary data for processing of heavy queries.
if (!server_settings.tmp_policy.value.empty())
@@ -1543,12 +1543,12 @@
-->

<!-- Configuration for the query cache -->
-<!-- <query_cache> -->
-<!-- <max_size_in_bytes>1073741824</max_size_in_bytes> -->
-<!-- <max_entries>1024</max_entries> -->
-<!-- <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes> -->
-<!-- <max_entry_size_in_rows>30000000</max_entry_size_in_rows> -->
-<!-- </query_cache> -->
+<query_cache>
+<max_size_in_bytes>1073741824</max_size_in_bytes>
+<max_entries>1024</max_entries>
+<max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
+<max_entry_size_in_rows>30000000</max_entry_size_in_rows>
+</query_cache>

<!-- Uncomment if enable merge tree metadata cache -->
<!--merge_tree_metadata_cache>
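The block above enables the query result cache on the server side; queries still opt in per statement. A minimal sketch, assuming the `use_query_cache` setting is available in this build:

```sql
-- The first execution computes and stores the result; identical queries within the
-- cache TTL can then be answered from the cache instead of being recomputed.
SELECT sum(number) FROM numbers(1000000) SETTINGS use_query_cache = 1;
```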
@ -78,14 +78,14 @@ namespace detail
|
|||||||
|
|
||||||
void serialize(WriteBuffer & buf) const
|
void serialize(WriteBuffer & buf) const
|
||||||
{
|
{
|
-        writeBinary(count, buf);
+        writeBinaryLittleEndian(count, buf);
         buf.write(reinterpret_cast<const char *>(elems), count * sizeof(elems[0]));
     }
 
     void deserialize(ReadBuffer & buf)
     {
         UInt16 new_count = 0;
-        readBinary(new_count, buf);
+        readBinaryLittleEndian(new_count, buf);
         if (new_count > TINY_MAX_ELEMS)
             throw Exception(ErrorCodes::INCORRECT_DATA, "The number of elements {} for the 'tiny' kind of quantileTiming is exceeding the maximum of {}", new_count, TINY_MAX_ELEMS);
         buf.readStrict(reinterpret_cast<char *>(elems), new_count * sizeof(elems[0]));

@@ -164,14 +164,14 @@ namespace detail
 
     void serialize(WriteBuffer & buf) const
     {
-        writeBinary(elems.size(), buf);
+        writeBinaryLittleEndian(elems.size(), buf);
         buf.write(reinterpret_cast<const char *>(elems.data()), elems.size() * sizeof(elems[0]));
     }
 
     void deserialize(ReadBuffer & buf)
     {
         size_t size = 0;
-        readBinary(size, buf);
+        readBinaryLittleEndian(size, buf);
         if (size > 10'000)
             throw Exception(ErrorCodes::INCORRECT_DATA, "The number of elements {} for the 'medium' kind of quantileTiming is too large", size);
 

@@ -341,7 +341,7 @@ namespace detail
 
     void serialize(WriteBuffer & buf) const
     {
-        writeBinary(count, buf);
+        writeBinaryLittleEndian(count, buf);
 
         if (count * 2 > SMALL_THRESHOLD + BIG_SIZE)
         {

@@ -356,8 +356,8 @@ namespace detail
             {
                 if (count_small[i])
                 {
-                    writeBinary(UInt16(i), buf);
-                    writeBinary(count_small[i], buf);
+                    writeBinaryLittleEndian(UInt16(i), buf);
+                    writeBinaryLittleEndian(count_small[i], buf);
                 }
             }
 

@@ -365,19 +365,19 @@ namespace detail
             {
                 if (count_big[i])
                 {
-                    writeBinary(UInt16(i + SMALL_THRESHOLD), buf);
-                    writeBinary(count_big[i], buf);
+                    writeBinaryLittleEndian(UInt16(i + SMALL_THRESHOLD), buf);
+                    writeBinaryLittleEndian(count_big[i], buf);
                 }
             }
 
             /// Symbolizes end of data.
-            writeBinary(UInt16(BIG_THRESHOLD), buf);
+            writeBinaryLittleEndian(UInt16(BIG_THRESHOLD), buf);
         }
     }
 
     void deserialize(ReadBuffer & buf)
     {
-        readBinary(count, buf);
+        readBinaryLittleEndian(count, buf);
 
         if (count * 2 > SMALL_THRESHOLD + BIG_SIZE)
         {

@@ -388,12 +388,12 @@ namespace detail
             while (true)
             {
                 UInt16 index = 0;
-                readBinary(index, buf);
+                readBinaryLittleEndian(index, buf);
                 if (index == BIG_THRESHOLD)
                     break;
 
                 UInt64 elem_count = 0;
-                readBinary(elem_count, buf);
+                readBinaryLittleEndian(elem_count, buf);
 
                 if (index < SMALL_THRESHOLD)
                     count_small[index] = elem_count;

@@ -692,7 +692,7 @@ public:
     void serialize(WriteBuffer & buf) const
     {
         auto kind = which();
-        DB::writePODBinary(kind, buf);
+        writeBinaryLittleEndian(kind, buf);
 
         if (kind == Kind::Tiny)
             tiny.serialize(buf);

@@ -706,7 +706,7 @@ public:
    void deserialize(ReadBuffer & buf)
    {
        Kind kind;
-       DB::readPODBinary(kind, buf);
+       readBinaryLittleEndian(kind, buf);
 
        if (kind == Kind::Tiny)
        {
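The pattern behind all of the substitutions above is making the serialized aggregate-function state byte-order independent: every integer is written and read in an explicit little-endian layout instead of whatever layout the host uses, so states produced on big-endian machines stay compatible. A minimal illustration of the idea with a hypothetical helper name (ClickHouse's real writeBinaryLittleEndian/readBinaryLittleEndian operate on WriteBuffer/ReadBuffer, not std::ostream):

#include <cstddef>
#include <ostream>
#include <type_traits>

/// Hypothetical stand-in: emit `value` as little-endian bytes regardless of host endianness.
template <typename T>
void writeLittleEndian(T value, std::ostream & out)
{
    using U = std::make_unsigned_t<T>;
    const U u = static_cast<U>(value);
    unsigned char bytes[sizeof(U)];
    for (size_t i = 0; i < sizeof(U); ++i)
        bytes[i] = static_cast<unsigned char>((u >> (8 * i)) & 0xFF);   /// byte i carries bits [8i, 8i + 8)
    out.write(reinterpret_cast<const char *>(bytes), sizeof(bytes));
}

The matching reader assembles the integer from the same byte order on load, which is what the readBinaryLittleEndian calls in the hunks do.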
@@ -721,7 +721,15 @@ void BackupCoordinationRemote::prepareFileInfos() const
 
 bool BackupCoordinationRemote::startWritingFile(size_t data_file_index)
 {
-    bool acquired_writing = false;
+    {
+        /// Check if this host is already writing this file.
+        std::lock_guard lock{writing_files_mutex};
+        if (writing_files.contains(data_file_index))
+            return false;
+    }
+
+    /// Store in Zookeeper that this host is the only host which is allowed to write this file.
+    bool host_is_assigned = false;
     String full_path = zookeeper_path + "/writing_files/" + std::to_string(data_file_index);
     String host_index_str = std::to_string(current_host_index);
 

@@ -733,14 +741,23 @@ bool BackupCoordinationRemote::startWritingFile(size_t data_file_index)
         auto code = zk->tryCreate(full_path, host_index_str, zkutil::CreateMode::Persistent);
 
         if (code == Coordination::Error::ZOK)
-            acquired_writing = true; /// If we've just created this ZooKeeper's node, the writing is acquired, i.e. we should write this data file.
+            host_is_assigned = true; /// If we've just created this ZooKeeper's node, this host is assigned.
         else if (code == Coordination::Error::ZNODEEXISTS)
-            acquired_writing = (zk->get(full_path) == host_index_str); /// The previous retry could write this ZooKeeper's node and then fail.
+            host_is_assigned = (zk->get(full_path) == host_index_str); /// The previous retry could write this ZooKeeper's node and then fail.
         else
             throw zkutil::KeeperException(code, full_path);
     });
 
-    return acquired_writing;
+    if (!host_is_assigned)
+        return false; /// Other host is writing this file.
+
+    {
+        /// Check if this host is already writing this file,
+        /// and if it's not, mark that this host is writing this file.
+        /// We have to check that again because we were accessing ZooKeeper with the mutex unlocked.
+        std::lock_guard lock{writing_files_mutex};
+        return writing_files.emplace(data_file_index).second; /// Return false if this host is already writing this file.
+    }
 }
 
 bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic<size_t> &) const
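The rewritten startWritingFile() pairs a process-local registry (writing_files, guarded by writing_files_mutex) with a ZooKeeper node whose first creator wins the right to write the data file; the local set is re-checked after the ZooKeeper round-trip because the mutex was released in between. A rough sketch of just the assignment step, reusing only the calls that appear in the hunk (the retry wrapper and error handling are simplified, and the free-function form is illustrative):

/// Sketch under the assumptions of the hunk above: `zk`, `zookeeper_path` and `current_host_index` already exist.
bool hostIsAssignedToWrite(const zkutil::ZooKeeperPtr & zk, const String & zookeeper_path,
                           size_t data_file_index, size_t current_host_index)
{
    const String full_path = zookeeper_path + "/writing_files/" + std::to_string(data_file_index);
    const String host_index_str = std::to_string(current_host_index);

    auto code = zk->tryCreate(full_path, host_index_str, zkutil::CreateMode::Persistent);
    if (code == Coordination::Error::ZOK)
        return true;                                    /// We created the node first, so this host writes the file.
    if (code == Coordination::Error::ZNODEEXISTS)
        return zk->get(full_path) == host_index_str;    /// A previous retry of this same host may have created it.
    throw zkutil::KeeperException(code, full_path);
}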
@@ -106,12 +106,14 @@ private:
     mutable std::optional<BackupCoordinationReplicatedAccess> TSA_GUARDED_BY(replicated_access_mutex) replicated_access;
     mutable std::optional<BackupCoordinationReplicatedSQLObjects> TSA_GUARDED_BY(replicated_sql_objects_mutex) replicated_sql_objects;
     mutable std::optional<BackupCoordinationFileInfos> TSA_GUARDED_BY(file_infos_mutex) file_infos;
+    std::unordered_set<size_t> TSA_GUARDED_BY(writing_files_mutex) writing_files;
 
     mutable std::mutex zookeeper_mutex;
     mutable std::mutex replicated_tables_mutex;
     mutable std::mutex replicated_access_mutex;
     mutable std::mutex replicated_sql_objects_mutex;
     mutable std::mutex file_infos_mutex;
+    mutable std::mutex writing_files_mutex;
 };
 
 }
@@ -77,7 +77,6 @@
 #include "config_version.h"
 #include "config.h"
 
-
 namespace fs = std::filesystem;
 using namespace std::literals;
 

@@ -896,7 +895,6 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
 
         if (send_external_tables)
             sendExternalTables(parsed_query);
-
         receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel);
 
         break;

@@ -1048,6 +1046,10 @@ bool ClientBase::receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled_)
             onProfileEvents(packet.block);
             return true;
 
+        case Protocol::Server::TimezoneUpdate:
+            onTimezoneUpdate(packet.server_timezone);
+            return true;
+
         default:
             throw Exception(
                 ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", packet.type, connection->getDescription());

@@ -1070,6 +1072,11 @@ void ClientBase::onProgress(const Progress & value)
         progress_indication.writeProgress(*tty_buf);
 }
 
+void ClientBase::onTimezoneUpdate(const String & tz)
+{
+    global_context->setSetting("session_timezone", tz);
+}
+
 
 void ClientBase::onEndOfStream()
 {

@@ -1221,9 +1228,13 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de
                 columns_description = ColumnsDescription::parse(packet.multistring_message[1]);
                 return receiveSampleBlock(out, columns_description, parsed_query);
 
+            case Protocol::Server::TimezoneUpdate:
+                onTimezoneUpdate(packet.server_timezone);
+                break;
+
             default:
                 throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER,
-                    "Unexpected packet from server (expected Data, Exception or Log, got {})",
+                    "Unexpected packet from server (expected Data, Exception, Log or TimezoneUpdate, got {})",
                     String(Protocol::Server::toString(packet.type)));
         }
     }

@@ -1538,7 +1549,9 @@ void ClientBase::receiveLogsAndProfileEvents(ASTPtr parsed_query)
 {
     auto packet_type = connection->checkPacket(0);
 
-    while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents))
+    while (packet_type && (*packet_type == Protocol::Server::Log
+        || *packet_type == Protocol::Server::ProfileEvents
+        || *packet_type == Protocol::Server::TimezoneUpdate))
     {
         receiveAndProcessPacket(parsed_query, false);
         packet_type = connection->checkPacket(0);

@@ -1575,6 +1588,10 @@ bool ClientBase::receiveEndOfQuery()
                 onProfileEvents(packet.block);
                 break;
 
+            case Protocol::Server::TimezoneUpdate:
+                onTimezoneUpdate(packet.server_timezone);
+                break;
+
             default:
                 throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER,
                     "Unexpected packet from server (expected Exception, EndOfStream, Log, Progress or ProfileEvents. Got {})",
@@ -148,6 +148,7 @@ private:
     void cancelQuery();
 
     void onProgress(const Progress & value);
+    void onTimezoneUpdate(const String & tz);
     void onData(Block & block, ASTPtr parsed_query);
     void onLogData(Block & block);
     void onTotals(Block & block, ASTPtr parsed_query);
@@ -1022,6 +1022,11 @@ Packet Connection::receivePacket()
             res.block = receiveProfileEvents();
             return res;
 
+        case Protocol::Server::TimezoneUpdate:
+            readStringBinary(server_timezone, *in);
+            res.server_timezone = server_timezone;
+            return res;
+
         default:
             /// In unknown state, disconnect - to not leave unsynchronised connection.
             disconnect();
@@ -419,6 +419,7 @@ Packet HedgedConnections::receivePacketFromReplica(const ReplicaLocation & repli
             }
             replica_with_last_received_packet = replica_location;
             break;
+        case Protocol::Server::TimezoneUpdate:
         case Protocol::Server::PartUUIDs:
         case Protocol::Server::ProfileInfo:
         case Protocol::Server::Totals:
@@ -38,6 +38,8 @@ struct Packet
     ParallelReadRequest request;
     ParallelReadResponse response;
 
+    std::string server_timezone;
+
     Packet() : type(Protocol::Server::Hello) {}
 };
 
@@ -259,6 +259,7 @@ Packet MultiplexedConnections::drain()
 
         switch (packet.type)
         {
+            case Protocol::Server::TimezoneUpdate:
             case Protocol::Server::MergeTreeAllRangesAnnounecement:
             case Protocol::Server::MergeTreeReadTaskRequest:
             case Protocol::Server::ReadTaskRequest:
|
|||||||
|
|
||||||
switch (packet.type)
|
switch (packet.type)
|
||||||
{
|
{
|
||||||
|
case Protocol::Server::TimezoneUpdate:
|
||||||
case Protocol::Server::MergeTreeAllRangesAnnounecement:
|
case Protocol::Server::MergeTreeAllRangesAnnounecement:
|
||||||
case Protocol::Server::MergeTreeReadTaskRequest:
|
case Protocol::Server::MergeTreeReadTaskRequest:
|
||||||
case Protocol::Server::ReadTaskRequest:
|
case Protocol::Server::ReadTaskRequest:
|
||||||
|
@@ -160,6 +160,7 @@ void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & t
                 fillWordsFromBlock(packet.block);
                 continue;
 
+            case Protocol::Server::TimezoneUpdate:
             case Protocol::Server::Progress:
             case Protocol::Server::ProfileInfo:
             case Protocol::Server::Totals:
@@ -7,6 +7,7 @@
 
 #include <filesystem>
 #include <fstream>
+#include <Interpreters/Context.h>
 
 
 namespace

@@ -163,3 +164,8 @@ DateLUT & DateLUT::getInstance()
     static DateLUT ret;
     return ret;
 }
+
+std::string DateLUT::extractTimezoneFromContext(DB::ContextPtr query_context)
+{
+    return query_context->getSettingsRef().session_timezone.value;
+}
@@ -5,6 +5,7 @@
 #include <base/defines.h>
 
 #include <boost/noncopyable.hpp>
+#include "Common/CurrentThread.h"
 
 #include <atomic>
 #include <memory>

@@ -16,22 +17,59 @@
 class DateLUT : private boost::noncopyable
 {
 public:
-    /// Return singleton DateLUTImpl instance for the default time zone.
+    /// Return DateLUTImpl instance for session timezone.
+    /// session_timezone is a session-level setting.
+    /// If setting is not set, returns the server timezone.
     static ALWAYS_INLINE const DateLUTImpl & instance()
+    {
+        const auto & date_lut = getInstance();
+
+        if (DB::CurrentThread::isInitialized())
+        {
+            std::string timezone_from_context;
+            const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext();
+
+            if (query_context)
+            {
+                timezone_from_context = extractTimezoneFromContext(query_context);
+
+                if (!timezone_from_context.empty())
+                    return date_lut.getImplementation(timezone_from_context);
+            }
+
+            /// On the server side, timezone is passed in query_context,
+            /// but on CH-client side we have no query context,
+            /// and each time we modify client's global context
+            const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext();
+            if (global_context)
+            {
+                timezone_from_context = extractTimezoneFromContext(global_context);
+
+                if (!timezone_from_context.empty())
+                    return date_lut.getImplementation(timezone_from_context);
+            }
+
+        }
+        return serverTimezoneInstance();
+    }
+
+    static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone)
+    {
+        if (time_zone.empty())
+            return instance();
+
+        const auto & date_lut = getInstance();
+        return date_lut.getImplementation(time_zone);
+    }
+
+    /// Return singleton DateLUTImpl for the server time zone.
+    /// It may be set using 'timezone' server setting.
+    static ALWAYS_INLINE const DateLUTImpl & serverTimezoneInstance()
     {
         const auto & date_lut = getInstance();
         return *date_lut.default_impl.load(std::memory_order_acquire);
     }
 
-    /// Return singleton DateLUTImpl instance for a given time zone.
-    static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone)
-    {
-        const auto & date_lut = getInstance();
-        if (time_zone.empty())
-            return *date_lut.default_impl.load(std::memory_order_acquire);
-
-        return date_lut.getImplementation(time_zone);
-    }
-
     static void setDefaultTimezone(const std::string & time_zone)
     {
         auto & date_lut = getInstance();

@@ -45,6 +83,8 @@ protected:
 private:
     static DateLUT & getInstance();
+
+    static std::string extractTimezoneFromContext(DB::ContextPtr query_context);
 
     const DateLUTImpl & getImplementation(const std::string & time_zone) const;
 
     using DateLUTImplPtr = std::unique_ptr<DateLUTImpl>;
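After this change the parameterless DateLUT::instance() resolves the effective zone in order: the query context's session_timezone, then the client's global context, then the server default; serverTimezoneInstance() keeps the old "server zone only" behaviour. A short illustration of how calling code now picks a DateLUTImpl (the zone name is just an example, not taken from the diff):

const DateLUTImpl & session_lut = DateLUT::instance();                  /// honours the session_timezone setting when one is set
const DateLUTImpl & server_lut  = DateLUT::serverTimezoneInstance();    /// always the server's own time zone
const DateLUTImpl & berlin_lut  = DateLUT::instance("Europe/Berlin");   /// explicit zone; an empty string falls back to instance()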
@@ -24,9 +24,8 @@ private:
     unsigned char m_month;
     unsigned char m_day;
 
-    void init(time_t time)
+    void init(time_t time, const DateLUTImpl & date_lut)
     {
-        const auto & date_lut = DateLUT::instance();
         const auto & values = date_lut.getValues(time);
 
         m_year = values.year;

@@ -56,22 +55,22 @@ private:
     }
 
 public:
-    explicit LocalDate(time_t time)
+    explicit LocalDate(time_t time, const DateLUTImpl & time_zone = DateLUT::instance())
     {
-        init(time);
+        init(time, time_zone);
     }
 
-    LocalDate(DayNum day_num) /// NOLINT
+    LocalDate(DayNum day_num, const DateLUTImpl & time_zone = DateLUT::instance()) /// NOLINT
     {
-        const auto & values = DateLUT::instance().getValues(day_num);
+        const auto & values = time_zone.getValues(day_num);
         m_year = values.year;
         m_month = values.month;
         m_day = values.day_of_month;
     }
 
-    explicit LocalDate(ExtendedDayNum day_num)
+    explicit LocalDate(ExtendedDayNum day_num, const DateLUTImpl & time_zone = DateLUT::instance())
     {
-        const auto & values = DateLUT::instance().getValues(day_num);
+        const auto & values = time_zone.getValues(day_num);
         m_year = values.year;
         m_month = values.month;
         m_day = values.day_of_month;

@@ -99,15 +98,13 @@ public:
     LocalDate(const LocalDate &) noexcept = default;
     LocalDate & operator= (const LocalDate &) noexcept = default;
 
-    DayNum getDayNum() const
+    DayNum getDayNum(const DateLUTImpl & lut = DateLUT::instance()) const
     {
-        const auto & lut = DateLUT::instance();
         return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
     }
 
-    ExtendedDayNum getExtenedDayNum() const
+    ExtendedDayNum getExtenedDayNum(const DateLUTImpl & lut = DateLUT::instance()) const
    {
-       const auto & lut = DateLUT::instance();
        return ExtendedDayNum (lut.makeDayNum(m_year, m_month, m_day).toUnderType());
    }
 
@@ -83,7 +83,8 @@ namespace Protocol
             ProfileEvents = 14, /// Packet with profile events from server.
             MergeTreeAllRangesAnnounecement = 15,
             MergeTreeReadTaskRequest = 16, /// Request from a MergeTree replica to a coordinator
-            MAX = MergeTreeReadTaskRequest,
+            TimezoneUpdate = 17, /// Receive server's (session-wide) default timezone
+            MAX = TimezoneUpdate,
 
         };
 

@@ -111,6 +112,7 @@ namespace Protocol
             "ProfileEvents",
             "MergeTreeAllRangesAnnounecement",
             "MergeTreeReadTaskRequest",
+            "TimezoneUpdate",
         };
         return packet <= MAX
             ? data[packet]
@@ -53,7 +53,7 @@
 /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION,
 /// later is just a number for server version (one number instead of commit SHA)
 /// for simplicity (sometimes it may be more convenient in some use cases).
-#define DBMS_TCP_PROTOCOL_VERSION 54463
+#define DBMS_TCP_PROTOCOL_VERSION 54464
 
 #define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449
 

@@ -75,3 +75,5 @@
 #define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 54462
 
 #define DBMS_MIN_PROTOCOL_VERSION_WITH_TOTAL_BYTES_IN_PROGRESS 54463
+
+#define DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES 54464
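TimezoneUpdate is added as packet type 17, and the TCP protocol version is bumped to 54464 together with a dedicated minimum-version constant, so each side can tell whether its peer understands the new packet. A hedged sketch of the usual guard on the sending side (the function and its surroundings are illustrative and not part of this diff; only the constants and the two write helpers are existing ClickHouse names):

/// Illustrative only: push the session time zone to clients that negotiated a new enough protocol.
void maybeSendTimezoneUpdate(WriteBuffer & out, UInt64 client_tcp_protocol_version, const String & tz)
{
    if (client_tcp_protocol_version < DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES)
        return; /// An older client would treat the packet as unknown and disconnect.

    writeVarUInt(Protocol::Server::TimezoneUpdate, out);
    writeStringBinary(tz, out);
}

The receiving side in Connection::receivePacket() above mirrors this with readStringBinary into Packet::server_timezone.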
@@ -770,6 +770,7 @@ class IColumn;
     M(Bool, allow_experimental_undrop_table_query, false, "Allow to use undrop query to restore dropped table in a limited time", 0) \
     M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
     M(UInt64, extract_kvp_max_pairs_per_row, 1000, "Max number pairs that can be produced by extractKeyValuePairs function. Used to safeguard against consuming too much memory.", 0) \
+    M(Timezone, session_timezone, "", "The default timezone for current session or query. The server default timezone if empty.", 0) \
 // End of COMMON_SETTINGS
 // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.
 
@@ -13,7 +13,6 @@
 
 #include <cmath>
 
-
 namespace DB
 {
 namespace ErrorCodes

@@ -451,6 +450,17 @@ String SettingFieldEnumHelpers::readBinary(ReadBuffer & in)
     return str;
 }
 
+void SettingFieldTimezone::writeBinary(WriteBuffer & out) const
+{
+    writeStringBinary(value, out);
+}
+
+void SettingFieldTimezone::readBinary(ReadBuffer & in)
+{
+    String str;
+    readStringBinary(str, in);
+    *this = std::move(str);
+}
 
 String SettingFieldCustom::toString() const
 {
@@ -6,6 +6,7 @@
 #include <Core/Field.h>
 #include <Core/MultiEnum.h>
 #include <boost/range/adaptor/map.hpp>
+#include <cctz/time_zone.h>
 #include <chrono>
 #include <unordered_map>
 #include <string_view>

@@ -565,6 +566,42 @@ void SettingFieldMultiEnum<EnumT, Traits>::readBinary(ReadBuffer & in)
         return getEnumValues<EnumType>().size();\
     }
 
+/// Setting field for specifying user-defined timezone. It is basically a string, but it needs validation.
+struct SettingFieldTimezone
+{
+    String value;
+    bool changed = false;
+
+    explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; }
+    explicit SettingFieldTimezone(const String & str) { validateTimezone(str); value = str; }
+    explicit SettingFieldTimezone(String && str) { validateTimezone(str); value = std::move(str); }
+    explicit SettingFieldTimezone(const char * str) { validateTimezone(str); value = str; }
+    explicit SettingFieldTimezone(const Field & f) { const String & str = f.safeGet<const String &>(); validateTimezone(str); value = str; }
+
+    SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; }
+    SettingFieldTimezone & operator =(const String & str) { *this = std::string_view{str}; return *this; }
+    SettingFieldTimezone & operator =(String && str) { validateTimezone(str); value = std::move(str); changed = true; return *this; }
+    SettingFieldTimezone & operator =(const char * str) { *this = std::string_view{str}; return *this; }
+    SettingFieldTimezone & operator =(const Field & f) { *this = f.safeGet<const String &>(); return *this; }
+
+    operator const String &() const { return value; } /// NOLINT
+    explicit operator Field() const { return value; }
+
+    const String & toString() const { return value; }
+    void parseFromString(const String & str) { *this = str; }
+
+    void writeBinary(WriteBuffer & out) const;
+    void readBinary(ReadBuffer & in);
+
+private:
+    void validateTimezone(const std::string & tz_str)
+    {
+        cctz::time_zone validated_tz;
+        if (!tz_str.empty() && !cctz::load_time_zone(tz_str, &validated_tz))
+            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", tz_str);
+    }
+};
+
 /// Can keep a value of any type. Used for user-defined settings.
 struct SettingFieldCustom
 {
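SettingFieldTimezone validates the name eagerly via cctz::load_time_zone, so an unknown zone is rejected when the setting is assigned rather than surfacing later from DateLUT. The same check in isolation looks roughly like this (standalone sketch; only the cctz call is taken from the hunk):

#include <cctz/time_zone.h>
#include <string>

/// Returns true when `name` is empty (meaning "use the default") or is a zone known to the bundled tzdata.
bool isValidTimezoneName(const std::string & name)
{
    cctz::time_zone tz;
    return name.empty() || cctz::load_time_zone(name, &tz);
}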
@@ -417,6 +417,8 @@ private:
     {
         SentryWriter::onFault(sig, error_message, stack_trace);
 
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunreachable-code"
         /// Advice the user to send it manually.
         if constexpr (std::string_view(VERSION_OFFICIAL).contains("official build"))
         {

@@ -436,6 +438,8 @@ private:
         {
             LOG_FATAL(log, "This ClickHouse version is not official and should be upgraded to the official build.");
         }
+#pragma clang diagnostic pop
+
     }
 
     /// ClickHouse Keeper does not link to some part of Settings.

@@ -1042,7 +1046,7 @@ void BaseDaemon::shouldSetupWatchdog(char * argv0_)
 void BaseDaemon::setupWatchdog()
 {
     /// Initialize in advance to avoid double initialization in forked processes.
-    DateLUT::instance();
+    DateLUT::serverTimezoneInstance();
 
     std::string original_process_name;
     if (argv0)
@@ -21,7 +21,9 @@ namespace DB
   * all types with different time zones are equivalent and may be used interchangingly.
   * Time zone only affects parsing and displaying in text formats.
   *
-  * If time zone is not specified (example: DateTime without parameter), then default time zone is used.
+  * If time zone is not specified (example: DateTime without parameter),
+  * then `session_timezone` setting value is used.
+  * If `session_timezone` is not set (or empty string), server default time zone is used.
   * Default time zone is server time zone, if server is doing transformations
   * and if client is doing transformations, unless 'use_client_time_zone' setting is passed to client;
   * Server time zone is the time zone specified in 'timezone' parameter in configuration file,
@@ -123,9 +123,9 @@ llvm::Value * nativeCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type,
     }
     else if (to_type->isNullable())
     {
-        auto * from_native_type = toNativeType(b, from_type);
+        auto * to_native_type = toNativeType(b, to_type);
         auto * inner = nativeCast(b, from_type, value, removeNullable(to_type));
-        return b.CreateInsertValue(llvm::Constant::getNullValue(from_native_type), inner, {0});
+        return b.CreateInsertValue(llvm::Constant::getNullValue(to_native_type), inner, {0});
     }
     else
     {
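This hunk is a correctness fix in the JIT cast helper: when casting into a Nullable destination, the zero-initialised {value, is_null} aggregate that receives the converted payload must have the destination's native type, not the source's, otherwise the insert operates on a struct of the wrong layout whenever the two types differ. The corrected flow, restated with comments (names follow the hunk; this is an excerpt, not a complete function):

else if (to_type->isNullable())
{
    auto * to_native_type = toNativeType(b, to_type);                         /// e.g. the {payload, is_null} struct for the destination type
    auto * inner = nativeCast(b, from_type, value, removeNullable(to_type));  /// convert the payload first
    /// Start from an all-zero aggregate of the destination type and place the converted payload into slot 0.
    return b.CreateInsertValue(llvm::Constant::getNullValue(to_native_type), inner, {0});
}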
@@ -13,7 +13,7 @@ namespace DB
 
 void SerializationDate::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
 {
-    writeDateText(DayNum(assert_cast<const ColumnUInt16 &>(column).getData()[row_num]), ostr);
+    writeDateText(DayNum(assert_cast<const ColumnUInt16 &>(column).getData()[row_num]), ostr, time_zone);
 }
 
 void SerializationDate::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const

@@ -26,7 +26,7 @@ void SerializationDate::deserializeWholeText(IColumn & column, ReadBuffer & istr
 void SerializationDate::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
 {
     DayNum x;
-    readDateText(x, istr);
+    readDateText(x, istr, time_zone);
     assert_cast<ColumnUInt16 &>(column).getData().push_back(x);
 }
 

@@ -46,7 +46,7 @@ void SerializationDate::deserializeTextQuoted(IColumn & column, ReadBuffer & ist
 {
     DayNum x;
     assertChar('\'', istr);
-    readDateText(x, istr);
+    readDateText(x, istr, time_zone);
     assertChar('\'', istr);
     assert_cast<ColumnUInt16 &>(column).getData().push_back(x); /// It's important to do this at the end - for exception safety.
 }

@@ -62,7 +62,7 @@ void SerializationDate::deserializeTextJSON(IColumn & column, ReadBuffer & istr,
 {
     DayNum x;
     assertChar('"', istr);
-    readDateText(x, istr);
+    readDateText(x, istr, time_zone);
     assertChar('"', istr);
     assert_cast<ColumnUInt16 &>(column).getData().push_back(x);
 }

@@ -77,8 +77,12 @@ void SerializationDate::serializeTextCSV(const IColumn & column, size_t row_num,
 void SerializationDate::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
 {
     DayNum value;
-    readCSV(value, istr);
+    readCSV(value, istr, time_zone);
     assert_cast<ColumnUInt16 &>(column).getData().push_back(value);
 }
 
+SerializationDate::SerializationDate(const DateLUTImpl & time_zone_) : time_zone(time_zone_)
+{
+}
+
 }
@@ -1,6 +1,7 @@
 #pragma once
 
 #include <DataTypes/Serializations/SerializationNumber.h>
+#include <Common/DateLUT.h>
 
 namespace DB
 {

@@ -8,6 +9,8 @@ namespace DB
 class SerializationDate final : public SerializationNumber<UInt16>
 {
 public:
+    explicit SerializationDate(const DateLUTImpl & time_zone_ = DateLUT::instance());
+
     void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
     void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
     void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;

@@ -18,6 +21,9 @@ public:
     void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
     void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
     void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+
+protected:
+    const DateLUTImpl & time_zone;
 };
 
 }
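With the new constructor the Date serialization carries a reference to a DateLUTImpl, and the default argument means a default-constructed serialization follows whatever DateLUT::instance() resolves to (the session or server zone). A short usage sketch (the explicit "UTC" argument is only an example of injecting a fixed zone, not code from the diff):

SerializationDate date_serialization;                           /// text I/O follows the session/server time zone
SerializationDate utc_serialization(DateLUT::instance("UTC"));  /// text I/O pinned to an explicitly chosen zone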
@@ -11,7 +11,7 @@ namespace DB
 
 void SerializationDate32::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
 {
-    writeDateText(ExtendedDayNum(assert_cast<const ColumnInt32 &>(column).getData()[row_num]), ostr);
+    writeDateText(ExtendedDayNum(assert_cast<const ColumnInt32 &>(column).getData()[row_num]), ostr, time_zone);
 }
 
 void SerializationDate32::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const

@@ -24,7 +24,7 @@ void SerializationDate32::deserializeWholeText(IColumn & column, ReadBuffer & is
 void SerializationDate32::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
 {
     ExtendedDayNum x;
-    readDateText(x, istr);
+    readDateText(x, istr, time_zone);
     assert_cast<ColumnInt32 &>(column).getData().push_back(x);
 }
 

@@ -44,7 +44,7 @@ void SerializationDate32::deserializeTextQuoted(IColumn & column, ReadBuffer & i
 {
     ExtendedDayNum x;
     assertChar('\'', istr);
-    readDateText(x, istr);
+    readDateText(x, istr, time_zone);
     assertChar('\'', istr);
     assert_cast<ColumnInt32 &>(column).getData().push_back(x); /// It's important to do this at the end - for exception safety.
 }

@@ -60,7 +60,7 @@ void SerializationDate32::deserializeTextJSON(IColumn & column, ReadBuffer & ist
 {
     ExtendedDayNum x;
     assertChar('"', istr);
-    readDateText(x, istr);
+    readDateText(x, istr, time_zone);
     assertChar('"', istr);
     assert_cast<ColumnInt32 &>(column).getData().push_back(x);
 }

@@ -78,4 +78,8 @@ void SerializationDate32::deserializeTextCSV(IColumn & column, ReadBuffer & istr
     readCSV(value, istr);
     assert_cast<ColumnInt32 &>(column).getData().push_back(value.getExtenedDayNum());
 }
+
+SerializationDate32::SerializationDate32(const DateLUTImpl & time_zone_) : time_zone(time_zone_)
+{
+}
 }
@@ -1,12 +1,15 @@
 #pragma once
 
 #include <DataTypes/Serializations/SerializationNumber.h>
+#include <Common/DateLUT.h>
 
 namespace DB
 {
 class SerializationDate32 final : public SerializationNumber<Int32>
 {
 public:
+    explicit SerializationDate32(const DateLUTImpl & time_zone_ = DateLUT::instance());
+
     void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
     void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
     void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;

@@ -17,5 +20,8 @@ public:
     void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
     void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
     void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+
+protected:
+    const DateLUTImpl & time_zone;
 };
 }
@@ -985,7 +985,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
         const auto & create_query_string = metadata_it->second;
         if (isTableExist(table_name, getContext()))
         {
-            assert(create_query_string == readMetadataFile(table_name));
+            assert(create_query_string == readMetadataFile(table_name) || getTableUUIDIfReplicated(create_query_string, getContext()) != UUIDHelpers::Nil);
             continue;
         }
 

@@ -1274,7 +1274,7 @@ void DatabaseReplicated::commitAlterTable(const StorageID & table_id,
     const String & statement, ContextPtr query_context)
 {
     auto txn = query_context->getZooKeeperMetadataTransaction();
-    assert(!ddl_worker->isCurrentlyActive() || txn);
+    assert(!ddl_worker || !ddl_worker->isCurrentlyActive() || txn);
     if (txn && txn->isInitialQuery())
     {
         String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(table_id.table_name);
@@ -91,6 +91,7 @@ void DatabaseReplicatedDDLWorker::initializeReplication()
     if (zookeeper->tryGet(database->replica_path + "/digest", digest_str))
     {
         digest = parse<UInt64>(digest_str);
+        LOG_TRACE(log, "Metadata digest in ZooKeeper: {}", digest);
         std::lock_guard lock{database->metadata_mutex};
         local_digest = database->tables_metadata_digest;
     }
@@ -114,7 +114,10 @@ QueryPipeline ExecutableDictionarySource::loadAll()
     auto command = configuration.command;
     updateCommandIfNeeded(command, coordinator_configuration.execute_direct, context);
 
-    return QueryPipeline(coordinator->createPipe(command, configuration.command_arguments, sample_block, context));
+    ShellCommandSourceConfiguration command_configuration {
+        .check_exit_code = true,
+    };
+    return QueryPipeline(coordinator->createPipe(command, configuration.command_arguments, {}, sample_block, context, command_configuration));
 }
 
 QueryPipeline ExecutableDictionarySource::loadUpdatedAll()

@@ -148,7 +151,11 @@ QueryPipeline ExecutableDictionarySource::loadUpdatedAll()
     update_time = new_update_time;
 
     LOG_TRACE(log, "loadUpdatedAll {}", command);
-    return QueryPipeline(coordinator->createPipe(command, command_arguments, sample_block, context));
+
+    ShellCommandSourceConfiguration command_configuration {
+        .check_exit_code = true,
+    };
+    return QueryPipeline(coordinator->createPipe(command, command_arguments, {}, sample_block, context, command_configuration));
 }
 
 QueryPipeline ExecutableDictionarySource::loadIds(const std::vector<UInt64> & ids)

@@ -179,7 +186,11 @@ QueryPipeline ExecutableDictionarySource::getStreamForBlock(const Block & block)
     Pipes shell_input_pipes;
     shell_input_pipes.emplace_back(std::move(shell_input_pipe));
 
-    auto pipe = coordinator->createPipe(command, configuration.command_arguments, std::move(shell_input_pipes), sample_block, context);
+    ShellCommandSourceConfiguration command_configuration {
+        .check_exit_code = true,
+    };
+
+    auto pipe = coordinator->createPipe(command, configuration.command_arguments, std::move(shell_input_pipes), sample_block, context, command_configuration);
 
     if (configuration.implicit_key)
         pipe.addTransform(std::make_shared<TransformWithAdditionalColumns>(block, pipe.getHeader()));
@@ -132,6 +132,7 @@ QueryPipeline ExecutablePoolDictionarySource::getStreamForBlock(const Block & bl
     ShellCommandSourceConfiguration command_configuration;
     command_configuration.read_fixed_number_of_rows = true;
     command_configuration.number_of_rows_to_read = block.rows();
+    command_configuration.check_exit_code = true;
 
     Pipes shell_input_pipes;
     shell_input_pipes.emplace_back(std::move(shell_input_pipe));