Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 23:52:03 +00:00)
Alphabetize table functions and engines
Commit b5d9ed5b0c (parent cc3398159e)
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/ExternalDistributed
-sidebar_position: 12
+sidebar_position: 55
 sidebar_label: ExternalDistributed
 title: ExternalDistributed
 ---

@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/azureBlobStorage
+sidebar_position: 10
 sidebar_label: Azure Blob Storage
 ---
 
@@ -29,8 +30,8 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32)
 **Example**
 
 ``` sql
-CREATE TABLE test_table (key UInt64, data String)
-ENGINE = AzureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;',
+CREATE TABLE test_table (key UInt64, data String)
+ENGINE = AzureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;',
 'test_container', 'test_table', 'CSV');
 
 INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');

@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/deltalake
+sidebar_position: 40
 sidebar_label: DeltaLake
 ---
 
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/embedded-rocksdb
-sidebar_position: 9
+sidebar_position: 50
 sidebar_label: EmbeddedRocksDB
 ---
@@ -99,7 +99,7 @@ INSERT INTO test VALUES ('some key', 1, 'value', 3.2);
 
 ### Deletes
 
-Rows can be deleted using `DELETE` query or `TRUNCATE`.
+Rows can be deleted using `DELETE` query or `TRUNCATE`.
 
 ```sql
 DELETE FROM test WHERE key LIKE 'some%' AND v1 > 1;

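The `Deletes` hunk above shows only the `DELETE` form. For completeness, a sketch of the `TRUNCATE` alternative named in the same sentence, assuming the `test` table from the surrounding example:

```sql
-- Remove all rows from the EmbeddedRocksDB table in one statement
TRUNCATE TABLE test;
```
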
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/hdfs
-sidebar_position: 6
+sidebar_position: 80
 sidebar_label: HDFS
 ---
@@ -63,7 +63,7 @@ SELECT * FROM hdfs_engine_table LIMIT 2
 - `ALTER` and `SELECT...SAMPLE` operations.
 - Indexes.
 - [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is possible, but not recommended.
-
+
 :::note Zero-copy replication is not ready for production
 Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.
 :::

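The hunk header above references `hdfs_engine_table`. As a reminder of the shape of that engine declaration, a sketch — the URI and format mirror the hdfs examples later in this diff but are illustrative here:

```sql
-- Table backed by files in HDFS; URI and TSV format are illustrative
CREATE TABLE hdfs_engine_table (name String, value UInt32)
    ENGINE = HDFS('hdfs://hdfs1:9000/other_storage', 'TSV');

SELECT * FROM hdfs_engine_table LIMIT 2;
```
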
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/hive
-sidebar_position: 4
+sidebar_position: 84
 sidebar_label: Hive
 ---

@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/hudi
+sidebar_position: 86
 sidebar_label: Hudi
 ---
 
@@ -1,5 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/iceberg
+sidebar_position: 90
 sidebar_label: Iceberg
 ---
 
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/jdbc
-sidebar_position: 3
+sidebar_position: 100
 sidebar_label: JDBC
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/kafka
-sidebar_position: 8
+sidebar_position: 110
 sidebar_label: Kafka
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/materialized-postgresql
-sidebar_position: 12
+sidebar_position: 130
 sidebar_label: MaterializedPostgreSQL
 title: MaterializedPostgreSQL
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/mongodb
-sidebar_position: 5
+sidebar_position: 135
 sidebar_label: MongoDB
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/mysql
-sidebar_position: 4
+sidebar_position: 138
 sidebar_label: MySQL
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/nats
-sidebar_position: 14
+sidebar_position: 140
 sidebar_label: NATS
 ---
@@ -83,12 +83,12 @@ You can select one of the subjects the table reads from and publish your data th
 CREATE TABLE queue (
 key UInt64,
 value UInt64
-) ENGINE = NATS
+) ENGINE = NATS
 SETTINGS nats_url = 'localhost:4444',
 nats_subjects = 'subject1,subject2',
 nats_format = 'JSONEachRow';
 
-INSERT INTO queue
+INSERT INTO queue
 SETTINGS stream_like_engine_insert_queue = 'subject2'
 VALUES (1, 1);
 ```
@@ -102,7 +102,7 @@ Example:
 key UInt64,
 value UInt64,
 date DateTime
-) ENGINE = NATS
+) ENGINE = NATS
 SETTINGS nats_url = 'localhost:4444',
 nats_subjects = 'subject1',
 nats_format = 'JSONEachRow',
@@ -137,7 +137,7 @@ Example:
 CREATE TABLE queue (
 key UInt64,
 value UInt64
-) ENGINE = NATS
+) ENGINE = NATS
 SETTINGS nats_url = 'localhost:4444',
 nats_subjects = 'subject1',
 nats_format = 'JSONEachRow',

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/odbc
-sidebar_position: 2
+sidebar_position: 150
 sidebar_label: ODBC
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/postgresql
-sidebar_position: 11
+sidebar_position: 160
 sidebar_label: PostgreSQL
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/rabbitmq
-sidebar_position: 10
+sidebar_position: 170
 sidebar_label: RabbitMQ
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/redis
-sidebar_position: 43
+sidebar_position: 175
 sidebar_label: Redis
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/s3
-sidebar_position: 7
+sidebar_position: 180
 sidebar_label: S3
 ---
@@ -20,7 +20,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
 **Engine parameters**
 
 - `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
-- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
+- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
 - `format` — The [format](../../../interfaces/formats.md#formats) of the file.
 - `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
 - `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will auto-detect compression by file extension.

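To make the parameter list concrete, a sketch of a table over a public bucket — the bucket URL is hypothetical, and `NOSIGN` stands in for the two credential parameters:

```sql
-- Unsigned, read-only access to a hypothetical public bucket
CREATE TABLE s3_public_table (name String, value UInt32)
    ENGINE = S3('https://example-bucket.s3.amazonaws.com/data/*.csv', NOSIGN, 'CSV');
```
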
@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-engines/integrations/sqlite
-sidebar_position: 7
+sidebar_position: 185
 sidebar_label: SQLite
 ---

@@ -1,5 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/azureBlobStorage
+sidebar_position: 10
 sidebar_label: azureBlobStorage
 keywords: [azure blob storage]
 ---
@@ -34,16 +35,16 @@ A table with the specified structure for reading or writing data in the specifie
 Write data into azure blob storage using the following :
 
 ```sql
-INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1',
+INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1',
 'test_container', 'test_{_partition_id}.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
 'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32') PARTITION BY column3 VALUES (1, 2, 3), (3, 2, 1), (78, 43, 3);
 ```
 
-And then it can be read using
+And then it can be read using
 
 ```sql
-SELECT * FROM azureBlobStorage('http://azurite1:10000/devstoreaccount1',
-'test_container', 'test_1.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
+SELECT * FROM azureBlobStorage('http://azurite1:10000/devstoreaccount1',
+'test_container', 'test_1.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
 'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32');
 ```

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/cluster
-sidebar_position: 50
+sidebar_position: 30
 sidebar_label: cluster
 title: "cluster, clusterAllReplicas"
 ---
@@ -9,7 +9,7 @@ Allows to access all shards in an existing cluster which configured in `remote_s
 
 `clusterAllReplicas` function — same as `cluster`, but all replicas are queried. Each replica in a cluster is used as a separate shard/connection.
 
-:::note
+:::note
 All available clusters are listed in the [system.clusters](../../operations/system-tables/clusters.md) table.
 :::
@@ -23,9 +23,9 @@ clusterAllReplicas('cluster_name', db, table[, sharding_key])
 ```
 **Arguments**
 
-- `cluster_name` – Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
-- `db.table` or `db`, `table` - Name of a database and a table.
-- `sharding_key` - A sharding key. Optional. Needs to be specified if the cluster has more than one shard.
+- `cluster_name` – Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
+- `db.table` or `db`, `table` - Name of a database and a table.
+- `sharding_key` - A sharding key. Optional. Needs to be specified if the cluster has more than one shard.
 
 **Returned value**

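A minimal sketch of a call with these arguments; the cluster name `default` is a placeholder for whatever is configured in `remote_servers`:

```sql
-- One connection per shard
SELECT * FROM cluster('default', system.one);

-- Every replica of every shard queried as its own connection
SELECT * FROM clusterAllReplicas('default', system.one);
```
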
@@ -1,6 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/deltalake
-sidebar_label: DeltaLake
+sidebar_position: 45
+sidebar_label: deltaLake
 ---
 
 # deltaLake Table Function

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/dictionary
-sidebar_position: 54
+sidebar_position: 47
 sidebar_label: dictionary
 title: dictionary
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/engines/table-functions/executable
-sidebar_position: 55
+sidebar_position: 50
 sidebar_label: executable
 keywords: [udf, user defined function, clickhouse, executable, table, function]
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/file
-sidebar_position: 37
+sidebar_position: 60
 sidebar_label: file
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/format
-sidebar_position: 56
+sidebar_position: 65
 sidebar_label: format
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/gcs
-sidebar_position: 45
+sidebar_position: 70
 sidebar_label: gcs
 keywords: [gcs, bucket]
 ---
@@ -16,7 +16,7 @@ gcs(path [,hmac_key, hmac_secret] [,format] [,structure] [,compression])
 ```
 
 :::tip GCS
-The GCS Table Function integrates with Google Cloud Storage by using the GCS XML API and HMAC keys. See the [Google interoperability docs]( https://cloud.google.com/storage/docs/interoperability) for more details about the endpoint and HMAC.
+The GCS Table Function integrates with Google Cloud Storage by using the GCS XML API and HMAC keys. See the [Google interoperability docs]( https://cloud.google.com/storage/docs/interoperability) for more details about the endpoint and HMAC.
 
 :::

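A sketch of the signature shown in the hunk header with filled-in values; the bucket, object, HMAC pair, and column list are all hypothetical:

```sql
SELECT * FROM gcs(
    'https://storage.googleapis.com/my-bucket/data.csv',
    'my_hmac_key', 'my_hmac_secret',
    'CSV', 'column1 String, column2 UInt32');
```
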
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/generate
-sidebar_position: 47
+sidebar_position: 75
 sidebar_label: generateRandom
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/hdfs
-sidebar_position: 45
+sidebar_position: 80
 sidebar_label: hdfs
 ---
@@ -79,7 +79,7 @@ SELECT count(*)
 FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32')
 ```
 
-:::note
+:::note
 If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
 :::

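For example, to match `file_000` through `file_999` while preserving the leading zeros, brace each digit separately; the path is hypothetical:

```sql
SELECT count(*)
FROM hdfs('hdfs://hdfs1:9000/some_dir/file_{0..9}{0..9}{0..9}', 'TSV', 'name String, value UInt32')
```
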
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/hdfsCluster
-sidebar_position: 55
+sidebar_position: 81
 sidebar_label: hdfsCluster
 ---
@@ -50,7 +50,7 @@ SELECT count(*)
 FROM hdfsCluster('cluster_simple', 'hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32')
 ```
 
-:::note
+:::note
 If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
 :::

@@ -1,6 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/hudi
-sidebar_label: Hudi
+sidebar_position: 85
+sidebar_label: hudi
 ---
 
 # hudi Table Function

@@ -1,6 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/iceberg
-sidebar_label: Iceberg
+sidebar_position: 90
+sidebar_label: iceberg
 ---
 
 # iceberg Table Function

@@ -1,10 +1,10 @@
 ---
 slug: /en/sql-reference/table-functions/
 sidebar_label: Table Functions
-sidebar_position: 34
+sidebar_position: 1
 ---
 
-# Table Functions
+# Table Functions
 
 Table functions are methods for constructing tables.
 
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/input
-sidebar_position: 46
+sidebar_position: 95
 sidebar_label: input
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/jdbc
-sidebar_position: 43
+sidebar_position: 100
 sidebar_label: jdbc
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/merge
-sidebar_position: 38
+sidebar_position: 130
 sidebar_label: merge
 ---
@@ -16,7 +16,7 @@ merge('db_name', 'tables_regexp')
 **Arguments**
 
 - `db_name` — Possible values:
-    - database name,
+    - database name,
     - constant expression that returns a string with a database name, for example, `currentDatabase()`,
     - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.

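A minimal sketch of a call; the table-name pattern is hypothetical:

```sql
-- Read from every table in the current database whose name starts with events_
SELECT * FROM merge(currentDatabase(), '^events_');
```
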
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/mongodb
-sidebar_position: 42
+sidebar_position: 135
 sidebar_label: mongodb
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/mysql
-sidebar_position: 42
+sidebar_position: 137
 sidebar_label: mysql
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/null
-sidebar_position: 53
+sidebar_position: 140
 sidebar_label: null function
 title: 'null'
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/numbers
-sidebar_position: 39
+sidebar_position: 145
 sidebar_label: numbers
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/odbc
-sidebar_position: 44
+sidebar_position: 150
 sidebar_label: odbc
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/postgresql
-sidebar_position: 42
+sidebar_position: 160
 sidebar_label: postgresql
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/redis
-sidebar_position: 43
+sidebar_position: 170
 sidebar_label: redis
 ---
@@ -31,7 +31,7 @@ redis(host:port, key, structure[, db_index[, password[, pool_size]]])
 - `primary` must be specified, it supports only one column in the primary key. The primary key will be serialized in binary as a Redis key.
 
 - columns other than the primary key will be serialized in binary as Redis value in corresponding order.
-
+
 - queries with key equals or in filtering will be optimized to multi keys lookup from Redis. If queries without filtering key full table scan will happen which is a heavy operation.

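Tying the signature in the hunk header to the notes above, a hypothetical call; the host, structure, and key value are placeholders, and the `WHERE key = …` filter is the pattern that gets optimized into a direct key lookup:

```sql
SELECT * FROM redis('redis1:6379', 'key', 'key String, v1 String, v2 UInt32')
WHERE key = 'some_key';
```
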
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/remote
-sidebar_position: 40
+sidebar_position: 175
 sidebar_label: remote
 ---
@@ -89,10 +89,10 @@ SELECT * FROM remote_table;
 ```
 
 ### Migration of tables from one system to another:
-This example uses one table from a sample dataset. The database is `imdb`, and the table is `actors`.
+This example uses one table from a sample dataset. The database is `imdb`, and the table is `actors`.
 
 #### On the source ClickHouse system (the system that currently hosts the data)
-- Verify the source database and table name (`imdb.actors`)
+- Verify the source database and table name (`imdb.actors`)
 ```sql
 show databases
 ```

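The migration walkthrough continues past this hunk; the step that actually moves the rows is a pull run on the destination system, roughly of this shape (hostname and credentials are placeholders):

```sql
-- Run on the destination system after recreating imdb.actors there
INSERT INTO imdb.actors
SELECT * FROM remote('source-host:9000', imdb.actors, 'default', 'password');
```
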
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/s3
-sidebar_position: 45
+sidebar_position: 180
 sidebar_label: s3
 keywords: [s3, gcs, bucket]
 ---
@@ -33,7 +33,7 @@ For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_
 and not ~~https://storage.cloud.google.com~~.
 :::
 
-- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
+- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
 - `format` — The [format](../../interfaces/formats.md#formats) of the file.
 - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
 - `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.

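As with the S3 table engine earlier in this diff, `NOSIGN` takes the place of the key pair; a sketch of an unsigned read from a hypothetical public object:

```sql
SELECT * FROM s3('https://example-bucket.s3.amazonaws.com/data.csv', NOSIGN, 'CSV', 'name String, value UInt32');
```
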
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/s3Cluster
-sidebar_position: 55
+sidebar_position: 181
 sidebar_label: s3Cluster
 title: "s3Cluster Table Function"
 ---
@@ -31,18 +31,18 @@ Select the data from all the files in the `/root/data/clickhouse` and `/root/dat
 
 ``` sql
 SELECT * FROM s3Cluster(
-'cluster_simple',
-'http://minio1:9001/root/data/{clickhouse,database}/*',
-'minio',
-'minio123',
-'CSV',
+'cluster_simple',
+'http://minio1:9001/root/data/{clickhouse,database}/*',
+'minio',
+'minio123',
+'CSV',
 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))'
 ) ORDER BY (name, value, polygon);
 ```
 
 Count the total amount of rows in all files in the cluster `cluster_simple`:
 
-:::tip
+:::tip
 If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
 :::

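The count query promised by the sentence above is cut off by the hunk boundary; a sketch of what it would look like, reusing the same placeholder cluster and MinIO endpoint from the example:

```sql
SELECT count(*) FROM s3Cluster(
    'cluster_simple',
    'http://minio1:9001/root/data/{clickhouse,database}/*',
    'minio', 'minio123', 'CSV',
    'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))');
```
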
@@ -1,19 +1,19 @@
 ---
 slug: /en/sql-reference/table-functions/sqlite
-sidebar_position: 55
+sidebar_position: 185
 sidebar_label: sqlite
 title: sqlite
 ---
 
 Allows to perform queries on a data stored in an [SQLite](../../engines/database-engines/sqlite.md) database.
 
-**Syntax**
+**Syntax**
 
 ``` sql
 sqlite('db_path', 'table_name')
 ```
 
-**Arguments**
+**Arguments**
 
 - `db_path` — Path to a file with an SQLite database. [String](../../sql-reference/data-types/string.md).
 - `table_name` — Name of a table in the SQLite database. [String](../../sql-reference/data-types/string.md).
@@ -40,6 +40,6 @@ Result:
 └───────┴──────┘
 ```
 
-**See Also**
+**See Also**
 
 - [SQLite](../../engines/table-engines/integrations/sqlite.md) table engine

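A sketch of a call matching that syntax; the database file and table name are hypothetical:

```sql
SELECT * FROM sqlite('data.sqlite', 'users');
```
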
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/url
-sidebar_position: 41
+sidebar_position: 200
 sidebar_label: url
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/urlCluster
-sidebar_position: 55
+sidebar_position: 201
 sidebar_label: urlCluster
 ---

@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/view
-sidebar_position: 51
+sidebar_position: 210
 sidebar_label: view
 title: view
 ---