commit 29b605b4ea: Merge branch 'master' into rabbitmq-allow-multiple-hosts
@@ -42,15 +42,12 @@ RUN apt-get update \
             clang-tidy-10 \
             clang-tidy-11 \
             cmake \
-            cmake \
             curl \
             g++-9 \
             gcc-9 \
             gdb \
             git \
             gperf \
-            gperf \
-            intel-opencl-icd \
             libicu-dev \
             libreadline-dev \
             lld-10 \
@@ -61,10 +58,7 @@ RUN apt-get update \
             llvm-11-dev \
             moreutils \
             ninja-build \
-            ocl-icd-libopencl1 \
-            opencl-headers \
             pigz \
-            pixz \
             rename \
             tzdata \
             --yes --no-install-recommends
@@ -35,9 +35,6 @@ RUN apt-get update \
             libjemalloc-dev \
             libmsgpack-dev \
             libcurl4-openssl-dev \
-            opencl-headers \
-            ocl-icd-libopencl1 \
-            intel-opencl-icd \
             unixodbc-dev \
             odbcinst \
             tzdata \
@@ -14,9 +14,7 @@ RUN apt-get --allow-unauthenticated update -y \
             expect \
             gdb \
             gperf \
-            gperf \
             heimdal-multidev \
-            intel-opencl-icd \
             libboost-filesystem-dev \
             libboost-iostreams-dev \
             libboost-program-options-dev \
@@ -50,9 +48,7 @@ RUN apt-get --allow-unauthenticated update -y \
             moreutils \
             ncdu \
             netcat-openbsd \
-            ocl-icd-libopencl1 \
             odbcinst \
-            opencl-headers \
             openssl \
             perl \
             pigz \
@@ -27,53 +27,20 @@ Or cmake3 instead of cmake on older systems.
 
 On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):
 
 ```bash
 sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
 ```
 
 For other Linux distributions, check the availability of the [prebuilt packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).
 
-#### Use clang-11 for Builds {#use-gcc-10-for-builds}
+#### Use clang-11 for Builds
 
 ``` bash
 $ export CC=clang-11
 $ export CXX=clang++-11
 ```
 
-### Install GCC 10 {#install-gcc-10}
-
-We recommend building ClickHouse with clang-11, GCC-10 also supported, but it is not used for production builds.
-
-If you want to use GCC-10 there are several ways to install it.
-
-#### Install from Repository {#install-from-repository}
-
-On Ubuntu 19.10 or newer:
-
-    $ sudo apt-get update
-    $ sudo apt-get install gcc-10 g++-10
-
-#### Install from a PPA Package {#install-from-a-ppa-package}
-
-On older Ubuntu:
-
-``` bash
-$ sudo apt-get install software-properties-common
-$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
-$ sudo apt-get update
-$ sudo apt-get install gcc-10 g++-10
-```
-
-#### Install from Sources {#install-from-sources}
-
-See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
-
-#### Use GCC 10 for Builds {#use-gcc-10-for-builds}
-
-``` bash
-$ export CC=gcc-10
-$ export CXX=g++-10
-```
+GCC can also be used, though it is discouraged.
 
 ### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
 
@@ -131,17 +131,18 @@ ClickHouse uses several external libraries for building. All of them do not need
 
 ## C++ Compiler {#c-compiler}
 
-Compilers GCC starting from version 10 and Clang version 8 or above are supported for building ClickHouse.
+Clang starting from version 11 is supported for building ClickHouse.
 
-Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). And Clang is more convenient for development usually. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations.
+Clang should be used instead of gcc. Still, our continuous integration (CI) platform runs checks for about a dozen build combinations.
 
-To install GCC on Ubuntu run: `sudo apt install gcc g++`
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):
 
-Check the version of gcc: `gcc --version`. If it is below 10, then follow the instruction here: https://clickhouse.tech/docs/en/development/build/#install-gcc-10.
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```
 
-Mac OS X build is supported only for Clang. Just run `brew install llvm`
+Mac OS X build is also supported. Just run `brew install llvm`
 
-If you decide to use Clang, you can also install `libc++` and `lld`, if you know what it is. Using `ccache` is also recommended.
-
 ## The Building Process {#the-building-process}
 
@@ -152,14 +153,7 @@ Now that you are ready to build ClickHouse we recommend you to create a separate
 
 You can have several different directories (build_release, build_debug, etc.) for different types of build.
 
-While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify compiler (version 10 gcc compiler in this example).
+While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify the compiler.
 
-Linux:
-
-    export CC=gcc-10 CXX=g++-10
-    cmake ..
-
-Mac OS X:
-
     export CC=clang CXX=clang++
     cmake ..
@@ -701,7 +701,7 @@ But other things being equal, cross-platform or portable code is preferred.
 
 **2.** Language: C++20 (see the list of available [C++20 features](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).
 
-**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled using version 9.3. (It can also be compiled using `clang 8`.)
+**3.** Compiler: `clang`. At this time (April 2021), the code is compiled using clang version 11. (It can also be compiled using `gcc` version 10, but that is untested and not suitable for production use.)
 
 The standard library is used (`libc++`).
 
@@ -711,7 +711,7 @@ The standard library is used (`libc++`).
 
 The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2.
 
-**6.** Use `-Wall -Wextra -Werror` compilation flags.
+**6.** Use `-Wall -Wextra -Werror` compilation flags. `-Weverything` is also used, with a few exceptions.
 
 **7.** Use static linking with all libraries except those that are difficult to connect to statically (see the output of the `ldd` command).
 
@@ -12,6 +12,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
 3. Copy the link to `clickhouse` binary for amd64 or aarch64.
 4. ssh to the server and download it with wget:
 ```bash
+# These links are outdated, please obtain the fresh link from the "commits" page.
 # For amd64:
 wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse
 # For aarch64:

@@ -854,8 +854,6 @@ For example, when reading from a table, if it is possible to evaluate expression
 
 Default value: the number of physical CPU cores.
 
-If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.
-
 For queries that are completed quickly because of a LIMIT, you can set a lower ‘max_threads’. For example, if the necessary number of entries are located in every block and max_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.
 
 The smaller the `max_threads` value, the less memory is consumed.
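The LIMIT example above is worth pinning down with numbers. Below is a toy model, in C++ for concreteness, of the behavior described: it assumes a fixed block size and that every stream fetches one block before the LIMIT is checked. It is a sketch under those assumptions, not ClickHouse's actual scheduling logic:

```cpp
#include <algorithm>
#include <cassert>

// Toy model: each of the max_threads streams pulls one block before the LIMIT
// is checked, so at least max_threads blocks are fetched even when one block
// already holds enough rows; more are read only if the LIMIT requires them.
int blocksRead(int max_threads, int rows_per_block, int limit)
{
    // Blocks that would have been enough to satisfy the LIMIT.
    int needed = (limit + rows_per_block - 1) / rows_per_block;
    return std::max(max_threads, needed);
}

int main()
{
    // With max_threads = 8, 8 blocks are retrieved although one would suffice.
    assert(blocksRead(8, 65536, 10) == 8);
    // With max_threads = 1, a single block satisfies the same LIMIT.
    assert(blocksRead(1, 65536, 10) == 1);
}
```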
@@ -15,16 +15,16 @@ Columns:
 - `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
 
 - `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue, one of:
-    - `GET_PART` - Get the part from another replica.
-    - `ATTACH_PART` - Attach the part, possibly from our own replica (if found in `detached` folder).
-      You may think of it as a `GET_PART` with some optimisations as they're nearly identical.
-    - `MERGE_PARTS` - Merge the parts.
-    - `DROP_RANGE` - Delete the parts in the specified partition in the specified number range.
-    - `CLEAR_COLUMN` - NOTE: Deprecated. Drop specific column from specified partition.
-    - `CLEAR_INDEX` - NOTE: Deprecated. Drop specific index from specified partition.
-    - `REPLACE_RANGE` - Drop certain range of partitions and replace them by new ones
-    - `MUTATE_PART` - Apply one or several mutations to the part.
-    - `ALTER_METADATA` - Apply alter modification according to global /metadata and /columns paths
+    - `GET_PART` — Get the part from another replica.
+    - `ATTACH_PART` — Attach the part, possibly from our own replica (if found in the `detached` folder). You may think of it as a `GET_PART` with some optimizations, as they're nearly identical.
+    - `MERGE_PARTS` — Merge the parts.
+    - `DROP_RANGE` — Delete the parts in the specified partition in the specified number range.
+    - `CLEAR_COLUMN` — NOTE: Deprecated. Drop a specific column from the specified partition.
+    - `CLEAR_INDEX` — NOTE: Deprecated. Drop a specific index from the specified partition.
+    - `REPLACE_RANGE` — Drop a certain range of parts and replace them with new ones.
+    - `MUTATE_PART` — Apply one or several mutations to the part.
+    - `ALTER_METADATA` — Apply an alter modification according to the global /metadata and /columns paths.
 
 - `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
 
@@ -88,12 +88,10 @@ Read more about setting the partition expression in a section [How to specify th
 This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
 If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.
 
-If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own
-`detached` folder, it attaches the data without fetching it from other replicas.
+If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own `detached` folder, it attaches the data without fetching it from other replicas.
 If there is no part with the correct checksums, the data is downloaded from any replica having the part.
 
-You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the
-table on all replicas.
+You can put data in the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
 
 ## ATTACH PARTITION FROM {#alter_attach-partition-from}
 
@@ -101,8 +99,8 @@ table on all replicas.
 ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
 ```
 
-This query copies the data partition from the `table1` to `table2`.
-Note that data won't be deleted neither from `table1` nor from `table2`.
+This query copies the data partition from `table1` to `table2`.
+Note that data will not be deleted from either `table1` or `table2`.
 
 For the query to run successfully, the following conditions must be met:
 
@@ -264,9 +264,7 @@ Wait until a `ReplicatedMergeTree` table will be synced with other replicas in a
 SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
 ```
 
-After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from
-the common replicated log into its own replication queue, and then the query waits till the replica processes all
-of the fetched commands.
+After running this statement, the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands.
 
 ### RESTART REPLICA {#query_language-system-restart-replica}
 
@@ -19,28 +19,17 @@ $ sudo apt-get install git cmake python ninja-build
 
 Or cmake3 instead of cmake on older systems.
 
-## Install GCC 9 {#install-gcc-10}
+## Install Clang 11
 
-There are several ways to do this.
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):
 
-### Install from a PPA Package {#install-from-a-ppa-package}
-
-``` bash
-$ sudo apt-get install software-properties-common
-$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
-$ sudo apt-get update
-$ sudo apt-get install gcc-10 g++-10
-```
-
-### Install from Sources {#install-from-sources}
-
-See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
-
-## Use GCC 9 for Builds {#use-gcc-10-for-builds}
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```
 
 ``` bash
-$ export CC=gcc-10
-$ export CXX=g++-10
+$ export CC=clang
+$ export CXX=clang++
 ```
 
 ## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
@@ -76,7 +65,7 @@ $ cd ..
 - Git (used only to check out the sources, not needed for the build)
 - CMake 3.10 or newer
 - Ninja (recommended) or Make
-- C++ compiler: gcc 9 or clang 8 or newer
+- C++ compiler: clang 11 or newer
 - Linker: lld or gold (the classic GNU ld won't work)
 - Python (used only inside the LLVM build, optional)
 
@@ -133,19 +133,19 @@ If you use Arch or Gentoo, you probably know how to install CMake yourself
 
 ClickHouse uses several external libraries for building. All of them are built together with ClickHouse from the sources in submodules, so you do not need to install them separately. The list can be checked in `contrib`.
 
-# C++ Compiler {#c-compiler}
+## C++ Compiler {#c-compiler}
 
-GCC starting from version 9 and Clang starting from version 8 are supported for building ClickHouse.
+Clang starting from version 11 is supported for building ClickHouse.
 
-The official Yandex builds use GCC because it generates machine code of slightly better performance (up to several percent according to our benchmarks), while Clang is usually more convenient for development. Still, our continuous integration (CI) platform runs checks for about a dozen build combinations.
+Clang should be used instead of gcc. Still, our continuous integration (CI) platform runs checks for about a dozen build combinations.
 
-To install GCC on Ubuntu: `sudo apt install gcc g++`
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):
 
-Check the gcc version: `gcc --version`. If it is below 9, follow the instructions here: https://clickhouse.tech/docs/ja/development/build/#install-gcc-10.
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```
 
-The Mac OS X build is supported only for Clang. Just run `brew install llvm`
+Mac OS X build is also supported. Just run `brew install llvm`
 
-If you use Clang, you can also install `libc++` and `lld`, if you know what they are. Using `ccache` is also recommended.
-
 # The Building Process {#the-building-process}
 
@@ -158,13 +158,6 @@ Now that you are ready to build ClickHouse, create a separate directory for the build
 
 While inside the `build` directory, run cmake to configure the build. Before the first run, you need to define environment variables that specify the compiler (the gcc 9 compiler in this example).
 
-Linux:
-
-    export CC=gcc-10 CXX=g++-10
-    cmake ..
-
-Mac OS X:
-
     export CC=clang CXX=clang++
     cmake ..
 
@@ -136,18 +136,18 @@ ClickHouse uses a number of external libraries for the build
 
 ## C++ Compiler {#kompiliator-c}
 
-As the C++ compiler, GCC starting from version 9 or Clang starting from version 8 is supported.
+As the C++ compiler, Clang starting from version 11 is supported.
 
-The official Yandex builds currently use GCC, since it generates machine code with slightly better performance (up to several percent according to our benchmarks). Clang is usually more convenient for development. Still, our continuous integration environment checks about a dozen build variants.
+Still, our continuous integration environment checks about a dozen build variants, including gcc, but builds made with gcc are unsuitable for production use.
 
-To install GCC on Ubuntu, run: `sudo apt install gcc g++`.
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):
 
-Check the gcc version: `gcc --version`. If it is below 10, follow the instructions here: https://clickhouse.tech/docs/ru/development/build/#install-gcc-10.
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```
 
 The Mac OS X build is supported only for the Clang compiler. To install it, run `brew install llvm`
 
-If you decided to use Clang, you can also install `libc++` and `lld`, if you know what they are. If you wish, install `ccache`.
-
 ## The Building Process {#protsess-sborki}
 
 Now you are ready to build ClickHouse. To place the built files, it is recommended to create a separate directory build inside the ClickHouse directory:
@@ -158,14 +158,7 @@ ClickHouse uses a number of external libraries for the build
 You can have several different directories (build_release, build_debug) for different build variants.
 
 While inside the build directory, configure the build by running CMake.
-Before the first run, you need to define environment variables responsible for choosing the compiler (gcc version 9 in this example).
+Before the first run, you need to define environment variables responsible for choosing the compiler.
 
-Linux:
-
-    export CC=gcc-10 CXX=g++-10
-    cmake ..
-
-Mac OS X:
-
     export CC=clang CXX=clang++
     cmake ..
@@ -747,7 +747,7 @@ The dictionary is configured incorrectly.
 There are two main ways to check for such errors:
 
 * An exception with the code `LOGICAL_ERROR`. It can be used for important checks that are performed even in release builds.
 * `assert`. Such conditions are not checked in release builds; they can be used for heavy or optional checks.
 
 An example of a message that should have the `LOGICAL_ERROR` code:
 `Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
@@ -780,7 +780,7 @@ The dictionary is configured incorrectly.
 
 **2.** Language: C++20 (see the list of available [C++20 features](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).
 
-**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled with version 9.3. (It can also be compiled with `clang` versions 10 and 9.)
+**3.** Compiler: `clang`. At this time (April 2021), the code is compiled with version 11. (It can also be compiled with `gcc` version 10, but such builds are untested and unsuitable for production.)
 
 The standard library is used (the `libc++` implementation).
 
@@ -844,8 +844,6 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'
 
 Default value: the number of CPU cores, not counting Hyper-Threading.
 
-If fewer than one SELECT query normally runs on the server at a time, set this parameter to a value slightly less than the actual number of processor cores.
-
 For queries that complete quickly because of a LIMIT, it makes sense to set max_threads lower. For example, if the necessary number of entries is located in every block, then with max_threads = 8, 8 blocks will be read, although it would have been enough to read just one.
 
 The smaller the `max_threads` value, the less memory is consumed.
@@ -14,7 +14,17 @@
 
 - `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
 
-- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`.
+- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue:
+
+    - `GET_PART` — Fetch the part from another replica.
+    - `ATTACH_PART` — Attach the part, possibly from our own replica (if it is found in the `detached` folder). This task is nearly identical to `GET_PART`, just slightly optimized.
+    - `MERGE_PARTS` — Merge the parts.
+    - `DROP_RANGE` — Delete the parts in the specified partition within the specified number range.
+    - `CLEAR_COLUMN` — Delete the specified column from the specified partition. Note: not used since 20.4.
+    - `CLEAR_INDEX` — Delete the specified index from the specified partition. Note: not used since 20.4.
+    - `REPLACE_RANGE` — Delete the specified range of parts and replace them with new ones.
+    - `MUTATE_PART` — Apply one or several mutations to the part.
+    - `ALTER_METADATA` — Apply table structure changes resulting from queries with `ALTER`.
 
 - `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
 
@@ -77,4 +87,3 @@ last_postpone_time: 1970-01-01 03:00:00
 **See Also**
 
 - [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md#query-language-system-replicated)
-
@@ -18,10 +18,12 @@ ClickHouse creates this table when the setting is set
 
 When connecting to the server through `clickhouse-client`, you see a string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the number after `revision`, but not the string after `version`.
 
-- `timer_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Timer type:
+- `trace_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Trace type:
 
-    - `Real` means wall-clock time.
-    - `CPU` means relative CPU time.
+    - `Real` — Collecting call-stack traces by wall-clock time.
+    - `CPU` — Collecting call-stack traces by CPU time.
+    - `Memory` — Collecting allocations when the amount of allocated memory exceeds a relative increment.
+    - `MemorySample` — Collecting randomly sampled allocations.
 
 - `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Thread identifier.
 
@@ -38,7 +38,7 @@ ALTER TABLE mt DETACH PART 'all_2_2_0';
 
 After the query is executed, you can do whatever you want with the data in the `detached` directory. For example, you can delete it from the file system.
 
-The query is replicated: the data will be moved to the `detached` directory and forgotten on all replicas. Note that the query can only be sent to a leader replica. To find out whether a replica is a leader, perform a `SELECT` query to the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) system table. Alternatively, you can issue a `DETACH` query on all replicas: then on all replicas except the leader replica the query returns an error.
+The query is replicated: the data will be moved to the `detached` directory and forgotten on all replicas. Note that the query can only be sent to a leader replica. To find out whether a replica is a leader, perform a `SELECT` query to the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) system table. Alternatively, you can issue a `DETACH` query on all replicas: then on all replicas except the leader replicas (since multiple leaders are allowed) the query returns an error.
 
 ## DROP PARTITION\|PART {#alter_drop-partition}
 
@@ -83,9 +83,13 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;
 
 For how to set the partition or part name correctly, see [How to set the partition expression in ALTER queries](#alter-how-to-specify-part-expr).
 
-This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. If the data exists, the query checks its integrity. If everything is correct, the data is added to the table. All the other replicas download the data from the replica-initiator.
+This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
+If the data exists, the query checks its integrity. If everything is correct, the data is added to the table.
 
-This means that you can place data in the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
+If the non-initiator replica, on receiving the attach command, finds a part with the correct checksums in its own `detached` folder, it attaches the data without downloading it from other replicas.
+If there is no part with the correct checksums, the data is downloaded from any replica that has the part.
+
+You can place data in the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
 
 ## ATTACH PARTITION FROM {#alter_attach-partition-from}
 
@@ -93,7 +97,8 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;
 ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
 ```
 
-This query copies the data partition from `table1` to `table2` and adds it to the existing data of `table2`. The data in `table1` is not deleted.
+This query copies the data partition from `table1` to `table2`.
+Note that data is not deleted from either `table1` or `table2`.
 
 Keep in mind:
 
@@ -305,4 +310,3 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
 `IN PARTITION` specifies the partition to which the [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) or [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) expressions are applied as a result of the `ALTER TABLE` query. New parts are created only in the specified partition. In this way, `IN PARTITION` helps to reduce the load when the table is divided into many partitions and you only need to update the data point by point.
 
 Examples of `ALTER ... PARTITION` queries can be found in the tests: [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).
-
@@ -204,6 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]
 ClickHouse can manage background replication-related processes in tables of the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) family.
+
 ### STOP FETCHES {#query_language-system-stop-fetches}
 
 Allows stopping the background processes that synchronize newly inserted parts of data with other replicas in the cluster for tables of the `ReplicatedMergeTree` family:
 Always returns `Ok.` regardless of the table type and even if the table or database does not exist.
 
@@ -212,6 +213,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name]
 ```
+
 ### START FETCHES {#query_language-system-start-fetches}
 
 Allows starting the background processes that synchronize newly inserted parts of data with other replicas in the cluster for tables of the `ReplicatedMergeTree` family:
 Always returns `Ok.` regardless of the table type and even if the table or database does not exist.
 
@@ -220,6 +222,7 @@ SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name]
 ```
+
 ### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends}
 
 Allows stopping the background processes that send newly inserted parts of data to other replicas in the cluster for tables of the `ReplicatedMergeTree` family:
 
 ``` sql
@@ -227,6 +230,7 @@ SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
 ```
+
 ### START REPLICATED SENDS {#query_language-system-start-replicated-sends}
 
 Allows starting the background processes that send newly inserted parts of data to other replicas in the cluster for tables of the `ReplicatedMergeTree` family:
 
 ``` sql
@@ -234,6 +238,7 @@ SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
 ```
+
 ### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues}
 
 Stops the background processes that execute tasks from the replication queue stored in ZooKeeper for tables of the `ReplicatedMergeTree` family. Possible task types: merges, fetches, mutations, DDL queries with ON CLUSTER:
 
 ``` sql
@@ -241,6 +246,7 @@ SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
 ```
+
 ### START REPLICATION QUEUES {#query_language-system-start-replication-queues}
 
 Starts the background processes that execute tasks from the replication queue stored in ZooKeeper for tables of the `ReplicatedMergeTree` family. Possible task types: merges, fetches, mutations, DDL queries with ON CLUSTER:
 
 ``` sql
@@ -248,20 +254,24 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
 ```
 
 ### SYNC REPLICA {#query_language-system-sync-replica}
 
 Waits until a table of the `ReplicatedMergeTree` family is synchronized with the other replicas in the cluster; runs until `receive_timeout` is reached if synchronization is currently disabled for the table:
 
 ``` sql
 SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
 ```
 
+After this query is executed, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue. The query then waits until the replica processes all of the fetched commands.
+
 ### RESTART REPLICA {#query_language-system-restart-replica}
-Reinitialization of the ZooKeeper session state for a `ReplicatedMergeTree` table; compares the current state with what is stored in ZooKeeper as the source of truth and adds tasks to the ZooKeeper queue if necessary
-The replication queue is initialized based on ZooKeeper data, in the same way as on attach table. For a short time the table becomes unavailable for any operations.
+Reinitializes the ZooKeeper session state for a `ReplicatedMergeTree` table. Compares the current state with what is stored in ZooKeeper as the source of truth and, if necessary, adds tasks to the replication queue in ZooKeeper.
+The replication queue is initialized based on ZooKeeper data, in the same way as on attach table. For a short time the table becomes unavailable for any operations.
 
 ``` sql
 SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 ```
 
 ### RESTART REPLICAS {#query_language-system-restart-replicas}
-Reinitialization of the ZooKeeper session state for all `ReplicatedMergeTree` tables; compares the current state with what is stored in ZooKeeper as the source of truth and adds tasks to the ZooKeeper queue if necessary
+
+Reinitializes the ZooKeeper session state for all `ReplicatedMergeTree` tables. Compares the current replica state with what is stored in ZooKeeper as the source of truth and, if necessary, adds tasks to the replication queue in ZooKeeper.
@@ -35,28 +35,12 @@ sudo apt-get install git cmake ninja-build
 Or cmake3 instead of cmake on older systems.
 On older system versions, use cmake3 in place of cmake.
 
-## Install GCC 10 {#an-zhuang-gcc-10}
+## Install Clang
 
-There are several ways to do this.
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):
 
-### Install from a PPA Package {#an-zhuang-ppa-bao}
-
-``` bash
-sudo apt-get install software-properties-common
-sudo apt-add-repository ppa:ubuntu-toolchain-r/test
-sudo apt-get update
-sudo apt-get install gcc-10 g++-10
-```
-
-### Install GCC from Sources {#yuan-ma-an-zhuang-gcc}
-
-See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
-
-## Use GCC 10 to Build {#shi-yong-gcc-10-lai-bian-yi}
-
-``` bash
-export CC=gcc-10
-export CXX=g++-10
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
 ```
 
 ## Checkout ClickHouse Sources {#la-qu-clickhouse-yuan-ma-1}
@@ -123,17 +123,13 @@ ClickHouse uses several external libraries for the build; most of them do not need to be installed separately
 
 # C++ Compiler {#c-bian-yi-qi}
 
-GCC starting from version 9 and Clang version >= 8 are supported for building ClickHouse.
+We support clang starting from version 11.
 
-Yandex officially builds ClickHouse with GCC at the moment, because the machine code it generates performs slightly better (up to several percent in our benchmarks). Clang is usually more convenient for development. Our continuous integration (CI) platform runs checks for about a dozen build combinations.
+On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):
 
-To install GCC on Ubuntu, run: `sudo apt install gcc g++`
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```
 
-Use `gcc --version` to check the gcc version. If it is below 9, follow the instructions here: https://clickhouse.tech/docs/zh/development/build/#an-zhuang-gcc-10 .
-
-To install GCC on Mac OS X, run: `brew install gcc`
-
-If you decide to use Clang, you can also install `libc++` and `lld`, provided you are familiar with them. Using `ccache` is also recommended.
 
 # The Building Process {#gou-jian-de-guo-cheng}
 
@@ -146,7 +142,7 @@ Yandex officially builds ClickHouse with GCC at the moment, because the machine code it generates
 
 In the `build` directory, configure the build by running CMake. Before the first run, define the environment variables that specify the compiler (the gcc 9 compiler in this example).
 
-export CC=gcc-10 CXX=g++-10
+export CC=clang CXX=clang++
 cmake ..
 
 The `CC` variable specifies the C compiler, and the `CXX` variable specifies which C++ compiler to use for the build.
@@ -696,7 +696,7 @@ auto s = std::string{"Hello"};
 
 **2.** Language: C++20.
 
-**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled with version 9.3. (It can also be compiled with `clang 8`.)
+**3.** Compiler: `clang`. At this time (March 2021), the code is compiled with clang version 11. (It can also be compiled with `gcc`, but that is not suitable for production.)
 
 The standard library is used (`libc++`).
 
@@ -360,7 +360,8 @@ inline bool isEnum(const DataTypePtr & data_type) { return WhichDataType(data_ty
 inline bool isDecimal(const DataTypePtr & data_type) { return WhichDataType(data_type).isDecimal(); }
 inline bool isTuple(const DataTypePtr & data_type) { return WhichDataType(data_type).isTuple(); }
 inline bool isArray(const DataTypePtr & data_type) { return WhichDataType(data_type).isArray(); }
-inline bool isMap(const DataTypePtr & data_type) {return WhichDataType(data_type).isMap(); }
+inline bool isMap(const DataTypePtr & data_type) { return WhichDataType(data_type).isMap(); }
+inline bool isNothing(const DataTypePtr & data_type) { return WhichDataType(data_type).isNothing(); }
 
 template <typename T>
 inline bool isUInt8(const T & data_type)
@@ -2496,7 +2496,7 @@ private:
         }
     }
 
-    WrapperType createArrayWrapper(const DataTypePtr & from_type_untyped, const DataTypeArray * to_type) const
+    WrapperType createArrayWrapper(const DataTypePtr & from_type_untyped, const DataTypeArray & to_type) const
     {
         /// Conversion from String through parsing.
         if (checkAndGetDataType<DataTypeString>(from_type_untyped.get()))
@@ -2507,24 +2507,23 @@ private:
             };
         }
 
-        DataTypePtr from_nested_type;
-        DataTypePtr to_nested_type;
         const auto * from_type = checkAndGetDataType<DataTypeArray>(from_type_untyped.get());
-
-        /// get the most nested type
-        if (from_type && to_type)
+        if (!from_type)
         {
-            from_nested_type = from_type->getNestedType();
-            to_nested_type = to_type->getNestedType();
-
-            from_type = checkAndGetDataType<DataTypeArray>(from_nested_type.get());
-            to_type = checkAndGetDataType<DataTypeArray>(to_nested_type.get());
+            throw Exception(ErrorCodes::TYPE_MISMATCH,
+                "CAST AS Array can only be performed between same-dimensional Array or String types");
         }
 
-        /// both from_type and to_type should be nullptr now is array types had same dimensions
-        if ((from_type == nullptr) != (to_type == nullptr))
-            throw Exception{"CAST AS Array can only be performed between same-dimensional array types or from String",
-                ErrorCodes::TYPE_MISMATCH};
+        DataTypePtr from_nested_type = from_type->getNestedType();
+
+        /// In query SELECT CAST([] AS Array(Array(String))), the type of [] is Array(Nothing).
+        bool from_empty_array = isNothing(from_nested_type);
+
+        if (from_type->getNumberOfDimensions() != to_type.getNumberOfDimensions() && !from_empty_array)
+            throw Exception(ErrorCodes::TYPE_MISMATCH,
+                "CAST AS Array can only be performed between same-dimensional array types");
+
+        const DataTypePtr & to_nested_type = to_type.getNestedType();
 
         /// Prepare nested type conversion
         const auto nested_function = prepareUnpackDictionaries(from_nested_type, to_nested_type);
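The rewritten wrapper enforces one rule worth spelling out: CAST AS Array requires equal array dimensionality on both sides, except when the source is the empty-array literal `[]`, whose element type is `Nothing` (see the new tests at the end of this commit). The following is a simplified, self-contained sketch of that rule, using plain structs rather than the real `IDataType` hierarchy:

```cpp
#include <cassert>

// Toy stand-in for a data type: an array depth plus a flag that models
// Array(Nothing), the type of the empty-array literal [].
struct Type
{
    int array_dimensions = 0; // 0 means a scalar type
    bool is_nothing = false;  // models a Nothing element type
};

bool castAllowed(const Type & from, const Type & to)
{
    if (from.array_dimensions == 0 || to.array_dimensions == 0)
        return false; // CAST AS Array needs arrays on both sides

    // SELECT CAST([] AS Array(Array(String))): the source is Array(Nothing),
    // so any target depth is acceptable.
    bool from_empty_array = from.is_nothing;
    return from_empty_array || from.array_dimensions == to.array_dimensions;
}

int main()
{
    assert(castAllowed({2, false}, {2, false}));  // same dimensions
    assert(!castAllowed({1, false}, {2, false})); // mismatched dimensions
    assert(castAllowed({1, true}, {3, false}));   // [] casts to any depth
}
```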
@@ -3090,14 +3089,12 @@ private:
                 return createStringWrapper(from_type);
             case TypeIndex::FixedString:
                 return createFixedStringWrapper(from_type, checkAndGetDataType<DataTypeFixedString>(to_type.get())->getN());
-
             case TypeIndex::Array:
-                return createArrayWrapper(from_type, checkAndGetDataType<DataTypeArray>(to_type.get()));
+                return createArrayWrapper(from_type, static_cast<const DataTypeArray &>(*to_type));
             case TypeIndex::Tuple:
                 return createTupleWrapper(from_type, checkAndGetDataType<DataTypeTuple>(to_type.get()));
             case TypeIndex::Map:
                 return createMapWrapper(from_type, checkAndGetDataType<DataTypeMap>(to_type.get()));
-
             case TypeIndex::AggregateFunction:
                 return createAggregateFunctionWrapper(from_type, checkAndGetDataType<DataTypeAggregateFunction>(to_type.get()));
             default:
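The `static_cast` at this call site is the counterpart of the signature change above: the switch on the type tag has already established the dynamic type, so a checked cast that can return a null pointer would be redundant. A minimal, self-contained sketch of the pattern, with hypothetical mini-types rather than the real ones:

```cpp
#include <cassert>

enum class TypeIndex { String, Array };

struct IDataType
{
    TypeIndex index;
    explicit IDataType(TypeIndex i) : index(i) {}
    virtual ~IDataType() = default;
};

struct DataTypeArray : IDataType
{
    DataTypeArray() : IDataType(TypeIndex::Array) {}
    int dimensions = 1;
};

int arrayDimensions(const IDataType & type)
{
    switch (type.index)
    {
        case TypeIndex::Array:
            // The tag guarantees the dynamic type, so no runtime check is needed.
            return static_cast<const DataTypeArray &>(type).dimensions;
        default:
            return 0;
    }
}

int main()
{
    DataTypeArray arr;
    assert(arrayDimensions(arr) == 1);
}
```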
@@ -24,7 +24,6 @@ namespace ErrorCodes
 ArrowBlockInputFormat::ArrowBlockInputFormat(ReadBuffer & in_, const Block & header_, bool stream_)
     : IInputFormat(header_, in_), stream{stream_}
 {
-    prepareReader();
 }
 
 Chunk ArrowBlockInputFormat::generate()
@@ -35,12 +34,18 @@ Chunk ArrowBlockInputFormat::generate()
 
     if (stream)
     {
+        if (!stream_reader)
+            prepareReader();
+
         batch_result = stream_reader->Next();
         if (batch_result.ok() && !(*batch_result))
             return res;
     }
     else
     {
+        if (!file_reader)
+            prepareReader();
+
         if (record_batch_current >= record_batch_total)
             return res;
 
@@ -71,7 +76,7 @@ void ArrowBlockInputFormat::resetParser()
         stream_reader.reset();
     else
         file_reader.reset();
-    prepareReader();
+    record_batch_current = 0;
 }
 
 void ArrowBlockInputFormat::prepareReader()
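These three hunks move reader construction out of the constructor: both branches of `generate()` now create the reader on first use, and `resetParser()` merely drops the readers and rewinds the batch cursor. A self-contained sketch of the same lazy-initialization pattern, with a stand-in `Reader` rather than the Arrow classes:

```cpp
#include <cassert>
#include <memory>

struct Reader { int batches_read = 0; };

class InputFormat
{
public:
    int generate()
    {
        if (!reader)                // lazy initialization on first use
            reader = std::make_unique<Reader>();
        return ++reader->batches_read;
    }

    void resetParser()
    {
        reader.reset();             // do not eagerly re-create the reader
        record_batch_current = 0;   // rewind the cursor instead
    }

private:
    std::unique_ptr<Reader> reader;
    int record_batch_current = 0;
};

int main()
{
    InputFormat f;
    assert(f.generate() == 1);
    f.resetParser();
    assert(f.generate() == 1); // the reader was re-created lazily
}
```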
@@ -12,7 +12,16 @@ namespace DB
 class Context;
 
 
-/** Base class for system tables whose all columns have String type.
+/** IStorageSystemOneBlock is the base class for system tables whose columns can all be fetched synchronously.
+  *
+  * The client class must provide a static method NamesAndTypesList getNamesAndTypes() that returns the list of
+  * column names and their types. During read, IStorageSystemOneBlock will create the result columns in the same
+  * order as the result of getNamesAndTypes and pass them to the fillData method.
+  *
+  * The client must also override fillData and fill the result columns.
+  *
+  * If a subclass wants to support virtual columns, it should override the getVirtuals method of the IStorage interface.
+  * IStorageSystemOneBlock will add the virtual columns after the result columns of the fillData method.
   */
 template <typename Self>
 class IStorageSystemOneBlock : public IStorage
@@ -41,9 +50,10 @@ public:
         size_t /*max_block_size*/,
         unsigned /*num_streams*/) override
     {
-        metadata_snapshot->check(column_names, getVirtuals(), getStorageID());
+        auto virtuals_names_and_types = getVirtuals();
+        metadata_snapshot->check(column_names, virtuals_names_and_types, getStorageID());
 
-        Block sample_block = metadata_snapshot->getSampleBlock();
+        Block sample_block = metadata_snapshot->getSampleBlockWithVirtuals(virtuals_names_and_types);
         MutableColumns res_columns = sample_block.cloneEmptyColumns();
         fillData(res_columns, context, query_info);
 
|
@@ -50,6 +50,13 @@ NamesAndTypesList StorageSystemDictionaries::getNamesAndTypes()
     };
 }

+NamesAndTypesList StorageSystemDictionaries::getVirtuals() const
+{
+    return {
+        {"key", std::make_shared<DataTypeString>()}
+    };
+}
+
 void StorageSystemDictionaries::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & /*query_info*/) const
 {
     const auto access = context->getAccess();

@@ -128,6 +135,9 @@ void StorageSystemDictionaries::fillData(MutableColumns & res_columns, ContextPt
         else
             res_columns[i++]->insertDefault();

+        /// Start fill virtual columns
+
+        res_columns[i++]->insert(dictionary_structure.getKeyDescription());
     }
 }
@@ -18,6 +18,8 @@ public:

     static NamesAndTypesList getNamesAndTypes();

+    NamesAndTypesList getVirtuals() const override;
+
 protected:
     using IStorageSystemOneBlock::IStorageSystemOneBlock;

File diff suppressed because one or more lines are too long
@@ -0,0 +1,2 @@
[]
[]

@@ -0,0 +1,2 @@
SELECT CAST([] AS Array(Array(String)));
SELECT CAST([] AS Array(Array(Array(String))));
@@ -0,0 +1,4 @@
simple key
example_simple_key_dictionary UInt64
complex key
example_complex_key_dictionary (UInt64, String)

@@ -0,0 +1,26 @@
DROP DICTIONARY IF EXISTS example_simple_key_dictionary;
CREATE DICTIONARY example_simple_key_dictionary (
    id UInt64,
    value UInt64
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE '' DATABASE currentDatabase()))
LAYOUT(DIRECT());

SELECT 'simple key';

SELECT name, key FROM system.dictionaries WHERE name='example_simple_key_dictionary' AND database=currentDatabase();

DROP DICTIONARY IF EXISTS example_complex_key_dictionary;
CREATE DICTIONARY example_complex_key_dictionary (
    id UInt64,
    id_key String,
    value UInt64
)
PRIMARY KEY id, id_key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE '' DATABASE currentDatabase()))
LAYOUT(COMPLEX_KEY_DIRECT());

SELECT 'complex key';

SELECT name, key FROM system.dictionaries WHERE name='example_complex_key_dictionary' AND database=currentDatabase();
0  tests/testflows/map_type/__init__.py  Normal file
@@ -0,0 +1,16 @@
<yandex>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/log.log</log>
        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
    </logger>
    <part_log>
        <database>system</database>
        <table>part_log</table>
        <flush_interval_milliseconds>500</flush_interval_milliseconds>
    </part_log>
</yandex>
@@ -0,0 +1,42 @@
<?xml version="1.0"?>
<yandex>
    <remote_servers>
        <replicated_cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>clickhouse1</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>clickhouse2</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>clickhouse3</host>
                    <port>9000</port>
                </replica>
            </shard>
        </replicated_cluster>
        <sharded_cluster>
            <shard>
                <replica>
                    <host>clickhouse1</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse3</host>
                    <port>9000</port>
                </replica>
            </shard>
        </sharded_cluster>
    </remote_servers>
</yandex>
@@ -0,0 +1,10 @@
<?xml version="1.0"?>
<yandex>
    <zookeeper>
        <node index="1">
            <host>zookeeper</host>
            <port>2181</port>
        </node>
        <session_timeout_ms>15000</session_timeout_ms>
    </zookeeper>
</yandex>
448  tests/testflows/map_type/configs/clickhouse/config.xml  Normal file
@@ -0,0 +1,448 @@
<?xml version="1.0"?>
<!--
  NOTE: User and query level settings are set up in "users.xml" file.
-->
<yandex>
    <logger>
        <!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>

    <!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>

    <!-- For HTTPS and SSL over native protocol. -->
    <!--
    <https_port>8443</https_port>
    <tcp_port_secure>9440</tcp_port_secure>
    -->

    <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
    <openSSL>
        <server> <!-- Used for https server AND secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
            <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
            <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>

        <client> <!-- Used for connecting to https dictionary source -->
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
            <invalidCertificateHandler>
                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
    -->

    <!-- Port for communication between replicas. Used for data exchange. -->
    <interserver_http_port>9009</interserver_http_port>

    <!-- Hostname that is used by other replicas to request this server.
         If not specified, than it is determined analoguous to 'hostname -f' command.
         This setting could be used to switch replication to another network interface.
    -->
    <!--
    <interserver_http_host>example.yandex.ru</interserver_http_host>
    -->

    <!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
    <!-- <listen_host>::</listen_host> -->
    <!-- Same for hosts with disabled ipv6: -->
    <listen_host>0.0.0.0</listen_host>

    <!-- Default values - try listen localhost on ipv4 and ipv6: -->
    <!--
    <listen_host>::1</listen_host>
    <listen_host>127.0.0.1</listen_host>
    -->
    <!-- Don't exit if ipv6 or ipv4 unavailable, but listen_host with this protocol specified -->
    <!-- <listen_try>0</listen_try> -->

    <!-- Allow listen on same address:port -->
    <!-- <listen_reuse_port>0</listen_reuse_port> -->

    <!-- <listen_backlog>64</listen_backlog> -->

    <max_connections>4096</max_connections>
    <keep_alive_timeout>3</keep_alive_timeout>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>100</max_concurrent_queries>

    <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
         correct maximum value. -->
    <!-- <max_open_files>262144</max_open_files> -->

    <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
         Uncompressed cache is advantageous only for very short queries and in rare cases.
    -->
    <uncompressed_cache_size>8589934592</uncompressed_cache_size>

    <!-- Approximate size of mark cache, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         You should not lower this value.
    -->
    <mark_cache_size>5368709120</mark_cache_size>


    <!-- Path to data directory, with trailing slash. -->
    <path>/var/lib/clickhouse/</path>

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Directory with user provided files that are accessible by 'file' table function. -->
    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>

    <!-- Path to folder where users and roles created by SQL commands are stored. -->
    <access_control_path>/var/lib/clickhouse/access/</access_control_path>

    <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
    <user_directories>
        <users_xml>
            <!-- Path to configuration file with predefined users. -->
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <!-- Path to folder where users created by SQL commands are stored. -->
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>
    </user_directories>

    <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
    <users_config>users.xml</users_config>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>

    <!-- System profile of settings. This settings are used by internal processes (Buffer storage, Distibuted DDL worker and so on). -->
    <!-- <system_profile>default</system_profile> -->

    <!-- Default database. -->
    <default_database>default</default_database>

    <!-- Server time zone could be set here.

         Time zone is used when converting between String and DateTime types,
         when printing DateTime in text formats and parsing DateTime from text,
         it is used in date and time related functions, if specific time zone was not passed as an argument.

         Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
         If not specified, system time zone at server startup is used.

         Please note, that server could display time zone alias instead of specified name.
         Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
    -->
    <!-- <timezone>Europe/Moscow</timezone> -->

    <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
         Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
    -->
    <!-- <umask>022</umask> -->

    <!-- Perform mlockall after startup to lower first queries latency
         and to prevent clickhouse executable from being paged out under high IO load.
         Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
    -->
    <mlock_executable>false</mlock_executable>

    <!-- Configuration of clusters that could be used in Distributed tables.
         https://clickhouse.yandex/docs/en/table_engines/distributed/
    -->
    <remote_servers incl="clickhouse_remote_servers" >
        <!-- Test only shard config for testing distributed storage -->
        <test_shard_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_shard_localhost>
        <test_cluster_two_shards_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_cluster_two_shards_localhost>
        <test_shard_localhost_secure>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
        </test_shard_localhost_secure>
        <test_unavailable_shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>1</port>
                </replica>
            </shard>
        </test_unavailable_shard>
    </remote_servers>


    <!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
         By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
         Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
    -->

    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/docs/en/table_engines/replication/
    -->
    <zookeeper incl="zookeeper-servers" optional="true" />

    <!-- Substitutions for parameters of replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
    -->
    <macros incl="macros" optional="true" />


    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>


    <!-- Maximum session timeout, in seconds. Default: 3600. -->
    <max_session_timeout>3600</max_session_timeout>

    <!-- Default session timeout, in seconds. Default: 60. -->
    <default_session_timeout>60</default_session_timeout>

    <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
    <!--
        interval - send every X second
        root_path - prefix for keys
        hostname_in_path - append hostname to root_path (default = true)
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
    -->
    <!--
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>60</interval>
        <root_path>one_min</root_path>
        <hostname_in_path>true</hostname_in_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
    </graphite>
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>1</interval>
        <root_path>one_sec</root_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>false</asynchronous_metrics>
    </graphite>
    -->


    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data. If table is not exist, it will be created.
             When query log structure is changed after system update,
             then old table will be renamed and new table will be created automatically.
        -->
        <database>system</database>
        <table>query_log</table>
        <!--
            PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
            Example:
                event_date
                toMonday(event_date)
                toYYYYMM(event_date)
                toStartOfHour(event_time)
        -->
        <partition_by>toYYYYMM(event_date)</partition_by>
        <!-- Interval of flushing data. -->
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_log>

    <!-- Trace log. Stores stack traces collected by query profilers.
         See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
    <trace_log>
        <database>system</database>
        <table>trace_log</table>

        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </trace_log>

    <!-- Query thread log. Has information about all threads participated in query execution.
         Used only for queries with setting log_query_threads = 1. -->
    <query_thread_log>
        <database>system</database>
        <table>query_thread_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_thread_log>

    <!-- Uncomment if use part log.
         Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
    <part_log>
        <database>system</database>
        <table>part_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </part_log>
    -->

    <!-- Uncomment to write text log into table.
         Text log contains all information from usual server log but stores it in structured and efficient way.
    <text_log>
        <database>system</database>
        <table>text_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </text_log>
    -->

    <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
         See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
    -->

    <!-- Path to file with region hierarchy. -->
    <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->

    <!-- Path to directory with files containing names of regions -->
    <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->


    <!-- Configuration of external dictionaries. See:
         https://clickhouse.yandex/docs/en/dicts/external_dicts/
    -->
    <dictionaries_config>*_dictionary.xml</dictionaries_config>

    <!-- Uncomment if you want data to be compressed 30-100% better.
         Don't do that if you just started using ClickHouse.
    -->
    <compression incl="clickhouse_compression">
        <!--
        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
        <case>

            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
            <min_part_size>10000000000</min_part_size>        <!- - Min part size in bytes. - ->
            <min_part_size_ratio>0.01</min_part_size_ratio>   <!- - Min size of part relative to whole table size. - ->

            <!- - What compression method to use. - ->
            <method>zstd</method>
        </case>
        -->
    </compression>

    <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
         Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
    <distributed_ddl>
        <!-- Path in ZooKeeper to queue with DDL queries -->
        <path>/clickhouse/task_queue/ddl</path>

        <!-- Settings from this profile will be used to execute DDL queries -->
        <!-- <profile>default</profile> -->
    </distributed_ddl>

    <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <merge_tree>
        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
    </merge_tree>
    -->

    <!-- Protection from accidental DROP.
         If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
         If you want do delete one table and don't want to restart clickhouse-server, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows to DROP any tables.
         The same for max_partition_size_to_drop.
         Uncomment to disable protection.
    -->
    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
    <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->

    <!-- Example of parameters for GraphiteMergeTree table engine -->
    <graphite_rollup_example>
        <pattern>
            <regexp>click_cost</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>60</precision>
            </retention>
        </pattern>
        <default>
            <function>max</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>3600</age>
                <precision>300</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>3600</precision>
            </retention>
        </default>
    </graphite_rollup_example>

    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
         The directory will be created if it doesn't exist.
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>

    <!-- Uncomment to disable ClickHouse internal DNS caching. -->
    <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
</yandex>
133  tests/testflows/map_type/configs/clickhouse/users.xml  Normal file
@@ -0,0 +1,133 @@
<?xml version="1.0"?>
<yandex>
    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
            <use_uncompressed_cache>0</use_uncompressed_cache>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                  with minimum number of different symbols between replica's hostname and local hostname
                  (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 In first line will be password and in second - corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | openssl dgst -sha1 -binary | openssl dgst -sha1
                 In first line will be password and in second - corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.yandex.ru.
                     To check access, DNS query is performed, and all received addresses compared to peer address.
                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                     Strongly recommended that regexp is ends with $
                 All results of DNS requests are cached till server restart.
            -->
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- Allow access management -->
            <access_management>1</access_management>

            <!-- Example of row level security policy. -->
            <!-- <databases>
                <test>
                    <filtered_table1>
                        <filter>a = 1</filter>
                    </filtered_table1>
                    <filtered_table2>
                        <filter>a + b < 1 or c - d > 5</filter>
                    </filtered_table2>
                </test>
            </databases> -->
        </default>

        <!-- Example of user with readonly access. -->
        <!-- <readonly>
            <password></password>
            <networks incl="networks" replace="replace">
                <ip>::1</ip>
                <ip>127.0.0.1</ip>
            </networks>
            <profile>readonly</profile>
            <quota>default</quota>
        </readonly> -->
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</yandex>
@@ -0,0 +1,8 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse1</replica>
        <shard>01</shard>
        <shard2>01</shard2>
    </macros>
</yandex>

@@ -0,0 +1,8 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse2</replica>
        <shard>01</shard>
        <shard2>02</shard2>
    </macros>
</yandex>

@@ -0,0 +1,8 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse3</replica>
        <shard>01</shard>
        <shard2>03</shard2>
    </macros>
</yandex>
27  tests/testflows/map_type/docker-compose/clickhouse-service.yml  Executable file
@@ -0,0 +1,27 @@
version: '2.3'

services:
  clickhouse:
    image: yandex/clickhouse-integration-test
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
60  tests/testflows/map_type/docker-compose/docker-compose.yml  Executable file
@@ -0,0 +1,60 @@
version: '2.3'

services:
  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows to postpone
  # 'docker-compose up -d' till all dependecies will go healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
18  tests/testflows/map_type/docker-compose/zookeeper-service.yml  Executable file
@@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 3s
      timeout: 2s
      retries: 5
      start_period: 2s
    security_opt:
      - label:disable
121  tests/testflows/map_type/regression.py  Executable file
@@ -0,0 +1,121 @@
#!/usr/bin/env python3
import sys

from testflows.core import *

append_path(sys.path, "..")

from helpers.cluster import Cluster
from helpers.argparser import argparser
from map_type.requirements import SRS018_ClickHouse_Map_Data_Type

xfails = {
    "tests/table map with key integer/Int:":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032")],
    "tests/table map with value integer/Int:":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032")],
    "tests/table map with key integer/UInt256":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031")],
    "tests/table map with value integer/UInt256":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031")],
    "tests/select map with key integer/Int64":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21030")],
    "tests/select map with value integer/Int64":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21030")],
    "tests/cast tuple of two arrays to map/string -> int":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21029")],
    "tests/mapcontains/null key in map":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")],
    "tests/mapcontains/null key not in map":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")],
    "tests/mapkeys/null key not in map":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")],
    "tests/mapkeys/null key in map":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")],
    "tests/mapcontains/select nullable key":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026")],
    "tests/mapkeys/select keys from column":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026")],
    "tests/table map select key with value string/LowCardinality:":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")],
    "tests/table map select key with key string/FixedString":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")],
    "tests/table map select key with key string/Nullable":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")],
    "tests/table map select key with key string/Nullable(NULL)":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026")],
    "tests/table map select key with key string/LowCardinality:":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")],
    "tests/table map select key with key integer/Int:":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032")],
    "tests/table map select key with key integer/UInt256":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031")],
    "tests/table map select key with key integer/toNullable":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")],
    "tests/table map select key with key integer/toNullable(NULL)":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026")],
    "tests/select map with key integer/Int128":
        [(Fail, "large Int128 as key not supported")],
    "tests/select map with key integer/Int256":
        [(Fail, "large Int256 as key not supported")],
    "tests/select map with key integer/UInt256":
        [(Fail, "large UInt256 as key not supported")],
    "tests/select map with key integer/toNullable":
        [(Fail, "Nullable type as key not supported")],
    "tests/select map with key integer/toNullable(NULL)":
        [(Fail, "Nullable type as key not supported")],
    "tests/select map with key string/Nullable":
        [(Fail, "Nullable type as key not supported")],
    "tests/select map with key string/Nullable(NULL)":
        [(Fail, "Nullable type as key not supported")],
    "tests/table map queries/select map with nullable value":
        [(Fail, "Nullable value not supported")],
    "tests/table map with key integer/toNullable":
        [(Fail, "Nullable type as key not supported")],
    "tests/table map with key integer/toNullable(NULL)":
        [(Fail, "Nullable type as key not supported")],
    "tests/table map with key string/Nullable":
        [(Fail, "Nullable type as key not supported")],
    "tests/table map with key string/Nullable(NULL)":
        [(Fail, "Nullable type as key not supported")],
    "tests/table map with key string/LowCardinality(String)":
        [(Fail, "LowCardinality(String) as key not supported")],
    "tests/table map with key string/LowCardinality(String) cast from String":
        [(Fail, "LowCardinality(String) as key not supported")],
    "tests/table map with key string/LowCardinality(String) for key and value":
        [(Fail, "LowCardinality(String) as key not supported")],
    "tests/table map with key string/LowCardinality(FixedString)":
        [(Fail, "LowCardinality(FixedString) as key not supported")],
    "tests/table map with value string/LowCardinality(String) for key and value":
        [(Fail, "LowCardinality(String) as key not supported")],
}

xflags = {
}

@TestModule
@ArgumentParser(argparser)
@XFails(xfails)
@XFlags(xflags)
@Name("map type")
@Specifications(
    SRS018_ClickHouse_Map_Data_Type
)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
    """Map type regression.
    """
    nodes = {
        "clickhouse":
            ("clickhouse1", "clickhouse2", "clickhouse3")
    }
    with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster:
        self.context.cluster = cluster
        self.context.stress = stress

        if parallel is not None:
            self.context.parallel = parallel

        Feature(run=load("map_type.tests.feature", "feature"))

if main():
    regression()
1  tests/testflows/map_type/requirements/__init__.py  Normal file

@@ -0,0 +1 @@
from .requirements import *
512  tests/testflows/map_type/requirements/requirements.md  Normal file
@@ -0,0 +1,512 @@
# SRS018 ClickHouse Map Data Type
# Software Requirements Specification

## Table of Contents

* 1 [Revision History](#revision-history)
* 2 [Introduction](#introduction)
* 3 [Requirements](#requirements)
  * 3.1 [General](#general)
    * 3.1.1 [RQ.SRS-018.ClickHouse.Map.DataType](#rqsrs-018clickhousemapdatatype)
  * 3.2 [Performance](#performance)
    * 3.2.1 [RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.ArrayOfTuples](#rqsrs-018clickhousemapdatatypeperformancevsarrayoftuples)
    * 3.2.2 [RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.TupleOfArrays](#rqsrs-018clickhousemapdatatypeperformancevstupleofarrays)
  * 3.3 [Key Types](#key-types)
    * 3.3.1 [RQ.SRS-018.ClickHouse.Map.DataType.Key.String](#rqsrs-018clickhousemapdatatypekeystring)
    * 3.3.2 [RQ.SRS-018.ClickHouse.Map.DataType.Key.Integer](#rqsrs-018clickhousemapdatatypekeyinteger)
  * 3.4 [Value Types](#value-types)
    * 3.4.1 [RQ.SRS-018.ClickHouse.Map.DataType.Value.String](#rqsrs-018clickhousemapdatatypevaluestring)
    * 3.4.2 [RQ.SRS-018.ClickHouse.Map.DataType.Value.Integer](#rqsrs-018clickhousemapdatatypevalueinteger)
    * 3.4.3 [RQ.SRS-018.ClickHouse.Map.DataType.Value.Array](#rqsrs-018clickhousemapdatatypevaluearray)
  * 3.5 [Invalid Types](#invalid-types)
    * 3.5.1 [RQ.SRS-018.ClickHouse.Map.DataType.Invalid.Nullable](#rqsrs-018clickhousemapdatatypeinvalidnullable)
    * 3.5.2 [RQ.SRS-018.ClickHouse.Map.DataType.Invalid.NothingNothing](#rqsrs-018clickhousemapdatatypeinvalidnothingnothing)
  * 3.6 [Duplicated Keys](#duplicated-keys)
    * 3.6.1 [RQ.SRS-018.ClickHouse.Map.DataType.DuplicatedKeys](#rqsrs-018clickhousemapdatatypeduplicatedkeys)
  * 3.7 [Array of Maps](#array-of-maps)
    * 3.7.1 [RQ.SRS-018.ClickHouse.Map.DataType.ArrayOfMaps](#rqsrs-018clickhousemapdatatypearrayofmaps)
  * 3.8 [Nested With Maps](#nested-with-maps)
    * 3.8.1 [RQ.SRS-018.ClickHouse.Map.DataType.NestedWithMaps](#rqsrs-018clickhousemapdatatypenestedwithmaps)
  * 3.9 [Value Retrieval](#value-retrieval)
    * 3.9.1 [RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval](#rqsrs-018clickhousemapdatatypevalueretrieval)
    * 3.9.2 [RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyInvalid](#rqsrs-018clickhousemapdatatypevalueretrievalkeyinvalid)
    * 3.9.3 [RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyNotFound](#rqsrs-018clickhousemapdatatypevalueretrievalkeynotfound)
  * 3.10 [Converting Tuple(Array, Array) to Map](#converting-tuplearray-array-to-map)
    * 3.10.1 [RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysToMap](#rqsrs-018clickhousemapdatatypeconversionfromtupleofarraystomap)
    * 3.10.2 [RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysMap.Invalid](#rqsrs-018clickhousemapdatatypeconversionfromtupleofarraysmapinvalid)
  * 3.11 [Converting Array(Tuple(K,V)) to Map](#converting-arraytuplekv-to-map)
    * 3.11.1 [RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap](#rqsrs-018clickhousemapdatatypeconversionfromarrayoftuplestomap)
    * 3.11.2 [RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap.Invalid](#rqsrs-018clickhousemapdatatypeconversionfromarrayoftuplestomapinvalid)
  * 3.12 [Keys and Values Subcolumns](#keys-and-values-subcolumns)
    * 3.12.1 [RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys](#rqsrs-018clickhousemapdatatypesubcolumnskeys)
    * 3.12.2 [RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.ArrayFunctions](#rqsrs-018clickhousemapdatatypesubcolumnskeysarrayfunctions)
    * 3.12.3 [RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.InlineDefinedMap](#rqsrs-018clickhousemapdatatypesubcolumnskeysinlinedefinedmap)
    * 3.12.4 [RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values](#rqsrs-018clickhousemapdatatypesubcolumnsvalues)
    * 3.12.5 [RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.ArrayFunctions](#rqsrs-018clickhousemapdatatypesubcolumnsvaluesarrayfunctions)
    * 3.12.6 [RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.InlineDefinedMap](#rqsrs-018clickhousemapdatatypesubcolumnsvaluesinlinedefinedmap)
  * 3.13 [Functions](#functions)
    * 3.13.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.InlineDefinedMap](#rqsrs-018clickhousemapdatatypefunctionsinlinedefinedmap)
    * 3.13.2 [`length`](#length)
      * 3.13.2.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Length](#rqsrs-018clickhousemapdatatypefunctionslength)
    * 3.13.3 [`empty`](#empty)
      * 3.13.3.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Empty](#rqsrs-018clickhousemapdatatypefunctionsempty)
    * 3.13.4 [`notEmpty`](#notempty)
      * 3.13.4.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.NotEmpty](#rqsrs-018clickhousemapdatatypefunctionsnotempty)
    * 3.13.5 [`map`](#map)
      * 3.13.5.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map](#rqsrs-018clickhousemapdatatypefunctionsmap)
      * 3.13.5.2 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.InvalidNumberOfArguments](#rqsrs-018clickhousemapdatatypefunctionsmapinvalidnumberofarguments)
      * 3.13.5.3 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MixedKeyOrValueTypes](#rqsrs-018clickhousemapdatatypefunctionsmapmixedkeyorvaluetypes)
      * 3.13.5.4 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapAdd](#rqsrs-018clickhousemapdatatypefunctionsmapmapadd)
      * 3.13.5.5 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapSubstract](#rqsrs-018clickhousemapdatatypefunctionsmapmapsubstract)
      * 3.13.5.6 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapPopulateSeries](#rqsrs-018clickhousemapdatatypefunctionsmapmappopulateseries)
    * 3.13.6 [`mapContains`](#mapcontains)
      * 3.13.6.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapContains](#rqsrs-018clickhousemapdatatypefunctionsmapcontains)
    * 3.13.7 [`mapKeys`](#mapkeys)
      * 3.13.7.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapKeys](#rqsrs-018clickhousemapdatatypefunctionsmapkeys)
    * 3.13.8 [`mapValues`](#mapvalues)
      * 3.13.8.1 [RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapValues](#rqsrs-018clickhousemapdatatypefunctionsmapvalues)

## Revision History

This document is stored in an electronic form using [Git] source control management software
hosted in a [GitHub Repository].
All the updates are tracked using the [Revision History].

## Introduction

This software requirements specification covers requirements for `Map(key, value)` data type in [ClickHouse].

## Requirements

### General

#### RQ.SRS-018.ClickHouse.Map.DataType
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type that stores `key:value` pairs.

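For example, a minimal illustration of declaring and populating such a column (the table name `table_map` and column `a` below are illustrative, matching the examples used later in this specification):

```sql
CREATE TABLE table_map (a Map(String, UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES (map('key1', 1, 'key2', 10));
SELECT a['key2'] FROM table_map;
```
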
### Performance

#### RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.ArrayOfTuples
version: 1.0

[ClickHouse] SHALL provide comparable performance for `Map(key, value)` data type as
compared to `Array(Tuple(K,V))` data type.

#### RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.TupleOfArrays
version: 1.0

[ClickHouse] SHALL provide comparable performance for `Map(key, value)` data type as
compared to `Tuple(Array(String), Array(String))` data type where the first
array defines an array of keys and the second array defines an array of values.

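For illustration, the same `key:value` pairs can be written in the two alternative representations the comparison is made against (the query below is illustrative, not part of the requirements):

```sql
SELECT [('a', 1), ('b', 2)]  AS as_array_of_tuples,
       (['a', 'b'], [1, 2]) AS as_tuple_of_arrays;
```
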
### Key Types

#### RQ.SRS-018.ClickHouse.Map.DataType.Key.String
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type where key is of a [String] type.

#### RQ.SRS-018.ClickHouse.Map.DataType.Key.Integer
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type where key is of an [Integer] type.

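For example, both supported key types in one illustrative query:

```sql
SELECT map('a', 1, 'b', 2) AS string_keys, map(1, 'a', 2, 'b') AS integer_keys;
```
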
### Value Types

#### RQ.SRS-018.ClickHouse.Map.DataType.Value.String
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type where value is of a [String] type.

#### RQ.SRS-018.ClickHouse.Map.DataType.Value.Integer
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type where value is of an [Integer] type.

#### RQ.SRS-018.ClickHouse.Map.DataType.Value.Array
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type where value is of an [Array] type.

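For example, an illustrative map with [Array] values, combining map and array element access:

```sql
SELECT map('a', [1, 2, 3]) AS m, m['a'][2];
```
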
### Invalid Types

#### RQ.SRS-018.ClickHouse.Map.DataType.Invalid.Nullable
version: 1.0

[ClickHouse] SHALL not support creating table columns that have `Nullable(Map(key, value))` data type.

#### RQ.SRS-018.ClickHouse.Map.DataType.Invalid.NothingNothing
version: 1.0

[ClickHouse] SHALL not support creating table columns that have `Map(Nothing, Nothing)` data type.

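For example, both of the following statements are expected to be rejected (table names are illustrative):

```sql
CREATE TABLE t_invalid (m Nullable(Map(String, String))) ENGINE = Memory; -- expected to fail
CREATE TABLE t_nothing (m Map(Nothing, Nothing)) ENGINE = Memory;         -- expected to fail
```
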
### Duplicated Keys

#### RQ.SRS-018.ClickHouse.Map.DataType.DuplicatedKeys
version: 1.0

[ClickHouse] MAY support `Map(key, value)` data type with duplicated keys.

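For example, a map constructed with a duplicated key (illustrative; see also the value retrieval rules below for which pair may be returned):

```sql
SELECT map('a', 1, 'a', 2) AS m, m['a'];
```
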
### Array of Maps

#### RQ.SRS-018.ClickHouse.Map.DataType.ArrayOfMaps
version: 1.0

[ClickHouse] SHALL support `Array(Map(key, value))` data type.

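For example, an illustrative array of maps with element access:

```sql
SELECT [map('a', 1), map('b', 2)] AS arr, arr[1]['a'];
```
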
### Nested With Maps

#### RQ.SRS-018.ClickHouse.Map.DataType.NestedWithMaps
version: 1.0

[ClickHouse] SHALL support defining `Map(key, value)` data type inside the [Nested] data type.

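For example, an illustrative table definition placing a map inside [Nested] (the table and column names are hypothetical):

```sql
CREATE TABLE t_nested (n Nested(id UInt32, m Map(String, UInt64))) ENGINE = Memory;
```
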
### Value Retrieval

#### RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval
version: 1.0

[ClickHouse] SHALL support getting the value from a `Map(key, value)` data type using `map[key]` syntax.
If `key` has duplicates then the first `key:value` pair MAY be returned.

For example,

```sql
SELECT a['key2'] FROM table_map;
```

#### RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyInvalid
version: 1.0

[ClickHouse] SHALL return an error when the key does not match the key type.

For example,

```sql
SELECT map(1,2) AS m, m[1024]
```

Exceptions:

* when key is `NULL` the return value MAY be `NULL`
* when the key value is not valid for the key type, for example it is out of range for an [Integer] type,
  when reading from a table column it MAY return the default value for the key data type

#### RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyNotFound
version: 1.0

[ClickHouse] SHALL return the default value for the value data type
when there's no corresponding `key` defined in the `Map(key, value)` data type.

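For example (illustrative; `map('a', 1)` has a `UInt8` value type, whose default is `0`):

```sql
SELECT map('a', 1)['b']; -- no such key, so the value type's default, 0, is returned
```
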

### Converting Tuple(Array, Array) to Map

#### RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysToMap
version: 1.0

[ClickHouse] SHALL support converting [Tuple(Array, Array)] to `Map(key, value)` using the [CAST] function.

``` sql
SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map;
```

``` text
┌─map───────────────────────────┐
│ {1:'Ready',2:'Steady',3:'Go'} │
└───────────────────────────────┘
```

#### RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysMap.Invalid
version: 1.0

[ClickHouse] MAY return an error when casting [Tuple(Array, Array)] to `Map(key, value)`

* when arrays are not of equal size

For example,

```sql
SELECT CAST(([2, 1, 1023], ['', '']), 'Map(UInt8, String)') AS map, map[10]
```

### Converting Array(Tuple(K,V)) to Map

#### RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap
version: 1.0

[ClickHouse] SHALL support converting [Array(Tuple(K, V))] to `Map(key, value)` using the [CAST] function.

For example,

```sql
SELECT CAST(([(1, 2), (3, 4)]), 'Map(UInt8, UInt8)') AS map
```

#### RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap.Invalid
version: 1.0

[ClickHouse] MAY return an error when casting [Array(Tuple(K, V))] to `Map(key, value)`

* when element is not a [Tuple]

```sql
SELECT CAST(([(1,2),(3)]), 'Map(UInt8, UInt8)') AS map
```

* when [Tuple] does not contain two elements

```sql
SELECT CAST(([(1,2),(3,)]), 'Map(UInt8, UInt8)') AS map
```

### Keys and Values Subcolumns

#### RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys
version: 1.0

[ClickHouse] SHALL support `keys` subcolumn in the `Map(key, value)` type that can be used
to retrieve an [Array] of map keys.

```sql
SELECT m.keys FROM t_map;
```

#### RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.ArrayFunctions
version: 1.0

[ClickHouse] SHALL support applying [Array] functions to the `keys` subcolumn in the `Map(key, value)` type.

For example,

```sql
SELECT * FROM t_map WHERE has(m.keys, 'a');
```

#### RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.InlineDefinedMap
version: 1.0

[ClickHouse] MAY not support using an inline defined map to get the `keys` subcolumn.

For example,

```sql
SELECT map('aa', 4, '44', 5) AS c, c.keys
```

#### RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values
version: 1.0

[ClickHouse] SHALL support `values` subcolumn in the `Map(key, value)` type that can be used
to retrieve an [Array] of map values.

```sql
SELECT m.values FROM t_map;
```

#### RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.ArrayFunctions
version: 1.0

[ClickHouse] SHALL support applying [Array] functions to the `values` subcolumn in the `Map(key, value)` type.

For example,

```sql
SELECT * FROM t_map WHERE has(m.values, 'a');
```

#### RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.InlineDefinedMap
version: 1.0

[ClickHouse] MAY not support using an inline defined map to get the `values` subcolumn.

For example,

```sql
SELECT map('aa', 4, '44', 5) AS c, c.values
```

### Functions

#### RQ.SRS-018.ClickHouse.Map.DataType.Functions.InlineDefinedMap
version: 1.0

[ClickHouse] SHALL support using inline defined maps as an argument to map functions.

For example,

```sql
SELECT map('aa', 4, '44', 5) AS c, mapKeys(c);
SELECT map('aa', 4, '44', 5) AS c, mapValues(c);
```

#### `length`

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Length
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [length] function
that SHALL return the number of keys in the map.

For example,

```sql
SELECT length(map(1,2,3,4));
SELECT length(map());
```

#### `empty`

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Empty
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [empty] function
that SHALL return 1 if the number of keys in the map is 0, otherwise it SHALL return 0.

For example,

```sql
SELECT empty(map(1,2,3,4));
SELECT empty(map());
```

#### `notEmpty`

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.NotEmpty
version: 1.0

[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [notEmpty] function
that SHALL return 0 if the number of keys in the map is 0, otherwise it SHALL return 1.

For example,

```sql
SELECT notEmpty(map(1,2,3,4));
SELECT notEmpty(map());
```

#### `map`

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map
version: 1.0

[ClickHouse] SHALL support arranging `key, value` pairs into `Map(key, value)` data type
using the `map` function.

**Syntax**

``` sql
map(key1, value1[, key2, value2, ...])
```

For example,

``` sql
SELECT map('key1', number, 'key2', number * 2) FROM numbers(3);

┌─map('key1', number, 'key2', multiply(number, 2))─┐
│ {'key1':0,'key2':0}                              │
│ {'key1':1,'key2':2}                              │
│ {'key1':2,'key2':4}                              │
└──────────────────────────────────────────────────┘
```

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.InvalidNumberOfArguments
version: 1.0

[ClickHouse] SHALL return an error when the `map` function is called with a non-even number of arguments.
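
For example, a call of the following form would be expected to return an error (illustrative sketch):

```sql
SELECT map('a', 1, 'b')
```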

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MixedKeyOrValueTypes
version: 1.0

[ClickHouse] SHALL return an error when the `map` function is called with mixed key or value types.
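
For example, a call of the following form mixes [String] and [Integer] keys and would be expected to return an error (illustrative sketch):

```sql
SELECT map('a', 1, 2, 3)
```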

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapAdd
version: 1.0

[ClickHouse] SHALL support converting the results of the `mapAdd` function to a `Map(key, value)` data type.

For example,

``` sql
SELECT CAST(mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])), 'Map(Int8,Int8)')
```

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapSubstract
version: 1.0

[ClickHouse] SHALL support converting the results of the `mapSubtract` function to a `Map(key, value)` data type.

For example,

```sql
SELECT CAST(mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])), 'Map(Int8,Int8)')
```

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapPopulateSeries
version: 1.0

[ClickHouse] SHALL support converting the results of the `mapPopulateSeries` function to a `Map(key, value)` data type.

For example,

```sql
SELECT CAST(mapPopulateSeries([1, 2, 4], [11, 22, 44], 5), 'Map(Int8,Int8)')
```

#### `mapContains`

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapContains
version: 1.0

[ClickHouse] SHALL support `mapContains(map, key)` function to check whether `map.keys` contains the `key`.

For example,

```sql
SELECT mapContains(a, 'abc') FROM table_map;
```

#### `mapKeys`

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapKeys
version: 1.0

[ClickHouse] SHALL support `mapKeys(map)` function to return all the map keys in the [Array] format.

For example,

```sql
SELECT mapKeys(a) FROM table_map;
```

#### `mapValues`

##### RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapValues
version: 1.0

[ClickHouse] SHALL support `mapValues(map)` function to return all the map values in the [Array] format.

For example,

```sql
SELECT mapValues(a) FROM table_map;
```

[Nested]: https://clickhouse.tech/docs/en/sql-reference/data-types/nested-data-structures/nested/
[length]: https://clickhouse.tech/docs/en/sql-reference/functions/array-functions/#array_functions-length
[empty]: https://clickhouse.tech/docs/en/sql-reference/functions/array-functions/#function-empty
[notEmpty]: https://clickhouse.tech/docs/en/sql-reference/functions/array-functions/#function-notempty
[CAST]: https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#type_conversion_function-cast
[Tuple]: https://clickhouse.tech/docs/en/sql-reference/data-types/tuple/
[Tuple(Array, Array)]: https://clickhouse.tech/docs/en/sql-reference/data-types/tuple/
[Array(Tuple(K, V))]: https://clickhouse.tech/docs/en/sql-reference/data-types/array/
[Array]: https://clickhouse.tech/docs/en/sql-reference/data-types/array/
[String]: https://clickhouse.tech/docs/en/sql-reference/data-types/string/
[Integer]: https://clickhouse.tech/docs/en/sql-reference/data-types/int-uint/
[ClickHouse]: https://clickhouse.tech
[GitHub Repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/map_type/requirements/requirements.md
[Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/map_type/requirements/requirements.md
[Git]: https://git-scm.com/
[GitHub]: https://github.com

tests/testflows/map_type/requirements/requirements.py (1427 lines, new file): diff suppressed because it is too large
tests/testflows/map_type/tests/__init__.py (0 lines, new file)
tests/testflows/map_type/tests/common.py (49 lines, new file)
@ -0,0 +1,49 @@
import uuid
from collections import namedtuple

from testflows.core import *
from testflows.core.name import basename, parentname
from testflows._core.testtype import TestSubType


def getuid():
    """Return a unique identifier derived from the current test name."""
    if current().subtype == TestSubType.Example:
        testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',', '')}"
    else:
        testname = f"{basename(current().name).replace(' ', '_').replace(',', '')}"
    return testname + "_" + str(uuid.uuid1()).replace('-', '_')


@TestStep(Given)
def allow_experimental_map_type(self):
    """Set allow_experimental_map_type = 1"""
    setting = ("allow_experimental_map_type", 1)
    default_query_settings = None

    try:
        with By("adding allow_experimental_map_type to the default query settings"):
            default_query_settings = getsattr(current().context, "default_query_settings", [])
            default_query_settings.append(setting)
        yield
    finally:
        with Finally("I remove allow_experimental_map_type from the default query settings"):
            if default_query_settings:
                try:
                    default_query_settings.pop(default_query_settings.index(setting))
                except ValueError:
                    pass


@TestStep(Given)
def create_table(self, name, statement, on_cluster=False):
    """Create table."""
    node = current().context.node
    try:
        with Given(f"I have a {name} table"):
            node.query(statement.format(name=name))
        yield name
    finally:
        with Finally("I drop the table"):
            if on_cluster:
                node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {on_cluster}")
            else:
                node.query(f"DROP TABLE IF EXISTS {name}")

tests/testflows/map_type/tests/feature.py (1195 lines, new executable file): diff suppressed because it is too large
@ -18,6 +18,7 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
    # Feature(test=load("ldap.regression", "regression"))(**args)
    # Feature(test=load("rbac.regression", "regression"))(**args)
    # Feature(test=load("aes_encryption.regression", "regression"))(**args)
    Feature(test=load("map_type.regression", "regression"))(**args)
    # Feature(test=load("kerberos.regression", "regression"))(**args)

if main():