Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 08:02:02 +00:00)

Commit c25348ded9 — Merge remote-tracking branch 'origin/master' into HEAD
@@ -99,6 +99,9 @@
    "docker/test/integration/resolver": {
        "name": "yandex/clickhouse-python-bottle",
        "dependent": []
+   },
+   "docker/test/integration/helper_container": {
+       "name": "yandex/clickhouse-integration-helper",
+       "dependent": []
    }
}
@@ -90,7 +90,7 @@ do
    sleep 0.1
done

-TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having"
+TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper"

clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt

@@ -104,9 +104,8 @@ function fuzz
    if [ "$fuzzer_exit_code" == "143" ]
    then
        # Killed by watchdog, meaning, no errors.
-        return 0
+        fuzzer_exit_code=0
    fi
-    return $fuzzer_exit_code
}

case "$stage" in
@@ -161,7 +160,7 @@ case "$stage" in
            echo "success" > status.txt
        else
            echo "failure" > status.txt
-            if ! grep "received signal \|Logical error" server.log > description.txt
+            if ! grep -a "received signal \|Logical error" server.log > description.txt
            then
                echo "Fuzzer exit code $fuzzer_exit_code. See the logs" > description.txt
            fi
@@ -1,3 +1,4 @@
+# docker build -t yandex/clickhouse-integration-helper .
# Helper docker container to run iptables without sudo

FROM alpine
@@ -1,6 +1,6 @@
---
toc_priority: 71
-toc_title: Source Code
+toc_title: Source Code Browser
---

# Browse ClickHouse Source Code {#browse-clickhouse-source-code}
@@ -1,6 +1,6 @@
---
toc_priority: 67
-toc_title: How to Build ClickHouse on Linux for AARCH64 (ARM64)
+toc_title: Build on Linux for AARCH64 (ARM64)
---

# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
@@ -9,7 +9,7 @@ This is for the case when you have Linux machine and want to use it to build `cl

The cross-build for AARCH64 is based on the [Build instructions](../development/build.md), follow them first.

-# Install Clang-8 {#install-clang-8}
+## Install Clang-8 {#install-clang-8}

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, in Ubuntu Bionic you can use the following commands:
@@ -20,7 +20,7 @@ sudo apt-get update
sudo apt-get install clang-8
```

-# Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
+## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}

``` bash
cd ClickHouse
@@ -29,7 +29,7 @@ wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1
```

-# Build ClickHouse {#build-clickhouse}
+## Build ClickHouse {#build-clickhouse}

``` bash
cd ClickHouse
@@ -1,6 +1,6 @@
---
toc_priority: 66
-toc_title: How to Build ClickHouse on Linux for Mac OS X
+toc_title: Build on Linux for Mac OS X
---

# How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x}
@@ -9,7 +9,7 @@ This is for the case when you have Linux machine and want to use it to build `cl

The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first.

-# Install Clang-8 {#install-clang-8}
+## Install Clang-8 {#install-clang-8}

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example the commands for Bionic are like:
@@ -19,7 +19,7 @@ sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8
sudo apt-get install clang-8
```

-# Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
+## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}

Let’s remember the path where we install `cctools` as ${CCTOOLS}

@@ -47,7 +47,7 @@ mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
```

-# Build ClickHouse {#build-clickhouse}
+## Build ClickHouse {#build-clickhouse}

``` bash
cd ClickHouse
@@ -1,6 +1,6 @@
---
toc_priority: 65
-toc_title: How to Build ClickHouse on Mac OS X
+toc_title: Build on Mac OS X
---

# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
@@ -45,14 +45,12 @@ $ cd ..

## Caveats {#caveats}

-If you intend to run clickhouse-server, make sure to increase the system’s maxfiles variable.
+If you intend to run `clickhouse-server`, make sure to increase the system’s maxfiles variable.

!!! info "Note"
    You’ll need to use sudo.

-To do so, create the following file:
-
-/Library/LaunchDaemons/limit.maxfiles.plist:
+To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the following content:

``` xml
<?xml version="1.0" encoding="UTF-8"?>
@@ -1,11 +1,9 @@
---
toc_priority: 64
-toc_title: How to Build ClickHouse on Linux
+toc_title: Build on Linux
---

-# How to Build ClickHouse for Development {#how-to-build-clickhouse-for-development}
+# How to Build ClickHouse on Linux {#how-to-build-clickhouse-for-development}

-The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
-
Supported platforms:

@@ -13,7 +11,11 @@ Supported platforms:
- AArch64
- Power9 (experimental)

-## Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}
+## Normal Build for Development on Ubuntu
+
+The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
+
+### Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}

``` bash
$ sudo apt-get install git cmake python ninja-build
@@ -21,18 +23,18 @@ $ sudo apt-get install git cmake python ninja-build

Or cmake3 instead of cmake on older systems.

-## Install GCC 9 {#install-gcc-9}
+### Install GCC 9 {#install-gcc-9}

There are several ways to do this.

-### Install from Repository {#install-from-repository}
+#### Install from Repository {#install-from-repository}

On Ubuntu 19.10 or newer:

    $ sudo apt-get update
    $ sudo apt-get install gcc-9 g++-9

-### Install from a PPA Package {#install-from-a-ppa-package}
+#### Install from a PPA Package {#install-from-a-ppa-package}

On older Ubuntu:

@@ -43,18 +45,18 @@ $ sudo apt-get update
$ sudo apt-get install gcc-9 g++-9
```

-### Install from Sources {#install-from-sources}
+#### Install from Sources {#install-from-sources}

See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)

-## Use GCC 9 for Builds {#use-gcc-9-for-builds}
+### Use GCC 9 for Builds {#use-gcc-9-for-builds}

``` bash
$ export CC=gcc-9
$ export CXX=g++-9
```

-## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
+### Checkout ClickHouse Sources {#checkout-clickhouse-sources}

``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
@@ -66,7 +68,7 @@ or
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
```

-## Build ClickHouse {#build-clickhouse}
+### Build ClickHouse {#build-clickhouse}

``` bash
$ cd ClickHouse
@@ -79,7 +81,7 @@ $ ninja
To create an executable, run `ninja clickhouse`.
This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments.

-# How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
+## How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}

The build requires the following components:

@@ -93,32 +95,58 @@ The build requires the following components:
If all the components are installed, you may build in the same way as the steps above.

Example for Ubuntu Eoan:
+``` bash
sudo apt update
sudo apt install git cmake ninja-build g++ python
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
+```

Example for OpenSUSE Tumbleweed:
+``` bash
sudo zypper install git cmake ninja gcc-c++ python lld
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
+```

Example for Fedora Rawhide:
+``` bash
+sudo yum update
+yum --nogpg install git cmake make gcc-c++ python2
+git clone --recursive https://github.com/ClickHouse/ClickHouse.git
+mkdir build && cd build
+cmake ../ClickHouse
+make -j $(nproc)
+```
-sudo yum update
-yum --nogpg install git cmake make gcc-c++ python2
-git clone --recursive https://github.com/ClickHouse/ClickHouse.git
-mkdir build && cd build
-cmake ../ClickHouse
-make -j $(nproc)

-# You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
+## How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package}
+
+### Install Git and Pbuilder {#install-git-and-pbuilder}
+
+``` bash
+$ sudo apt-get update
+$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
+```
+
+### Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
+
+``` bash
+$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
+$ cd ClickHouse
+```
+
+### Run Release Script {#run-release-script}
+
+``` bash
+$ ./release
+```
+
+## You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}

ClickHouse is available in pre-built binaries and packages. Binaries are portable and can be run on any Linux flavour.

@@ -126,26 +154,4 @@ They are built for stable, prestable and testing releases as long as for every c

To find the freshest build from `master`, go to [commits page](https://github.com/ClickHouse/ClickHouse/commits/master), click on the first green checkmark or red cross near commit, and click to the “Details” link right after “ClickHouse Build Check”.

-# How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package}
-
-## Install Git and Pbuilder {#install-git-and-pbuilder}
-
-``` bash
-$ sudo apt-get update
-$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
-```
-
-## Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
-
-``` bash
-$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
-$ cd ClickHouse
-```
-
-## Run Release Script {#run-release-script}
-
-``` bash
-$ ./release
-```
-
[Original article](https://clickhouse.tech/docs/en/development/build/) <!--hide-->
@@ -35,6 +35,7 @@ toc_title: Third-Party Libraries Used
| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
+| sentry-native | [MIT License](https://github.com/getsentry/sentry-native/blob/master/LICENSE) |
| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
@@ -1,6 +1,6 @@
---
toc_priority: 68
-toc_title: How to Write C++ Code
+toc_title: C++ Guide
---

# How to Write C++ Code {#how-to-write-c-code}
@@ -1,6 +1,6 @@
---
toc_priority: 69
-toc_title: How to Run ClickHouse Tests
+toc_title: Testing
---

# ClickHouse Testing {#clickhouse-testing}
@@ -25,12 +25,7 @@ Tests should use (create, drop, etc) only tables in `test` database that is assu

If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`.

-Some tests are marked with `zookeeper`, `shard` or `long` in their names.
-`zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that
-requires server to listen `127.0.0.*`; `distributed` or `global` have the same
-meaning. `long` is for tests that run slightly longer that one second. You can
-disable these groups of tests using `--no-zookeeper`, `--no-shard` and
-`--no-long` options, respectively.
+Some tests are marked with `zookeeper`, `shard` or `long` in their names. `zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that requires server to listen `127.0.0.*`; `distributed` or `global` have the same meaning. `long` is for tests that run slightly longer that one second. You can disable these groups of tests using `--no-zookeeper`, `--no-shard` and `--no-long` options, respectively.

## Known Bugs {#known-bugs}

@@ -153,11 +148,11 @@ Motivation:

Normally we release and run all tests on a single variant of ClickHouse build. But there are alternative build variants that are not thoroughly tested. Examples:

-- build on FreeBSD;
-- build on Debian with libraries from system packages;
-- build with shared linking of libraries;
-- build on AArch64 platform;
-- build on PowerPc platform.
+- build on FreeBSD
+- build on Debian with libraries from system packages
+- build with shared linking of libraries
+- build on AArch64 platform
+- build on PowerPc platform

For example, build with system packages is bad practice, because we cannot guarantee what exact version of packages a system will have. But this is really needed by Debian maintainers. For this reason we at least have to support this variant of build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts.

@@ -177,22 +172,22 @@ For production builds, gcc is used (it still generates slightly more efficient c

## Sanitizers {#sanitizers}

-**Address sanitizer**.
+### Address sanitizer
We run functional and integration tests under ASan on per-commit basis.

-**Valgrind (Memcheck)**.
+### Valgrind (Memcheck)
We run functional tests under Valgrind overnight. It takes multiple hours. Currently there is one known false positive in `re2` library, see [this article](https://research.swtch.com/sparse).

-**Undefined behaviour sanitizer.**
+### Undefined behaviour sanitizer
We run functional and integration tests under ASan on per-commit basis.

-**Thread sanitizer**.
+### Thread sanitizer
We run functional tests under TSan on per-commit basis. We still don’t run integration tests under TSan on per-commit basis.

-**Memory sanitizer**.
+### Memory sanitizer
Currently we still don’t use MSan.

-**Debug allocator.**
+### Debug allocator
Debug version of `jemalloc` is used for debug build.

## Fuzzing {#fuzzing}
@@ -227,7 +222,7 @@ If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of t

## Code Style {#code-style}

-Code style rules are described [here](https://clickhouse.tech/docs/en/development/style/).
+Code style rules are described [here](style.md).

To check for some common style violations, you can use `utils/check-style` script.

@@ -22,7 +22,7 @@ The Distributed engine accepts parameters:

See also:

-- `insert_distributed_sync` setting
+- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples

Example:
@@ -16,7 +16,7 @@ One of the following batches of those t-shirts was supposed to be given away on
So, what does it mean? Here are some ways to translate *“не тормозит”*:

- If you translate it literally, it’d be something like *“ClickHouse doesn’t press the brake pedal”*.
-- If you’d want to express it as close to how it sounds to a Russian person with IT background, it’d be something like *“If you larger system lags, it’s not because it uses ClickHouse”*.
+- If you’d want to express it as close to how it sounds to a Russian person with IT background, it’d be something like *“If your larger system lags, it’s not because it uses ClickHouse”*.
- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse doesn’t lag”* or just *“ClickHouse is fast”*.

If you haven’t seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one:
@@ -30,7 +30,7 @@ See [File](../../engines/table-engines/special/file.md) table engine.

## Using Command-Line Redirection {#using-command-line-redirection}

-``` sql
+``` bash
$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
```

@@ -1,13 +1,14 @@
---
toc_folder_title: Example Datasets
-toc_priority: 12
+toc_priority: 15
toc_title: Introduction
---

# Example Datasets {#example-datasets}

-This section describes how to obtain example datasets and import them into ClickHouse.
-For some datasets example queries are also available.
+This section describes how to obtain example datasets and import them into ClickHouse. For some datasets example queries are also available.
+
+The list of documented datasets:

- [Anonymized Yandex.Metrica Dataset](../../getting-started/example-datasets/metrica.md)
- [Star Schema Benchmark](../../getting-started/example-datasets/star-schema.md)
@@ -37,6 +37,7 @@ The queries are executed as a read-only user. It implies some limitations:
- INSERT queries are not allowed

The following settings are also enforced:

- [max\_result\_bytes=10485760](../operations/settings/query_complexity/#max-result-bytes)
- [max\_result\_rows=2000](../operations/settings/query_complexity/#setting-max_result_rows)
- [result\_overflow\_mode=break](../operations/settings/query_complexity/#result-overflow-mode)
@@ -8,73 +8,54 @@ toc_title: Testing Hardware
With this instruction you can run basic ClickHouse performance test on any server without installation of ClickHouse packages.

1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master

2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. There is no such link in some commits, for example commits with documentation. In this case, choose the nearest commit having this link.

-3. Copy the link to “clickhouse” binary for amd64 or aarch64.
+3. Copy the link to `clickhouse` binary for amd64 or aarch64.

4. ssh to the server and download it with wget:

-<!-- -->
+```bash
# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
# For aarch64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
# Then do:
chmod a+x clickhouse
+```

5. Download configs:

-<!-- -->
+```bash
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
mkdir config.d
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
+```

6. Download benchmark files:

-<!-- -->
+```bash
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
chmod a+x benchmark-new.sh
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
+```

7. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).

-<!-- -->
+```bash
wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
mv hits_100m_obfuscated_v1/* .
+```

8. Run the server:

-<!-- -->
+```bash
./clickhouse server
+```

9. Check the data: ssh to the server in another terminal

-<!-- -->
+```bash
./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
100000000
+```

10. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add `--max_memory_usage 100000000000` parameter.

-<!-- -->
+```bash
mcedit benchmark-new.sh
+```

11. Run the benchmark:

-<!-- -->
+```bash
./benchmark-new.sh hits_100m_obfuscated
+```

12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com

All the results are published here: https://clickhouse.tech/benchmark/hardware/
@@ -808,6 +808,17 @@ If unsuccessful, several attempts are made to connect to various replicas.

Default value: 50.

+## connection\_pool\_max\_wait\_ms {#connection-pool-max-wait-ms}
+
+The wait time in milliseconds for a connection when the connection pool is full.
+
+Possible values:
+
+- Positive integer.
+- 0 — Infinite timeout.
+
+Default value: 0.
+
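For illustration only (not part of the patch, value is arbitrary), the new setting can be adjusted per session like any other setting:

``` sql
-- Wait at most 5 seconds for a free connection when the pool is exhausted
SET connection_pool_max_wait_ms = 5000;
```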
## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}

The maximum number of connection attempts with each replica for the Distributed table engine.
@@ -819,6 +830,21 @@ Default value: 3.
Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
For more information, see the section “Extreme values”.

+## kafka\_max\_wait\_ms {#kafka-max-wait-ms}
+
+The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) before retry.
+
+Possible values:
+
+- Positive integer.
+- 0 — Infinite timeout.
+
+Default value: 5000.
+
+See also:
+
+- [Apache Kafka](https://kafka.apache.org/)
+
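As an illustrative sketch (not from the patch, value is arbitrary), the setting can be changed at session level before reading from a Kafka-backed table:

``` sql
-- Retry reading from Kafka after waiting at most 10 seconds for new messages
SET kafka_max_wait_ms = 10000;
```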
## use\_uncompressed\_cache {#setting-use_uncompressed_cache}

Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
@@ -837,6 +863,17 @@ If a query from the same user with the same ‘query\_id’ already exists at th

Yandex.Metrica uses this parameter set to 1 for implementing suggestions for segmentation conditions. After entering the next character, if the old query hasn’t finished yet, it should be cancelled.

+## replace\_running\_query\_max\_wait\_ms {#replace-running-query-max-wait-ms}
+
+The wait time for running query with the same `query_id` to finish, when the [replace_running_query](#replace-running-query) setting is active.
+
+Possible values:
+
+- Positive integer.
+- 0 — Throwing an exception that does not allow to run a new query if the server already executes a query with the same `query_id`.
+
+Default value: 5000.
+
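A hedged usage sketch (not part of the patch, values are arbitrary): the setting only matters together with `replace_running_query`, e.g. for an interactive client that resends queries under the same `query_id`:

``` sql
-- Let a new query with the same query_id replace the old one,
-- waiting up to 10 seconds for the old query to finish
SET replace_running_query = 1;
SET replace_running_query_max_wait_ms = 10000;
```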
## stream\_flush\_interval\_ms {#stream-flush-interval-ms}

Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.
@@ -1422,6 +1459,23 @@ Possible values:

Default value: 16.

+## insert_distributed_sync {#insert_distributed_sync}
+
+Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table.
+
+By default, when inserting data into a `Distributed` table, the ClickHouse server sends data to cluster nodes in asynchronous mode. When `insert_distributed_sync=1`, the data is processed synchronously, and the `INSERT` operation succeeds only after all the data is saved on all shards (at least one replica for each shard if `internal_replication` is true).
+
+Possible values:
+
+- 0 — Data is inserted in asynchronous mode.
+- 1 — Data is inserted in synchronous mode.
+
+Default value: `0`.
+
+**See Also**
+
+- [Distributed Table Engine](../../engines/table-engines/special/distributed.md#distributed)
+- [Managing Distributed Tables](../../sql-reference/statements/system.md#query-language-system-distributed)
+
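An illustrative sketch of the synchronous mode (table name is hypothetical, not from the patch):

``` sql
-- INSERT returns only after the data is stored on all shards
SET insert_distributed_sync = 1;
INSERT INTO distributed_table VALUES (1, 'a'), (2, 'b');
```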
## background\_buffer\_flush\_schedule\_pool\_size {#background_buffer_flush_schedule_pool_size}

Sets the number of threads performing background flush in [Buffer](../../engines/table-engines/special/buffer.md)-engine tables. This setting is applied at ClickHouse server start and can’t be changed in a user session.
docs/en/operations/system-tables/current-roles.md (new file, 10 lines)
@@ -0,0 +1,10 @@
+# system.current_roles {#system_tables-current_roles}
+
+Contains active roles of a current user. `SET ROLE` changes the contents of this table.
+
+Columns:
+
+- `role_name` ([String](../../sql-reference/data-types/string.md)) — Role name.
+- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege.
+- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role.
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) <!--hide-->
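For illustration (not part of the patch), the new table can be inspected like any other system table:

``` sql
-- Show the roles that are currently active for this session
SELECT role_name, with_admin_option, is_default FROM system.current_roles;
```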
docs/en/operations/system-tables/enabled-roles.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+# system.enabled_roles {#system_tables-enabled_roles}
+
+Contains all active roles at the moment, including current role of the current user and granted roles for current role.
+
+Columns:
+
+- `role_name` ([String](../../sql-reference/data-types/string.md)) — Role name.
+- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a role with `ADMIN OPTION` privilege.
+- `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a current role of a current user.
+- `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a default role.
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/enabled-roles) <!--hide-->
docs/en/operations/system-tables/licenses.md (new file, 39 lines)
@@ -0,0 +1,39 @@
+# system.licenses {#system-tables_system.licenses}
+
+Contains licenses of third-party libraries that are located in the [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) directory of ClickHouse sources.
+
+Columns:
+
+- `library_name` ([String](../../sql-reference/data-types/string.md)) — Name of the library the license is connected with.
+- `license_type` ([String](../../sql-reference/data-types/string.md)) — License type — e.g. Apache, MIT.
+- `license_path` ([String](../../sql-reference/data-types/string.md)) — Path to the file with the license text.
+- `license_text` ([String](../../sql-reference/data-types/string.md)) — License text.
+
+**Example**
+
+``` sql
+SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15
+```
+
+``` text
+┌─library_name───────┬─license_type─┬─license_path────────────────────────┐
+│ FastMemcpy         │ MIT          │ /contrib/FastMemcpy/LICENSE         │
+│ arrow              │ Apache       │ /contrib/arrow/LICENSE.txt          │
+│ avro               │ Apache       │ /contrib/avro/LICENSE.txt           │
+│ aws-c-common       │ Apache       │ /contrib/aws-c-common/LICENSE       │
+│ aws-c-event-stream │ Apache       │ /contrib/aws-c-event-stream/LICENSE │
+│ aws-checksums      │ Apache       │ /contrib/aws-checksums/LICENSE      │
+│ aws                │ Apache       │ /contrib/aws/LICENSE.txt            │
+│ base64             │ BSD 2-clause │ /contrib/base64/LICENSE             │
+│ boost              │ Boost        │ /contrib/boost/LICENSE_1_0.txt      │
+│ brotli             │ MIT          │ /contrib/brotli/LICENSE             │
+│ capnproto          │ MIT          │ /contrib/capnproto/LICENSE          │
+│ cassandra          │ Apache       │ /contrib/cassandra/LICENSE.txt      │
+│ cctz               │ Apache       │ /contrib/cctz/LICENSE.txt           │
+│ cityhash102        │ MIT          │ /contrib/cityhash102/COPYING        │
+│ cppkafka           │ BSD 2-clause │ /contrib/cppkafka/LICENSE           │
+└────────────────────┴──────────────┴─────────────────────────────────────┘
+
+```
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/licenses) <!--hide-->
@@ -1,25 +1,46 @@
# system.mutations {#system_tables-mutations}

-The table contains information about [mutations](../../sql-reference/statements/alter/index.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns:
-
-**database**, **table** - The name of the database and table to which the mutation was applied.
-
-**mutation\_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table.
-
-**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
-
-**create\_time** - When this mutation command was submitted for execution.
-
-**block\_numbers.partition\_id**, **block\_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
-
-**parts\_to\_do** - The number of data parts that need to be mutated for the mutation to finish.
-
-**is\_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated.
-
-If there were problems with mutating some parts, the following columns contain additional information:
-
-**latest\_failed\_part** - The name of the most recent part that could not be mutated.
-
-**latest\_fail\_time** - The time of the most recent part mutation failure.
-
-**latest\_fail\_reason** - The exception message that caused the most recent part mutation failure.
+The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database to which the mutation was applied.
+- `table` ([String](../../sql-reference/data-types/string.md)) — The name of the table to which the mutation was applied.
+- `mutation_id` ([String](../../sql-reference/data-types/string.md)) — The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_zookeeper>/mutations/` directory in ZooKeeper. For non-replicated tables the IDs correspond to file names in the data directory of the table.
+- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
+- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
+- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.
+- `block_numbers.number` ([Array](../../sql-reference/data-types/array.md)([Int64](../../sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition.
+
+    In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
+
+- `parts_to_do_names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete.
+- `parts_to_do` ([Int64](../../sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete.
+- `is_done` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values:
+    - `1` if the mutation is completed,
+    - `0` if the mutation is still in process.
+
+!!! info "Note"
+    Even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not completed yet because of a long-running `INSERT` query, that will create a new data part needed to be mutated.
+
+If there were problems with mutating some data parts, the following columns contain additional information:
+
+- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated.
+- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.
+- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure.
+
+**See Also**
+
+- [Mutations](../../sql-reference/statements/alter/index.md#mutations)
+- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine
+- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family
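As an illustrative query against the reworked table (not from the patch), unfinished mutations can be listed like this:

``` sql
-- List mutations that are still in progress, together with any failure reason
SELECT database, table, mutation_id, command, parts_to_do, latest_fail_reason
FROM system.mutations
WHERE is_done = 0;
```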
docs/en/operations/system-tables/role-grants.md (new file, 16 lines)
@@ -0,0 +1,16 @@
+# system.role_grants {#system_tables-role_grants}
+
+Contains the role grants for users and roles. To add entries to this table, use `GRANT role TO user`.
+
+Columns:
+
+- `user_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — User name.
+- `role_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Role name.
+- `granted_role_name` ([String](../../sql-reference/data-types/string.md)) — Name of role granted to the `role_name` role. To grant one role to another one use `GRANT role1 TO role2`.
+- `granted_role_is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a default role. Possible values:
+    - 1 — `granted_role` is a default role.
+    - 0 — `granted_role` is not a default role.
+- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a role with [ADMIN OPTION](../../sql-reference/statements/grant.md#admin-option-privilege) privilege. Possible values:
+    - 1 — The role has `ADMIN OPTION` privilege.
+    - 0 — The role without `ADMIN OPTION` privilege.
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/role-grants) <!--hide-->
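An illustrative query against the new table (the user name is hypothetical, not from the patch):

``` sql
-- Which roles are granted to user 'john', and with what options
SELECT granted_role_name, granted_role_is_default, with_admin_option
FROM system.role_grants
WHERE user_name = 'john';
```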
docs/en/operations/system-tables/roles.md (new file, 10 lines)
@@ -0,0 +1,10 @@
+# system.roles {#system_tables-roles}
+
+Contains information about configured [roles](../../operations/access-rights.md#role-management).
+
+Columns:
+
+- `name` ([String](../../sql-reference/data-types/string.md)) — Role name.
+- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Role ID.
+- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of roles. Configured in the `access_control_path` parameter.
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) <!--hide-->
@ -54,8 +54,6 @@ In this case, ClickHouse can reload the dictionary earlier if the dictionary con
|
|||||||
|
|
||||||
When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):
|
When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):
|
||||||
|
|
||||||
When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md):
|
|
||||||
|
|
||||||
- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
|
- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
|
||||||
- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
|
- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
|
||||||
- Dictionaries from other sources are updated every time by default.
|
- Dictionaries from other sources are updated every time by default.
|
||||||
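To make the reload rules above concrete, a hedged sketch of a DDL dictionary with a text-file source (the dictionary name, file path, and lifetime values are illustrative assumptions, not from this page):

``` sql
CREATE DICTIONARY example_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(FILE(path '/var/lib/clickhouse/user_files/example.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360)
```

With a file source, the server checks the file's modification time within the `LIFETIME` window and reloads the dictionary only if the file has changed, which matches the text-file rule above.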
|
@ -1350,4 +1350,42 @@ len: 30
|
|||||||
- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom)
|
- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom)
|
||||||
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)
|
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)
|
||||||
|
|
||||||
|
|
||||||
|
## randomStringUTF8 {#randomstringutf8}
|
||||||
|
|
||||||
|
Generates a random string of the specified length. The resulting string contains valid UTF-8 code points, which may lie outside the range of assigned Unicode code points.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
randomStringUTF8(length);
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `length` — Required length of the resulting string in code points. [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned value(s)**
|
||||||
|
|
||||||
|
- UTF-8 random string.
|
||||||
|
|
||||||
|
Type: [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT randomStringUTF8(13)
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─randomStringUTF8(13)─┐
|
||||||
|
│ 𘤗д兠庇 │
|
||||||
|
└──────────────────────┘
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
[Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) <!--hide-->
|
[Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) <!--hide-->
|
||||||
|
@ -7,14 +7,14 @@ toc_title: VIEW
|
|||||||
|
|
||||||
Creates a new view. There are two types of views: normal and materialized.
|
Creates a new view. There are two types of views: normal and materialized.
|
||||||
|
|
||||||
|
## Normal {#normal}
|
||||||
|
|
||||||
Syntax:
|
Syntax:
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
|
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] AS SELECT ...
|
||||||
```
|
```
|
||||||
|
|
||||||
## Normal {#normal}
|
|
||||||
|
|
||||||
Normal views don’t store any data, they just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
|
Normal views don’t store any data, they just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
|
||||||
|
|
||||||
As an example, assume you’ve created a view:
|
As an example, assume you’ve created a view:
|
||||||
@ -37,6 +37,11 @@ SELECT a, b, c FROM (SELECT ...)
|
|||||||
|
|
||||||
## Materialized {#materialized}
|
## Materialized {#materialized}
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
Materialized views store data transformed by the corresponding [SELECT](../../../sql-reference/statements/select/index.md) query.
|
Materialized views store data transformed by the corresponding [SELECT](../../../sql-reference/statements/select/index.md) query.
|
||||||
|
|
||||||
When creating a materialized view without `TO [db].[table]`, you must specify `ENGINE` – the table engine for storing data.
|
When creating a materialized view without `TO [db].[table]`, you must specify `ENGINE` – the table engine for storing data.
|
||||||
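For instance, a hedged minimal example of a materialized view with an explicit engine (the table and column names are illustrative, not from this page):

``` sql
CREATE MATERIALIZED VIEW visits_daily
ENGINE = SummingMergeTree()
ORDER BY day
AS SELECT
    toDate(event_time) AS day,
    count() AS hits
FROM visits
GROUP BY day
```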
|
@ -77,3 +77,11 @@ DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
|||||||
Deletes a settings profile.
|
Deletes a settings profile.
|
||||||
|
|
||||||
Deleted settings profile is revoked from all the entities where it was assigned.
|
Deleted settings profile is revoked from all the entities where it was assigned.
|
||||||
|
|
||||||
|
## DROP VIEW {#drop-view}
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||||
|
```
|
||||||
|
|
||||||
|
Deletes a view. Views can be deleted by a `DROP TABLE` command as well, but `DROP VIEW` checks that `[db.]name` is a view.
|
||||||
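For example (the view name is hypothetical):

``` sql
DROP VIEW IF EXISTS db.daily_summary
```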
|
@ -115,7 +115,7 @@ Aborts ClickHouse process (like `kill -9 {$ pid_clickhouse-server}`)
|
|||||||
|
|
||||||
## Managing Distributed Tables {#query-language-system-distributed}
|
## Managing Distributed Tables {#query-language-system-distributed}
|
||||||
|
|
||||||
ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the `insert_distributed_sync` setting.
|
ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the [insert_distributed_sync](../../operations/settings/settings.md#insert_distributed_sync) setting.
|
||||||
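A hedged sketch of the workflow described above, pausing background sends around a batch of inserts (the table names are hypothetical):

``` sql
SYSTEM STOP DISTRIBUTED SENDS db.dist_hits;
-- Inserts are only queued locally while background sends are stopped.
INSERT INTO db.dist_hits SELECT * FROM db.hits_staging;
SYSTEM START DISTRIBUTED SENDS db.dist_hits;
-- Force any queued data to be sent to the shards right away.
SYSTEM FLUSH DISTRIBUTED db.dist_hits;
```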
|
|
||||||
### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
|
### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
|
||||||
|
|
||||||
|
@ -693,6 +693,7 @@ auto s = std::string{"Hello"};
|
|||||||
## Сообщения об ошибках {#error-messages}
|
## Сообщения об ошибках {#error-messages}
|
||||||
|
|
||||||
Сообщения об ошибках -- это часть пользовательского интерфейса программы, предназначенная для того, чтобы позволить пользователю:
|
Сообщения об ошибках -- это часть пользовательского интерфейса программы, предназначенная для того, чтобы позволить пользователю:
|
||||||
|
|
||||||
* замечать ошибочные ситуации,
|
* замечать ошибочные ситуации,
|
||||||
* понимать их смысл и причины,
|
* понимать их смысл и причины,
|
||||||
* устранять эти ситуации.
|
* устранять эти ситуации.
|
||||||
@ -700,6 +701,7 @@ auto s = std::string{"Hello"};
|
|||||||
Форма и содержание сообщений об ошибках должны способствовать достижению этих целей.
|
Форма и содержание сообщений об ошибках должны способствовать достижению этих целей.
|
||||||
|
|
||||||
Есть два основных вида ошибок:
|
Есть два основных вида ошибок:
|
||||||
|
|
||||||
* пользовательская или системная ошибка,
|
* пользовательская или системная ошибка,
|
||||||
* внутренняя программная ошибка.
|
* внутренняя программная ошибка.
|
||||||
|
|
||||||
@ -722,6 +724,7 @@ While processing '(SELECT 2 AS a)'.
|
|||||||
The dictionary is configured incorrectly.
|
The dictionary is configured incorrectly.
|
||||||
```
|
```
|
||||||
Из него не понятно:
|
Из него не понятно:
|
||||||
|
|
||||||
- какой словарь?
|
- какой словарь?
|
||||||
- в чём ошибка конфигурации?
|
- в чём ошибка конфигурации?
|
||||||
|
|
||||||
@ -735,12 +738,14 @@ The dictionary is configured incorrectly.
|
|||||||
Появление такой ошибки всегда свидетельствует о наличии бага в программе. Пользователь не может исправить такую ошибку самостоятельно, и должен сообщить о ней разработчикам.
|
Появление такой ошибки всегда свидетельствует о наличии бага в программе. Пользователь не может исправить такую ошибку самостоятельно, и должен сообщить о ней разработчикам.
|
||||||
|
|
||||||
Есть два основных варианта проверки на такие ошибки:
|
Есть два основных варианта проверки на такие ошибки:
|
||||||
|
|
||||||
* Исключение с кодом `LOGICAL_ERROR`. Его можно использовать для важных проверок, которые делаются в том числе в релизной сборке.
|
* Исключение с кодом `LOGICAL_ERROR`. Его можно использовать для важных проверок, которые делаются в том числе в релизной сборке.
|
||||||
* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок.
|
* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок.
|
||||||
|
|
||||||
Пример сообщения, у которого должен быть код `LOGICAL_ERROR`:
|
Пример сообщения, у которого должен быть код `LOGICAL_ERROR`:
|
||||||
`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
|
`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
|
||||||
По каким признакам можно заметить, что здесь говорится о внутренней программной ошибке?
|
По каким признакам можно заметить, что здесь говорится о внутренней программной ошибке?
|
||||||
|
|
||||||
* в сообщении упоминаются внутренние сущности из кода,
|
* в сообщении упоминаются внутренние сущности из кода,
|
||||||
* в сообщении написано it's a bug,
|
* в сообщении написано it's a bug,
|
||||||
* непосредственные действия пользователя не могут исправить эту ошибку. Мы ожидаем, что пользователь зарепортит её как баг, и будем исправлять в коде.
|
* непосредственные действия пользователя не могут исправить эту ошибку. Мы ожидаем, что пользователь зарепортит её как баг, и будем исправлять в коде.
|
||||||
@ -752,6 +757,7 @@ The dictionary is configured incorrectly.
|
|||||||
### Как добавить новое сообщение об ошибке? {#error-messages-add}
|
### Как добавить новое сообщение об ошибке? {#error-messages-add}
|
||||||
|
|
||||||
Когда добавляете сообщение об ошибке:
|
Когда добавляете сообщение об ошибке:
|
||||||
|
|
||||||
1. Опишите, что произошло, в пользовательских терминах, а не кусками кода.
|
1. Опишите, что произошло, в пользовательских терминах, а не кусками кода.
|
||||||
2. Добавьте максимум контекста (с чем произошло, когда, почему, и т.д.).
|
2. Добавьте максимум контекста (с чем произошло, когда, почему, и т.д.).
|
||||||
3. Добавьте типичные причины.
|
3. Добавьте типичные причины.
|
||||||
|
@ -725,6 +725,17 @@ log_query_threads=1
|
|||||||
|
|
||||||
Значение по умолчанию: 50.
|
Значение по умолчанию: 50.
|
||||||
|
|
||||||
|
## connection\_pool\_max\_wait\_ms {#connection-pool-max-wait-ms}
|
||||||
|
|
||||||
|
Время ожидания соединения в миллисекундах, когда пул соединений заполнен.
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- Положительное целое число.
|
||||||
|
- 0 — Бесконечный таймаут.
|
||||||
|
|
||||||
|
Значение по умолчанию: 0.
|
||||||
|
|
||||||
## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
|
## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
|
||||||
|
|
||||||
Максимальное количество попыток соединения с каждой репликой, для движка таблиц Distributed.
|
Максимальное количество попыток соединения с каждой репликой, для движка таблиц Distributed.
|
||||||
@ -736,6 +747,21 @@ log_query_threads=1
|
|||||||
Считать ли экстремальные значения (минимумы и максимумы по столбцам результата запроса). Принимает 0 или 1. По умолчанию - 0 (выключено).
|
Считать ли экстремальные значения (минимумы и максимумы по столбцам результата запроса). Принимает 0 или 1. По умолчанию - 0 (выключено).
|
||||||
Подробнее смотрите раздел «Экстремальные значения».
|
Подробнее смотрите раздел «Экстремальные значения».
|
||||||
|
|
||||||
|
## kafka\_max\_wait\_ms {#kafka-max-wait-ms}
|
||||||
|
|
||||||
|
Время ожидания в миллисекундах для чтения сообщений из [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) перед повторной попыткой.
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- Положительное целое число.
|
||||||
|
- 0 — Бесконечный таймаут.
|
||||||
|
|
||||||
|
Значение по умолчанию: 5000.
|
||||||
|
|
||||||
|
См. также:
|
||||||
|
|
||||||
|
- [Apache Kafka](https://kafka.apache.org/)
|
||||||
|
|
||||||
## use\_uncompressed\_cache {#setting-use_uncompressed_cache}
|
## use\_uncompressed\_cache {#setting-use_uncompressed_cache}
|
||||||
|
|
||||||
Использовать ли кэш разжатых блоков. Принимает 0 или 1. По умолчанию - 0 (выключено).
|
Использовать ли кэш разжатых блоков. Принимает 0 или 1. По умолчанию - 0 (выключено).
|
||||||
@ -755,6 +781,17 @@ log_query_threads=1
|
|||||||
|
|
||||||
Эта настройка, выставленная в 1, используется в Яндекс.Метрике для реализации suggest-а значений для условий сегментации. После ввода очередного символа, если старый запрос ещё не выполнился, его следует отменить.
|
Эта настройка, выставленная в 1, используется в Яндекс.Метрике для реализации suggest-а значений для условий сегментации. После ввода очередного символа, если старый запрос ещё не выполнился, его следует отменить.
|
||||||
|
|
||||||
|
## replace\_running\_query\_max\_wait\_ms {#replace-running-query-max-wait-ms}
|
||||||
|
|
||||||
|
Время ожидания завершения выполнения запроса с тем же `query_id`, когда активирована настройка [replace_running_query](#replace-running-query).
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- Положительное целое число.
|
||||||
|
- 0 — Создание исключения, которое не позволяет выполнить новый запрос, если сервер уже выполняет запрос с тем же `query_id`.
|
||||||
|
|
||||||
|
Значение по умолчанию: 5000.
|
||||||
|
|
||||||
## stream\_flush\_interval\_ms {#stream-flush-interval-ms}
|
## stream\_flush\_interval\_ms {#stream-flush-interval-ms}
|
||||||
|
|
||||||
Работает для таблиц со стриммингом в случае тайм-аута, или когда поток генерирует [max\_insert\_block\_size](#settings-max_insert_block_size) строк.
|
Работает для таблиц со стриммингом в случае тайм-аута, или когда поток генерирует [max\_insert\_block\_size](#settings-max_insert_block_size) строк.
|
||||||
@ -1241,6 +1278,23 @@ Default value: 0.
|
|||||||
|
|
||||||
Значение по умолчанию: 16.
|
Значение по умолчанию: 16.
|
||||||
|
|
||||||
|
## insert_distributed_sync {#insert_distributed_sync}
|
||||||
|
|
||||||
|
Включает или отключает режим синхронного добавления данных в распределенные таблицы (таблицы с движком [Distributed](../../engines/table-engines/special/distributed.md#distributed)).
|
||||||
|
|
||||||
|
По умолчанию ClickHouse вставляет данные в распределённую таблицу в асинхронном режиме. Если `insert_distributed_sync=1`, то данные вставляются сихронно, а запрос `INSERT` считается выполненным успешно, когда данные записаны на все шарды (по крайней мере на одну реплику для каждого шарда, если `internal_replication = true`).
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- 0 — Данные добавляются в асинхронном режиме.
|
||||||
|
- 1 — Данные добавляются в синхронном режиме.
|
||||||
|
|
||||||
|
Значение по умолчанию: `0`.
|
||||||
|
|
||||||
|
**См. также**
|
||||||
|
|
||||||
|
- [Движок Distributed](../../engines/table-engines/special/distributed.md#distributed)
|
||||||
|
- [Управление распределёнными таблицами](../../sql-reference/statements/system.md#query-language-system-distributed)
|
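A minimal, hedged example of switching one session to synchronous distributed inserts (the table names are hypothetical):

``` sql
SET insert_distributed_sync = 1;
INSERT INTO db.dist_hits SELECT * FROM db.hits_staging;
```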
||||||
## validate\_polygons {#validate_polygons}
|
## validate\_polygons {#validate_polygons}
|
||||||
|
|
||||||
Включает или отключает генерирование исключения в функции [pointInPolygon](../../sql-reference/functions/geo.md#pointinpolygon), если многоугольник самопересекающийся или самокасающийся.
|
Включает или отключает генерирование исключения в функции [pointInPolygon](../../sql-reference/functions/geo.md#pointinpolygon), если многоугольник самопересекающийся или самокасающийся.
|
||||||
|
@ -126,6 +126,44 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
|
|||||||
└──────────────────┘
|
└──────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## system.licenses {#system-tables_system.licenses}
|
||||||
|
|
||||||
|
Содержит информацию о лицензиях сторонних библиотек, которые находятся в директории [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) исходных кодов ClickHouse.
|
||||||
|
|
||||||
|
Столбцы:
|
||||||
|
|
||||||
|
- `library_name` ([String](../sql-reference/data-types/string.md)) — Название библиотеки, к которой относится лицензия.
|
||||||
|
- `license_type` ([String](../sql-reference/data-types/string.md)) — Тип лицензии, например, Apache, MIT.
|
||||||
|
- `license_path` ([String](../sql-reference/data-types/string.md)) — Путь к файлу с текстом лицензии.
|
||||||
|
- `license_text` ([String](../sql-reference/data-types/string.md)) — Текст лицензии.
|
||||||
|
|
||||||
|
**Пример**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15
|
||||||
|
```
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─library_name───────┬─license_type─┬─license_path────────────────────────┐
|
||||||
|
│ FastMemcpy │ MIT │ /contrib/FastMemcpy/LICENSE │
|
||||||
|
│ arrow │ Apache │ /contrib/arrow/LICENSE.txt │
|
||||||
|
│ avro │ Apache │ /contrib/avro/LICENSE.txt │
|
||||||
|
│ aws-c-common │ Apache │ /contrib/aws-c-common/LICENSE │
|
||||||
|
│ aws-c-event-stream │ Apache │ /contrib/aws-c-event-stream/LICENSE │
|
||||||
|
│ aws-checksums │ Apache │ /contrib/aws-checksums/LICENSE │
|
||||||
|
│ aws │ Apache │ /contrib/aws/LICENSE.txt │
|
||||||
|
│ base64 │ BSD 2-clause │ /contrib/base64/LICENSE │
|
||||||
|
│ boost │ Boost │ /contrib/boost/LICENSE_1_0.txt │
|
||||||
|
│ brotli │ MIT │ /contrib/brotli/LICENSE │
|
||||||
|
│ capnproto │ MIT │ /contrib/capnproto/LICENSE │
|
||||||
|
│ cassandra │ Apache │ /contrib/cassandra/LICENSE.txt │
|
||||||
|
│ cctz │ Apache │ /contrib/cctz/LICENSE.txt │
|
||||||
|
│ cityhash102 │ MIT │ /contrib/cityhash102/COPYING │
|
||||||
|
│ cppkafka │ BSD 2-clause │ /contrib/cppkafka/LICENSE │
|
||||||
|
└────────────────────┴──────────────┴─────────────────────────────────────┘
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
## system.databases {#system-databases}
|
## system.databases {#system-databases}
|
||||||
|
|
||||||
Таблица содержит один столбец name типа String - имя базы данных.
|
Таблица содержит один столбец name типа String - имя базы данных.
|
||||||
@ -1286,29 +1324,50 @@ path: /clickhouse/tables/01-08/visits/replicas
|
|||||||
|
|
||||||
## system.mutations {#system_tables-mutations}
|
## system.mutations {#system_tables-mutations}
|
||||||
|
|
||||||
Таблица содержит информацию о ходе выполнения [мутаций](../sql-reference/statements/alter.md#alter-mutations) MergeTree-таблиц. Каждой команде мутации соответствует одна строка. В таблице есть следующие столбцы:
|
Таблица содержит информацию о ходе выполнения [мутаций](../sql-reference/statements/alter.md#alter-mutations) таблиц семейства MergeTree. Каждой команде мутации соответствует одна строка таблицы.
|
||||||
|
|
||||||
**database**, **table** - имя БД и таблицы, к которой была применена мутация.
|
Столбцы:
|
||||||
|
|
||||||
**mutation\_id** - ID запроса. Для реплицированных таблиц эти ID соответствуют именам записей в директории `<table_path_in_zookeeper>/mutations/` в ZooKeeper, для нереплицированных - именам файлов в директории с данными таблицы.
|
- `database` ([String](../sql-reference/data-types/string.md)) — имя БД, к которой была применена мутация.
|
||||||
|
|
||||||
**command** - Команда мутации (часть запроса после `ALTER TABLE [db.]table`).
|
- `table` ([String](../sql-reference/data-types/string.md)) — имя таблицы, к которой была применена мутация.
|
||||||
|
|
||||||
**create\_time** - Время создания мутации.
|
- `mutation_id` ([String](../sql-reference/data-types/string.md)) — ID запроса. Для реплицированных таблиц эти ID соответствуют именам записей в директории `<table_path_in_zookeeper>/mutations/` в ZooKeeper, для нереплицированных — именам файлов в директории с данными таблицы.
|
||||||
|
|
||||||
**block\_numbers.partition\_id**, **block\_numbers.number** - Nested-столбец. Для мутаций реплицированных таблиц для каждой партиции содержит номер блока, полученный этой мутацией (в каждой партиции будут изменены только куски, содержащие блоки с номерами, меньшими номера, полученного мутацией в этой партиции). Для нереплицированных таблиц нумерация блоков сквозная по партициям, поэтому столбец содержит одну запись с единственным номером блока, полученным мутацией.
|
- `command` ([String](../sql-reference/data-types/string.md)) — команда мутации (часть запроса после `ALTER TABLE [db.]table`).
|
||||||
|
|
||||||
**parts\_to\_do** - Количество кусков таблицы, которые ещё предстоит изменить.
|
- `create_time` ([Datetime](../sql-reference/data-types/datetime.md)) — дата и время создания мутации.
|
||||||
|
|
||||||
**is\_done** - Завершена ли мутация. Замечание: даже если `parts_to_do = 0`, для реплицированной таблицы возможна ситуация, когда мутация ещё не завершена из-за долго выполняющейся вставки, которая добавляет данные, которые нужно будет мутировать.
|
- `block_numbers.partition_id` ([Array](../sql-reference/data-types/array.md)([String](../sql-reference/data-types/string.md))) — Для мутаций реплицированных таблиц массив содержит номера партиций (по одной записи для каждой партиции). Для мутаций нереплицированных таблиц массив пустой.
|
||||||
|
|
||||||
|
- `block_numbers.number` ([Array](../sql-reference/data-types/array.md)([Int64](../sql-reference/data-types/int-uint.md))) — Для мутаций реплицированных таблиц массив содержит по одной записи для каждой партиции, с номером блока, полученным этой мутацией. В каждой партиции будут изменены только куски, содержащие блоки с номерами меньше чем данный номер.
|
||||||
|
|
||||||
|
Для нереплицированных таблиц нумерация блоков сквозная по партициям. Поэтому массив содержит единственную запись с номером блока, полученным мутацией.
|
||||||
|
|
||||||
|
- `parts_to_do_names` ([Array](../sql-reference/data-types/array.md)([String](../sql-reference/data-types/string.md))) — массив с именами кусков данных, которые должны быть изменены для завершения мутации.
|
||||||
|
|
||||||
|
- `parts_to_do` ([Int64](../sql-reference/data-types/int-uint.md)) — количество кусков данных, которые должны быть изменены для завершения мутации.
|
||||||
|
|
||||||
|
- `is_done` ([UInt8](../sql-reference/data-types/int-uint.md)) — Признак, завершена ли мутация. Возможные значения:
|
||||||
|
- `1` — мутация завершена,
|
||||||
|
- `0` — мутация еще продолжается.
|
||||||
|
|
||||||
|
!!! info "Замечание"
|
||||||
|
Даже если `parts_to_do = 0`, для реплицированной таблицы возможна ситуация, когда мутация ещё не завершена из-за долго выполняющейся операции `INSERT`, которая добавляет данные, которые нужно будет мутировать.
|
||||||
|
|
||||||
Если во время мутации какого-либо куска возникли проблемы, заполняются следующие столбцы:
|
Если во время мутации какого-либо куска возникли проблемы, заполняются следующие столбцы:
|
||||||
|
|
||||||
**latest\_failed\_part** - Имя последнего куска, мутация которого не удалась.
|
- `latest_failed_part` ([String](../sql-reference/data-types/string.md)) — имя последнего куска, мутация которого не удалась.
|
||||||
|
|
||||||
**latest\_fail\_time** — время последней ошибки мутации.
|
- `latest_fail_time` ([Datetime](../sql-reference/data-types/datetime.md)) — дата и время последней ошибки мутации.
|
||||||
|
|
||||||
**latest\_fail\_reason** — причина последней ошибки мутации.
|
- `latest_fail_reason` ([String](../sql-reference/data-types/string.md)) — причина последней ошибки мутации.
|
||||||
|
|
||||||
|
**См. также**
|
||||||
|
|
||||||
|
- [Мутации](../sql-reference/statements/alter.md#alter-mutations)
|
||||||
|
- [Движок MergeTree](../engines/table-engines/mergetree-family/mergetree.md)
|
||||||
|
- [Репликация данных](../engines/table-engines/mergetree-family/replication.md) (семейство ReplicatedMergeTree)
|
||||||
|
|
||||||
## system.disks {#system_tables-disks}
|
## system.disks {#system_tables-disks}
|
||||||
|
|
||||||
@ -1337,10 +1396,56 @@ Cодержит информацию о дисках, заданных в [ко
|
|||||||
|
|
||||||
Если политика хранения содержит несколько томов, то каждому тому соответствует отдельная запись в таблице.
|
Если политика хранения содержит несколько томов, то каждому тому соответствует отдельная запись в таблице.
|
||||||
|
|
||||||
|
## system.roles {#system_tables-roles}
|
||||||
|
|
||||||
|
Содержит сведения о [ролях](../operations/access-rights.md#role-management).
|
||||||
|
|
||||||
|
Столбцы:
|
||||||
|
|
||||||
|
- `name` ([String](../sql-reference/data-types/string.md)) — Имя роли.
|
||||||
|
- `id` ([UUID](../sql-reference/data-types/uuid.md)) — ID роли.
|
||||||
|
- `storage` ([String](../sql-reference/data-types/string.md)) — Путь к хранилищу ролей. Настраивается в параметре `access_control_path`.
|
||||||
|
|
||||||
|
## system.role_grants {#system_tables-role_grants}
|
||||||
|
Содержит [гранты](../sql-reference/statements/grant.md) ролей для пользователей и ролей. Чтобы добавить записи в эту таблицу, используйте команду `GRANT role TO user`.
|
||||||
|
|
||||||
|
Столбцы:
|
||||||
|
|
||||||
|
- `user_name` ([Nullable](../sql-reference/data-types/nullable.md)([String](../sql-reference/data-types/string.md))) — Имя пользователя.
|
||||||
|
- `role_name` ([Nullable](../sql-reference/data-types/nullable.md)([String](../sql-reference/data-types/string.md))) — Имя роли.
|
||||||
|
- `granted_role_name` ([String](../sql-reference/data-types/string.md)) — Имя роли, назначенной для роли `role_name`. Чтобы назначить одну роль другой используйте `GRANT role1 TO role2`.
|
||||||
|
- `granted_role_is_default` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `granted_role` ролью по умолчанию. Возможные значения:
|
||||||
|
- 1 — `granted_role` является ролью по умолчанию.
|
||||||
|
- 0 — `granted_role` не является ролью по умолчанию.
|
||||||
|
- `with_admin_option` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, обладает ли роль `granted_role` привилегией `ADMIN OPTION`. Возможные значения:
|
||||||
|
- 1 — Роль обладает привилегией `ADMIN OPTION`.
|
||||||
|
- 0 — Роль не обладает привилегией `ADMIN OPTION`.
|
||||||
|
|
||||||
|
## system.current_roles {#system_tables-current_roles}
|
||||||
|
|
||||||
|
Содержит активные роли текущего пользователя. `SET ROLE` изменяет содержимое этой таблицы.
|
||||||
|
|
||||||
|
Столбцы:
|
||||||
|
|
||||||
|
- `role_name` ([String](../sql-reference/data-types/string.md)) — Имя роли.
|
||||||
|
- `with_admin_option` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, обладает ли роль `current_role` привилегией `ADMIN OPTION`.
|
||||||
|
- `is_default` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `current_role` ролью по умолчанию.
|
||||||
|
|
||||||
|
## system.enabled_roles {#system_tables-enabled_roles}
|
||||||
|
Содержит все активные роли на данный момент, включая текущую роль текущего пользователя и роли, назначенные для текущей роли.
|
||||||
|
|
||||||
|
Столбцы:
|
||||||
|
|
||||||
|
- `role_name` ([String](../sql-reference/data-types/string.md)) — Имя роли.
|
||||||
|
- `with_admin_option` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, обладает ли роль `enabled_role` привилегией `ADMIN OPTION`.
|
||||||
|
- `is_current` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `enabled_role` текущей ролью текущего пользователя.
|
||||||
|
- `is_default` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `enabled_role` ролью по умолчанию.
|
||||||
|
|
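A hedged illustration of how `SET ROLE` changes the contents of these tables (the role name is hypothetical):

``` sql
SET ROLE accountant;
SELECT * FROM system.current_roles;
SELECT * FROM system.enabled_roles;
```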
||||||
## system.quotas {#system_tables-quotas}
|
## system.quotas {#system_tables-quotas}
|
||||||
Содержит информацию о [квотах](quotas.md).
|
Содержит информацию о [квотах](quotas.md).
|
||||||
|
|
||||||
Столбцы:
|
Столбцы:
|
||||||
|
|
||||||
- `name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
- `name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
||||||
- `id` ([UUID](../sql-reference/data-types/uuid.md)) — ID квоты.
|
- `id` ([UUID](../sql-reference/data-types/uuid.md)) — ID квоты.
|
||||||
- `storage`([String](../sql-reference/data-types/string.md)) — Хранилище квот. Возможные значения: "users.xml", если квота задана в файле users.xml, "disk" — если квота задана в SQL-запросе.
|
- `storage`([String](../sql-reference/data-types/string.md)) — Хранилище квот. Возможные значения: "users.xml", если квота задана в файле users.xml, "disk" — если квота задана в SQL-запросе.
|
||||||
@ -1362,6 +1467,7 @@ Cодержит информацию о дисках, заданных в [ко
|
|||||||
Содержит информацию о максимумах для всех интервалов всех квот. Одной квоте могут соответствовать любое количество строк или ноль.
|
Содержит информацию о максимумах для всех интервалов всех квот. Одной квоте могут соответствовать любое количество строк или ноль.
|
||||||
|
|
||||||
Столбцы:
|
Столбцы:
|
||||||
|
|
||||||
- `quota_name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
- `quota_name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
||||||
- `duration` ([UInt32](../sql-reference/data-types/int-uint.md)) — Длина временного интервала для расчета потребления ресурсов, в секундах.
|
- `duration` ([UInt32](../sql-reference/data-types/int-uint.md)) — Длина временного интервала для расчета потребления ресурсов, в секундах.
|
||||||
- `is_randomized_interval` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Логическое значение. Оно показывает, является ли интервал рандомизированным. Интервал всегда начинается в одно и то же время, если он не рандомизирован. Например, интервал в 1 минуту всегда начинается с целого числа минут (то есть он может начинаться в 11:20:00, но никогда не начинается в 11:20:01), интервал в один день всегда начинается в полночь UTC. Если интервал рандомизирован, то самый первый интервал начинается в произвольное время, а последующие интервалы начинаются один за другим. Значения:
|
- `is_randomized_interval` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Логическое значение. Оно показывает, является ли интервал рандомизированным. Интервал всегда начинается в одно и то же время, если он не рандомизирован. Например, интервал в 1 минуту всегда начинается с целого числа минут (то есть он может начинаться в 11:20:00, но никогда не начинается в 11:20:01), интервал в один день всегда начинается в полночь UTC. Если интервал рандомизирован, то самый первый интервал начинается в произвольное время, а последующие интервалы начинаются один за другим. Значения:
|
||||||
@ -1379,6 +1485,7 @@ Cодержит информацию о дисках, заданных в [ко
|
|||||||
Использование квоты текущим пользователем: сколько используется и сколько осталось.
|
Использование квоты текущим пользователем: сколько используется и сколько осталось.
|
||||||
|
|
||||||
Столбцы:
|
Столбцы:
|
||||||
|
|
||||||
- `quota_name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
- `quota_name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
||||||
- `quota_key`([String](../sql-reference/data-types/string.md)) — Значение ключа. Например, если keys = `ip_address`, `quota_key` может иметь значение '192.168.1.1'.
|
- `quota_key`([String](../sql-reference/data-types/string.md)) — Значение ключа. Например, если keys = `ip_address`, `quota_key` может иметь значение '192.168.1.1'.
|
||||||
- `start_time`([Nullable](../sql-reference/data-types/nullable.md)([DateTime](../sql-reference/data-types/datetime.md))) — Время начала расчета потребления ресурсов.
|
- `start_time`([Nullable](../sql-reference/data-types/nullable.md)([DateTime](../sql-reference/data-types/datetime.md))) — Время начала расчета потребления ресурсов.
|
||||||
@ -1403,6 +1510,7 @@ Cодержит информацию о дисках, заданных в [ко
|
|||||||
Использование квот всеми пользователями.
|
Использование квот всеми пользователями.
|
||||||
|
|
||||||
Столбцы:
|
Столбцы:
|
||||||
|
|
||||||
- `quota_name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
- `quota_name` ([String](../sql-reference/data-types/string.md)) — Имя квоты.
|
||||||
- `quota_key` ([String](../sql-reference/data-types/string.md)) — Ключ квоты.
|
- `quota_key` ([String](../sql-reference/data-types/string.md)) — Ключ квоты.
|
||||||
- `is_current` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Квота используется для текущего пользователя.
|
- `is_current` ([UInt8](../sql-reference/data-types/int-uint.md#uint-ranges)) — Квота используется для текущего пользователя.
|
||||||
|
@ -1334,4 +1334,42 @@ len: 30
|
|||||||
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)
|
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)
|
||||||
|
|
||||||
|
|
||||||
|
## randomStringUTF8 {#randomstringutf8}
|
||||||
|
|
||||||
|
Генерирует случайную строку заданной длины. Результирующая строка содержит валидные кодовые точки UTF-8, которые могут выходить за пределы диапазона назначенных символов Unicode.
|
||||||
|
|
||||||
|
**Синтаксис**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
randomStringUTF8(length);
|
||||||
|
```
|
||||||
|
|
||||||
|
**Параметры**
|
||||||
|
|
||||||
|
- `length` — Длина итоговой строки в кодовых точках. [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Возвращаемое значение**
|
||||||
|
|
||||||
|
- Случайная строка в кодировке UTF-8.
|
||||||
|
|
||||||
|
Тип: [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
|
**Пример**
|
||||||
|
|
||||||
|
Запрос:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT randomStringUTF8(13)
|
||||||
|
```
|
||||||
|
|
||||||
|
Результат:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─randomStringUTF8(13)─┐
|
||||||
|
│ 𘤗д兠庇 │
|
||||||
|
└──────────────────────┘
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/other_functions/) <!--hide-->
|
[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/other_functions/) <!--hide-->
|
||||||
|
@ -15,7 +15,7 @@ FROM <left_table>
|
|||||||
|
|
||||||
## Поддерживаемые типы соединения {#select-join-types}
|
## Поддерживаемые типы соединения {#select-join-types}
|
||||||
|
|
||||||
Весе типы из стандартого [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) поддерживаются:
|
Все типы из стандартного [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) поддерживаются:
|
||||||
|
|
||||||
- `INNER JOIN`, возвращаются только совпадающие строки.
|
- `INNER JOIN`, возвращаются только совпадающие строки.
|
||||||
- `LEFT OUTER JOIN`, не совпадающие строки из левой таблицы возвращаются в дополнение к совпадающим строкам.
|
- `LEFT OUTER JOIN`, не совпадающие строки из левой таблицы возвращаются в дополнение к совпадающим строкам.
|
||||||
|
@ -90,7 +90,7 @@ SELECT name, status FROM system.dictionaries;
|
|||||||
|
|
||||||
## Управление распределёнными таблицами {#query-language-system-distributed}
|
## Управление распределёнными таблицами {#query-language-system-distributed}
|
||||||
|
|
||||||
ClickHouse может оперировать [распределёнными](../../sql-reference/statements/system.md) таблицами. Когда пользователь вставляет данные в эти таблицы, ClickHouse сначала формирует очередь из данных, которые должны быть отправлены на узлы кластера, а затем асинхронно отправляет подготовленные данные. Вы можете управлять очередью с помощью запросов [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) и [FLUSH DISTRIBUTED](#query_language-system-flush-distributed). Также есть возможность синхронно вставлять распределенные данные с помощью настройки `insert_distributed_sync`.
|
ClickHouse может оперировать [распределёнными](../../sql-reference/statements/system.md) таблицами. Когда пользователь вставляет данные в эти таблицы, ClickHouse сначала формирует очередь из данных, которые должны быть отправлены на узлы кластера, а затем асинхронно отправляет подготовленные данные. Вы можете управлять очередью с помощью запросов [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) и [FLUSH DISTRIBUTED](#query_language-system-flush-distributed). Также есть возможность синхронно вставлять распределенные данные с помощью настройки [insert_distributed_sync](../../operations/settings/settings.md#insert_distributed_sync).
|
||||||
|
|
||||||
### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
|
### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
|
||||||
|
|
||||||
|
@ -22,7 +22,7 @@ mkdocs-macros-plugin==0.4.9
|
|||||||
nltk==3.5
|
nltk==3.5
|
||||||
nose==1.3.7
|
nose==1.3.7
|
||||||
protobuf==3.12.2
|
protobuf==3.12.2
|
||||||
numpy==1.18.5
|
numpy==1.19.1
|
||||||
Pygments==2.5.2
|
Pygments==2.5.2
|
||||||
pymdown-extensions==7.1
|
pymdown-extensions==7.1
|
||||||
python-slugify==4.0.1
|
python-slugify==4.0.1
|
||||||
|
@ -124,15 +124,7 @@ def adjust_markdown_html(content):
|
|||||||
|
|
||||||
|
|
||||||
def minify_html(content):
|
def minify_html(content):
|
||||||
return htmlmin.minify(content,
|
return htmlmin.minify(content)
|
||||||
remove_comments=False,
|
|
||||||
remove_empty_space=False,
|
|
||||||
remove_all_empty_space=False,
|
|
||||||
reduce_empty_attributes=True,
|
|
||||||
reduce_boolean_attributes=False,
|
|
||||||
remove_optional_attribute_quotes=True,
|
|
||||||
convert_charrefs=False,
|
|
||||||
keep_pre=True)
|
|
||||||
|
|
||||||
|
|
||||||
def build_website(args):
|
def build_website(args):
|
||||||
|
@ -4,9 +4,9 @@
|
|||||||
|
|
||||||
功能性测试是最简便使用的。绝大部分 ClickHouse 的功能可以通过功能性测试来测试,任何代码的更改都必须通过该测试。
|
功能性测试是最简便使用的。绝大部分 ClickHouse 的功能可以通过功能性测试来测试,任何代码的更改都必须通过该测试。
|
||||||
|
|
||||||
每个功能测试会向正在运行的 ClickHouse服 务器发送一个或多个查询,并将结果与预期结果进行比较。
|
每个功能测试会向正在运行的 ClickHouse服务器发送一个或多个查询,并将结果与预期结果进行比较。
|
||||||
|
|
||||||
测试用例在 `tests/queries` 目录中。这里有两个子目录:`stateless` 和 `stateful`目录。 无状态的测试无需预加载测试数据集 - 通常是在测试运行期间动态创建小量的数据集。有状态测试需要来自 Yandex.Metrica 的预加载测试数据,而不向一般公众提供。 我们倾向于仅使用«无状态»测试并避免添加新的«有状态»测试。
|
测试用例在 `tests/queries` 目录中。这里有两个子目录:`stateless` 和 `stateful`目录。无状态的测试无需预加载测试数据集 - 通常是在测试运行期间动态创建小量的数据集。有状态测试需要来自 Yandex.Metrica 的预加载测试数据,而不向一般公众提供。我们倾向于仅使用«无状态»测试并避免添加新的«有状态»测试。
|
||||||
|
|
||||||
每个测试用例可以是两种类型之一:`.sql` 和 `.sh`。`.sql` 测试文件是用于管理`clickhouse-client --multiquery --testmode`的简单SQL脚本。`.sh` 测试文件是一个可以自己运行的脚本。
|
每个测试用例可以是两种类型之一:`.sql` 和 `.sh`。`.sql` 测试文件是用于管理`clickhouse-client --multiquery --testmode`的简单SQL脚本。`.sh` 测试文件是一个可以自己运行的脚本。
|
||||||
|
|
||||||
@ -28,7 +28,7 @@
|
|||||||
|
|
||||||
## 集成测试 {#ji-cheng-ce-shi}
|
## 集成测试 {#ji-cheng-ce-shi}
|
||||||
|
|
||||||
集成测试允许在集群配置中测试 ClickHouse,并与其他服务器(如MySQL,Postgres,MongoDB)进行 ClickHouse 交互。它们可用于模拟网络拆分,数据包丢弃等。这些测试在Docker 下运行,并使用各种软件创建多个容器。
|
集成测试允许在集群配置中测试 ClickHouse,并与其他服务器(如MySQL,Postgres,MongoDB)进行 ClickHouse 交互。它们可用于模拟网络拆分,数据包丢弃等。这些测试在Docker下运行,并使用各种软件创建多个容器。
|
||||||
|
|
||||||
参考 `tests/integration/README.md` 文档关于如何使用集成测试。
|
参考 `tests/integration/README.md` 文档关于如何使用集成测试。
|
||||||
|
|
||||||
@ -93,7 +93,7 @@
|
|||||||
|
|
||||||
## 测试环境 {#ce-shi-huan-jing}
|
## 测试环境 {#ce-shi-huan-jing}
|
||||||
|
|
||||||
在将版本发布为稳定之前,我们将其部署在测试环境中 测试环境是一个处理\[Yandex.Metrica\](https://metrica.yandex.com/)总数据的1/39部分大小的集群。 我们与 Yandex.Metrica 团队公用我们的测试环境。ClickHouse 在现有数据的基础上无需停机即可升级。 我们首先看到数据处理成功而不会实时滞后,复制继续工作,并且 Yandex.Metrica 团队无法看到问题。 首先的检查可以通过以下方式完成:
|
在将版本发布为稳定之前,我们将其部署在测试环境中。测试环境是一个处理[Yandex.Metrica](https://metrica.yandex.com/)总数据的1/39部分大小的集群。我们与 Yandex.Metrica 团队公用我们的测试环境。ClickHouse 在现有数据的基础上无需停机即可升级。我们首先看到数据处理成功而不会实时滞后,复制继续工作,并且 Yandex.Metrica 团队无法看到问题。首先的检查可以通过以下方式完成:
|
||||||
|
|
||||||
SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
|
SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
|
||||||
|
|
||||||
@ -101,7 +101,7 @@
|
|||||||
|
|
||||||
## 负载测试 {#fu-zai-ce-shi}
|
## 负载测试 {#fu-zai-ce-shi}
|
||||||
|
|
||||||
部署到测试环境后,我们使用生产群集中的查询运行负载测试。 这是手动完成的。
|
部署到测试环境后,我们使用生产群集中的查询运行负载测试。这是手动完成的。
|
||||||
|
|
||||||
确保在生产集群中开启了 `query_log` 选项。
|
确保在生产集群中开启了 `query_log` 选项。
|
||||||
|
|
||||||
@ -125,11 +125,11 @@
|
|||||||
|
|
||||||
## 编译测试 {#bian-yi-ce-shi}
|
## 编译测试 {#bian-yi-ce-shi}
|
||||||
|
|
||||||
构建测试允许检查构建在各种替代配置和某些外部系统上是否被破坏。测试位于`ci`目录。 它们从 Docker,Vagrant 中的源代码运行构建,有时在 Docker 中运行 `qemu-user-static`。这些测试正在开发中,测试运行不是自动化的。
|
构建测试允许检查构建在各种替代配置和某些外部系统上是否被破坏。测试位于`ci`目录。它们从 Docker,Vagrant 中的源代码运行构建,有时在 Docker 中运行 `qemu-user-static`。这些测试正在开发中,测试运行不是自动化的。
|
||||||
|
|
||||||
动机:
|
动机:
|
||||||
|
|
||||||
通常我们会在 ClickHouse 构建的单个版本上发布并运行所有测试。 但是有一些未经过彻底测试的替代构建版本。 例子:
|
通常我们会在 ClickHouse 构建的单个版本上发布并运行所有测试。但是有一些未经过彻底测试的替代构建版本。例子:
|
||||||
|
|
||||||
- 在 FreeBSD 中的构建;
|
- 在 FreeBSD 中的构建;
|
||||||
- 在 Debian 中使用系统包中的库进行构建;
|
- 在 Debian 中使用系统包中的库进行构建;
|
||||||
@ -152,33 +152,41 @@ Clang 有更多有用的警告 - 您可以使用 `-Weverything` 查找它们并
|
|||||||
|
|
||||||
对于生产构建,使用 gcc(它仍然生成比 clang 稍高效的代码)。对于开发来说,clang 通常更方便使用。您可以使用调试模式在自己的机器上构建(以节省笔记本电脑的电量),但请注意,由于更好的控制流程和过程分析,编译器使用 `-O3` 会生成更多警告。 当使用 clang 构建时,使用 `libc++` 而不是 `libstdc++`,并且在使用调试模式构建时,使用调试版本的 `libc++`,它允许在运行时捕获更多错误。
|
对于生产构建,使用 gcc(它仍然生成比 clang 稍高效的代码)。对于开发来说,clang 通常更方便使用。您可以使用调试模式在自己的机器上构建(以节省笔记本电脑的电量),但请注意,由于更好的控制流程和过程分析,编译器使用 `-O3` 会生成更多警告。 当使用 clang 构建时,使用 `libc++` 而不是 `libstdc++`,并且在使用调试模式构建时,使用调试版本的 `libc++`,它允许在运行时捕获更多错误。
|
||||||
|
|
||||||
## 消毒剂 {#sanitizers}
|
## Sanitizers {#sanitizers}
|
||||||
|
|
||||||
**地址消毒剂**.
|
### Address sanitizer
|
||||||
我们在每个提交的基础上在 ASan 下运行功能和集成测试。
|
我们使用Asan对每个提交进行功能和集成测试。
|
||||||
|
|
||||||
**ツ暗ェツ氾环催ツ団ツ法ツ人)**.
|
### Valgrind (Memcheck)
|
||||||
我们在 Valgrind 过夜进行功能测试。 这需要几个小时。 目前在 `re2` 库中有一个已知的误报,请参阅 [文章](https://research.swtch.com/sparse)。
|
我们在夜间使用Valgrind进行功能测试。这需要几个小时。目前在 `re2` 库中有一个已知的误报,请参阅[文章](https://research.swtch.com/sparse)。
|
||||||
|
|
||||||
**螺纹消毒剂**.
|
### Undefined behaviour sanitizer
|
||||||
我们在 TSan 下进行功能测试。ClickHouse 必须通过所有测试。在 TSan 下运行不是自动化的,只是偶尔执行。
|
我们使用UBSan对每个提交进行功能和集成测试。
|
||||||
|
|
||||||
**记忆消毒剂**.
|
### Thread sanitizer
|
||||||
|
我们使用TSan对每个提交进行功能测试。目前不使用TSan对每个提交进行集成测试。
|
||||||
|
|
||||||
|
### Memory sanitizer
|
||||||
目前我们不使用 MSan。
|
目前我们不使用 MSan。
|
||||||
|
|
||||||
**未定义的行为消毒剂。**
|
### Debug allocator
|
||||||
我们仍然不会在每次提交的基础上使用 UBSan。 有一些地方需要解决。
|
|
||||||
|
|
||||||
**调试分alloc。**
|
|
||||||
您可以使用 `DEBUG_TCMALLOC` CMake 选项启用 `tcmalloc` 的调试版本。我们在每次提交的基础上使用调试分配器运行测试。
|
您可以使用 `DEBUG_TCMALLOC` CMake 选项启用 `tcmalloc` 的调试版本。我们在每次提交的基础上使用调试分配器运行测试。
|
||||||
|
|
||||||
更多请参阅 `tests/instructions/sanitizers.txt`。
|
更多请参阅 `tests/instructions/sanitizers.txt`。
|
||||||
|
|
||||||
## 模糊测试 {#mo-hu-ce-shi}
|
## 模糊测试 {#mo-hu-ce-shi}
|
||||||
|
|
||||||
我们使用简单的模糊测试来生成随机SQL查询并检查服务器是否正常,使用 Address sanitizer 执行模糊测试。你可以在`00746_sql_fuzzy.pl` 找到它。 测试应连续进行(过夜和更长时间)。
|
ClickHouse模糊测试可以通过[libFuzzer](https://llvm.org/docs/LibFuzzer.html)和随机SQL查询实现。
|
||||||
|
所有的模糊测试都应使用sanitizers(Address及Undefined)。
|
||||||
|
|
||||||
截至2018年12月,我们仍然不使用库代码的孤立模糊测试。
|
LibFuzzer用于对库代码进行独立的模糊测试。模糊器作为测试代码的一部分实现,并具有“\_fuzzer”名称后缀。
|
||||||
|
模糊测试示例在`src/Parsers/tests/lexer_fuzzer.cpp`。LibFuzzer配置、字典及语料库存放在`tests/fuzz`。
|
||||||
|
我们鼓励您为每个处理用户输入的功能编写模糊测试。
|
||||||
|
|
||||||
|
默认情况下不构建模糊器。可通过设置`-DENABLE_FUZZING=1`和`-DENABLE_TESTS=1`来构建模糊器。 我们建议在构建模糊器时关闭Jemalloc。
|
||||||
|
用于将ClickHouse模糊测试集成到的Google OSS-Fuzz的配置文件位于`docker/fuzz`。
|
||||||
|
|
||||||
|
此外,我们使用简单的模糊测试来生成随机SQL查询并检查服务器是否正常。你可以在`00746_sql_fuzzy.pl` 找到它。测试应连续进行(过夜和更长时间)。
|
||||||
|
|
||||||
## 安全审计 {#an-quan-shen-ji}
|
## 安全审计 {#an-quan-shen-ji}
|
||||||
|
|
||||||
@ -208,7 +216,7 @@ Yandex Cloud 部门的人员从安全角度对 ClickHouse 功能进行了一些
|
|||||||
|
|
||||||
## Metrica B2B 测试 {#metrica-b2b-ce-shi}
|
## Metrica B2B 测试 {#metrica-b2b-ce-shi}
|
||||||
|
|
||||||
每个 ClickHouse 版本都经过 Yandex Metrica 和 AppMetrica 引擎的测试。测试和稳定版本的 ClickHouse 部署在虚拟机上,并使用处理输入数据固定样本的度量引擎的小副本运行。 将度量引擎的两个实例的结果一起进行比较
|
每个 ClickHouse 版本都经过 Yandex Metrica 和 AppMetrica 引擎的测试。测试和稳定版本的 ClickHouse 部署在虚拟机上,并使用处理输入数据固定样本的度量引擎的小副本运行。将度量引擎的两个实例的结果一起进行比较
|
||||||
|
|
||||||
这些测试是由单独的团队自动完成的。由于移动部件的数量很多,大部分时间的测试都是完全无关的,很难弄清楚。很可能这些测试对我们来说是负值。然而,这些测试被证明是有用的大约一个或两个倍的数百。
|
这些测试是由单独的团队自动完成的。由于移动部件的数量很多,大部分时间的测试都是完全无关的,很难弄清楚。很可能这些测试对我们来说是负值。然而,这些测试被证明是有用的大约一个或两个倍的数百。
|
||||||
|
|
||||||
@ -218,12 +226,12 @@ Yandex Cloud 部门的人员从安全角度对 ClickHouse 功能进行了一些
|
|||||||
|
|
||||||
## 自动化测试 {#zi-dong-hua-ce-shi}
|
## 自动化测试 {#zi-dong-hua-ce-shi}
|
||||||
|
|
||||||
我们使用 Yandex 内部 CI 和名为«沙箱»的作业自动化系统运行测试。 我们还继续使用 Jenkins(可在Yandex内部使用)。
|
我们使用 Yandex 内部 CI 和名为«沙箱»的作业自动化系统运行测试。我们还继续使用 Jenkins(可在Yandex内部使用)。
|
||||||
|
|
||||||
构建作业和测试在沙箱中按每次提交的基础上运行。结果包和测试结果发布在 GitHub 上,可以通过直接链接下载,结果会被永久存储。当您在 GitHub 上发送拉取请求时,我们将其标记为«可以测试»,我们的 CI 系统将为您构建 ClickHouse 包(发布,调试,地址消除等)。
|
构建作业和测试在沙箱中按每次提交的基础上运行。结果包和测试结果发布在 GitHub 上,可以通过直接链接下载,结果会被永久存储。当您在 GitHub 上发送拉取请求时,我们将其标记为«可以测试»,我们的 CI 系统将为您构建 ClickHouse 包(发布,调试,地址消除等)。
|
||||||
|
|
||||||
由于时间和计算能力的限制,我们不使用 Travis CI。
|
由于时间和计算能力的限制,我们不使用 Travis CI。
|
||||||
|
|
||||||
在 Jenkins,我们运行字典测试,指标B2B测试。 我们使用 Jenkins 来准备和发布版本。 Jenkins是一种传统的技术,所有的工作将被转移到沙箱中。
|
在 Jenkins,我们运行字典测试,指标B2B测试。我们使用 Jenkins 来准备和发布版本。Jenkins是一种传统的技术,所有的工作将被转移到沙箱中。
|
||||||
|
|
||||||
[来源文章](https://clickhouse.tech/docs/zh/development/tests/) <!--hide-->
|
[来源文章](https://clickhouse.tech/docs/zh/development/tests/) <!--hide-->
|
||||||
|
@ -329,9 +329,11 @@ void DiskAccessStorage::initialize(const String & directory_path_, Notifications
|
|||||||
throw Exception("Storage " + getStorageName() + " already initialized with another directory", ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Storage " + getStorageName() + " already initialized with another directory", ErrorCodes::LOGICAL_ERROR);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::filesystem::create_directories(canonical_directory_path);
|
std::error_code create_dir_error_code;
|
||||||
if (!std::filesystem::exists(canonical_directory_path) || !std::filesystem::is_directory(canonical_directory_path))
|
std::filesystem::create_directories(canonical_directory_path, create_dir_error_code);
|
||||||
throw Exception("Couldn't create directory " + canonical_directory_path.string(), ErrorCodes::DIRECTORY_DOESNT_EXIST);
|
|
||||||
|
if (!std::filesystem::exists(canonical_directory_path) || !std::filesystem::is_directory(canonical_directory_path) || create_dir_error_code)
|
||||||
|
throw Exception("Couldn't create directory " + canonical_directory_path.string() + " reason: '" + create_dir_error_code.message() + "'", ErrorCodes::DIRECTORY_DOESNT_EXIST);
|
||||||
|
|
||||||
directory_path = canonical_directory_path;
|
directory_path = canonical_directory_path;
|
||||||
initialized = true;
|
initialized = true;
|
||||||
|
@ -124,9 +124,9 @@ public:
|
|||||||
throw Exception("Cannot convert Tuple to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
|
throw Exception("Cannot convert Tuple to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
|
||||||
}
|
}
|
||||||
|
|
||||||
T operator() (const UInt64 & x) const { return x; }
|
T operator() (const UInt64 & x) const { return T(x); }
|
||||||
T operator() (const Int64 & x) const { return x; }
|
T operator() (const Int64 & x) const { return T(x); }
|
||||||
T operator() (const Float64 & x) const { return x; }
|
T operator() (const Float64 & x) const { return T(x); }
|
||||||
|
|
||||||
T operator() (const UInt128 &) const
|
T operator() (const UInt128 &) const
|
||||||
{
|
{
|
||||||
@ -139,7 +139,7 @@ public:
|
|||||||
if constexpr (std::is_floating_point_v<T>)
|
if constexpr (std::is_floating_point_v<T>)
|
||||||
return static_cast<T>(x.getValue()) / x.getScaleMultiplier();
|
return static_cast<T>(x.getValue()) / x.getScaleMultiplier();
|
||||||
else
|
else
|
||||||
return x.getValue() / x.getScaleMultiplier();
|
return static_cast<T>(x.getValue() / x.getScaleMultiplier());
|
||||||
}
|
}
|
||||||
|
|
||||||
T operator() (const AggregateFunctionStateData &) const
|
T operator() (const AggregateFunctionStateData &) const
|
||||||
|
@ -245,6 +245,13 @@ DEFINE_HASH(DB::Float64)
|
|||||||
#undef DEFINE_HASH
|
#undef DEFINE_HASH
|
||||||
|
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct DefaultHash<DB::UInt128> : public DB::UInt128Hash {};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct DefaultHash<DB::UInt256> : public DB::UInt256Hash {};
|
||||||
|
|
||||||
|
|
||||||
/// It is reasonable to use for UInt8, UInt16 with sufficient hash table size.
|
/// It is reasonable to use for UInt8, UInt16 with sufficient hash table size.
|
||||||
struct TrivialHash
|
struct TrivialHash
|
||||||
{
|
{
|
||||||
|
@ -8,16 +8,11 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
class ProfilingScopedWriteUnlocker;
|
|
||||||
|
|
||||||
class ProfilingScopedWriteRWLock
|
class ProfilingScopedWriteRWLock
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
friend class ProfilingScopedWriteUnlocker;
|
|
||||||
|
|
||||||
ProfilingScopedWriteRWLock(std::shared_mutex & rwl_, ProfileEvents::Event event_) :
|
ProfilingScopedWriteRWLock(std::shared_mutex & rwl_, ProfileEvents::Event event) :
|
||||||
watch(),
|
|
||||||
event(event_),
|
|
||||||
scoped_write_lock(rwl_)
|
scoped_write_lock(rwl_)
|
||||||
{
|
{
|
||||||
ProfileEvents::increment(event, watch.elapsed());
|
ProfileEvents::increment(event, watch.elapsed());
|
||||||
@ -25,38 +20,14 @@ public:
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
Stopwatch watch;
|
Stopwatch watch;
|
||||||
ProfileEvents::Event event;
|
|
||||||
std::unique_lock<std::shared_mutex> scoped_write_lock;
|
std::unique_lock<std::shared_mutex> scoped_write_lock;
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Inversed RAII
|
|
||||||
/// Used to unlock current writelock for various purposes.
|
|
||||||
class ProfilingScopedWriteUnlocker
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
ProfilingScopedWriteUnlocker() = delete;
|
|
||||||
|
|
||||||
ProfilingScopedWriteUnlocker(ProfilingScopedWriteRWLock & parent_lock_) : parent_lock(parent_lock_)
|
|
||||||
{
|
|
||||||
parent_lock.scoped_write_lock.unlock();
|
|
||||||
}
|
|
||||||
|
|
||||||
~ProfilingScopedWriteUnlocker()
|
|
||||||
{
|
|
||||||
Stopwatch watch;
|
|
||||||
parent_lock.scoped_write_lock.lock();
|
|
||||||
ProfileEvents::increment(parent_lock.event, watch.elapsed());
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
ProfilingScopedWriteRWLock & parent_lock;
|
|
||||||
};
|
|
||||||
|
|
||||||
class ProfilingScopedReadRWLock
|
class ProfilingScopedReadRWLock
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
ProfilingScopedReadRWLock(std::shared_mutex & rwl, ProfileEvents::Event event) :
|
ProfilingScopedReadRWLock(std::shared_mutex & rwl, ProfileEvents::Event event) :
|
||||||
watch(),
|
|
||||||
scoped_read_lock(rwl)
|
scoped_read_lock(rwl)
|
||||||
{
|
{
|
||||||
ProfileEvents::increment(event, watch.elapsed());
|
ProfileEvents::increment(event, watch.elapsed());
|
||||||
|
@ -1,5 +1,7 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Core/Types.h>
|
#include <Core/Types.h>
|
||||||
|
#include <Common/UInt128.h>
|
||||||
#include <Common/TypeList.h>
|
#include <Common/TypeList.h>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -7,7 +9,9 @@ namespace DB
|
|||||||
|
|
||||||
using TypeListNativeNumbers = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64>;
|
using TypeListNativeNumbers = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64>;
|
||||||
using TypeListDecimalNumbers = TypeList<Decimal32, Decimal64, Decimal128>;
|
using TypeListDecimalNumbers = TypeList<Decimal32, Decimal64, Decimal128>;
|
||||||
using TypeListNumbers = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64,
|
using TypeListNumbers = typename TypeListConcat<TypeListNativeNumbers, TypeListDecimalNumbers>::Type;
|
||||||
Decimal32, Decimal64, Decimal128>;
|
|
||||||
|
/// Currently separate because UInt128 cannot be used in every context where other numbers can be used.
|
||||||
|
using TypeListNumbersAndUInt128 = typename AppendToTypeList<UInt128, TypeListNumbers>::Type;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -62,8 +62,8 @@ Block CheckSortedBlockInputStream::readImpl()
|
|||||||
else if (res > 0)
|
else if (res > 0)
|
||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
"Sort order of blocks violated for column {}, left: {}, right: {}.",
|
"Sort order of blocks violated for column number {}, left: {}, right: {}.",
|
||||||
backQuoteIfNeed(elem.column_name),
|
column_number,
|
||||||
applyVisitor(FieldVisitorDump(), (*left_col)[left_index]),
|
applyVisitor(FieldVisitorDump(), (*left_col)[left_index]),
|
||||||
applyVisitor(FieldVisitorDump(), (*right_col)[right_index]));
|
applyVisitor(FieldVisitorDump(), (*right_col)[right_index]));
|
||||||
}
|
}
|
||||||
|
@ -100,10 +100,12 @@ template <typename A, typename B> struct ResultOfIntegerDivision
|
|||||||
*/
|
*/
|
||||||
template <typename A, typename B> struct ResultOfModulo
|
template <typename A, typename B> struct ResultOfModulo
|
||||||
{
|
{
|
||||||
using Type = typename Construct<
|
using Type = std::conditional_t<std::is_floating_point_v<A> || std::is_floating_point_v<B>,
|
||||||
is_signed_v<A> || is_signed_v<B>,
|
Float64,
|
||||||
false,
|
typename Construct<
|
||||||
sizeof(B)>::Type;
|
is_signed_v<A> || is_signed_v<B>,
|
||||||
|
false,
|
||||||
|
sizeof(B)>::Type>;
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename A> struct ResultOfNegate
|
template <typename A> struct ResultOfNegate
|
||||||
|
@ -93,7 +93,7 @@ CacheDictionary::CacheDictionary(
|
|||||||
, update_queue(max_update_queue_size_)
|
, update_queue(max_update_queue_size_)
|
||||||
, update_pool(max_threads_for_updates)
|
, update_pool(max_threads_for_updates)
|
||||||
{
|
{
|
||||||
if (!this->source_ptr->supportsSelectiveLoad())
|
if (!source_ptr->supportsSelectiveLoad())
|
||||||
throw Exception{full_name + ": source cannot be used with CacheDictionary", ErrorCodes::UNSUPPORTED_METHOD};
|
throw Exception{full_name + ": source cannot be used with CacheDictionary", ErrorCodes::UNSUPPORTED_METHOD};
|
||||||
|
|
||||||
createAttributes();
|
createAttributes();
|
||||||
@ -860,40 +860,24 @@ void CacheDictionary::update(BunchUpdateUnit & bunch_update_unit) const
|
|||||||
|
|
||||||
const auto now = std::chrono::system_clock::now();
|
const auto now = std::chrono::system_clock::now();
|
||||||
|
|
||||||
/// Non const because it will be unlocked.
|
|
||||||
ProfilingScopedWriteRWLock write_lock{rw_lock, ProfileEvents::DictCacheLockWriteNs};
|
|
||||||
|
|
||||||
if (now > backoff_end_time.load())
|
if (now > backoff_end_time.load())
|
||||||
{
|
{
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
if (error_count)
|
auto current_source_ptr = getSourceAndUpdateIfNeeded();
|
||||||
{
|
|
||||||
/// Recover after error: we have to clone the source here because
|
|
||||||
/// it could keep connections which should be reset after error.
|
|
||||||
source_ptr = source_ptr->clone();
|
|
||||||
}
|
|
||||||
|
|
||||||
Stopwatch watch;
|
Stopwatch watch;
|
||||||
|
|
||||||
/// To perform parallel loading.
|
BlockInputStreamPtr stream = current_source_ptr->loadIds(bunch_update_unit.getRequestedIds());
|
||||||
BlockInputStreamPtr stream = nullptr;
|
|
||||||
{
|
|
||||||
ProfilingScopedWriteUnlocker unlocker(write_lock);
|
|
||||||
stream = source_ptr->loadIds(bunch_update_unit.getRequestedIds());
|
|
||||||
}
|
|
||||||
|
|
||||||
stream->readPrefix();
|
stream->readPrefix();
|
||||||
|
|
||||||
|
|
||||||
while (true)
|
while (true)
|
||||||
{
|
{
|
||||||
Block block;
|
Block block = stream->read();
|
||||||
{
|
if (!block)
|
||||||
ProfilingScopedWriteUnlocker unlocker(write_lock);
|
break;
|
||||||
block = stream->read();
|
|
||||||
if (!block)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto * id_column = typeid_cast<const ColumnUInt64 *>(block.safeGetByPosition(0).column.get());
|
const auto * id_column = typeid_cast<const ColumnUInt64 *>(block.safeGetByPosition(0).column.get());
|
||||||
if (!id_column)
|
if (!id_column)
|
||||||
@ -907,6 +891,8 @@ void CacheDictionary::update(BunchUpdateUnit & bunch_update_unit) const
|
|||||||
|
|
||||||
for (const auto i : ext::range(0, ids.size()))
|
for (const auto i : ext::range(0, ids.size()))
|
||||||
{
|
{
|
||||||
|
/// Modifying cache with write lock
|
||||||
|
ProfilingScopedWriteRWLock write_lock{rw_lock, ProfileEvents::DictCacheLockWriteNs};
|
||||||
const auto id = ids[i];
|
const auto id = ids[i];
|
||||||
|
|
||||||
const auto find_result = findCellIdx(id, now);
|
const auto find_result = findCellIdx(id, now);
|
||||||
@ -943,6 +929,9 @@ void CacheDictionary::update(BunchUpdateUnit & bunch_update_unit) const
|
|||||||
|
|
||||||
stream->readSuffix();
|
stream->readSuffix();
|
||||||
|
|
||||||
|
/// Lock just for last_exception safety
|
||||||
|
ProfilingScopedWriteRWLock write_lock{rw_lock, ProfileEvents::DictCacheLockWriteNs};
|
||||||
|
|
||||||
error_count = 0;
|
error_count = 0;
|
||||||
last_exception = std::exception_ptr{};
|
last_exception = std::exception_ptr{};
|
||||||
backoff_end_time = std::chrono::system_clock::time_point{};
|
backoff_end_time = std::chrono::system_clock::time_point{};
|
||||||
@ -951,6 +940,8 @@ void CacheDictionary::update(BunchUpdateUnit & bunch_update_unit) const
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
|
/// Lock just for last_exception safety
|
||||||
|
ProfilingScopedWriteRWLock write_lock{rw_lock, ProfileEvents::DictCacheLockWriteNs};
|
||||||
++error_count;
|
++error_count;
|
||||||
last_exception = std::current_exception();
|
last_exception = std::current_exception();
|
||||||
backoff_end_time = now + std::chrono::seconds(calculateDurationWithBackoff(rnd_engine, error_count));
|
backoff_end_time = now + std::chrono::seconds(calculateDurationWithBackoff(rnd_engine, error_count));
|
||||||
@ -960,6 +951,8 @@ void CacheDictionary::update(BunchUpdateUnit & bunch_update_unit) const
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Modifying cache state again with write lock
|
||||||
|
ProfilingScopedWriteRWLock write_lock{rw_lock, ProfileEvents::DictCacheLockWriteNs};
|
||||||
size_t not_found_num = 0;
|
size_t not_found_num = 0;
|
||||||
size_t found_num = 0;
|
size_t found_num = 0;
|
||||||
|
|
||||||
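
The rewrite of CacheDictionary::update above narrows the write-lock scope: the load from the dictionary source (the slow part) now runs with no rw_lock held, and the lock is taken exclusively only while fetched values are written into the cache, for the final counters, and for last_exception safety. A rough sketch of that locking shape, with a hypothetical load callback standing in for loadIds and the stream reading:

#include <cstddef>
#include <mutex>
#include <shared_mutex>
#include <vector>

// Illustrative shape only: readers take rw_lock shared; the updater loads data
// with no lock held and locks exclusively just long enough to write results and
// to update the final bookkeeping.
struct CacheSketch
{
    mutable std::shared_mutex rw_lock;
    std::vector<int> cells;
    size_t error_count = 0;

    template <typename LoadBlocks>
    void update(LoadBlocks && load_blocks)
    {
        for (int value : load_blocks())                // slow I/O, no lock held
        {
            std::unique_lock write_lock(rw_lock);      // lock only to modify the cache
            cells.push_back(value);
        }

        std::unique_lock write_lock(rw_lock);          // lock again for bookkeeping
        error_count = 0;
    }
};

Readers would take a shared lock on rw_lock, so with this shape they are only ever blocked for the short exclusive sections, not for the whole source load.
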
|
@ -92,7 +92,7 @@ public:
|
|||||||
database,
|
database,
|
||||||
name,
|
name,
|
||||||
dict_struct,
|
dict_struct,
|
||||||
source_ptr->clone(),
|
getSourceAndUpdateIfNeeded()->clone(),
|
||||||
dict_lifetime,
|
dict_lifetime,
|
||||||
strict_max_lifetime_seconds,
|
strict_max_lifetime_seconds,
|
||||||
size,
|
size,
|
||||||
@ -289,6 +289,26 @@ private:
|
|||||||
|
|
||||||
Attribute & getAttribute(const std::string & attribute_name) const;
|
Attribute & getAttribute(const std::string & attribute_name) const;
|
||||||
|
|
||||||
|
using SharedDictionarySourcePtr = std::shared_ptr<IDictionarySource>;
|
||||||
|
|
||||||
|
/// Update dictionary source pointer if required and return it. Thread safe.
|
||||||
|
/// MultiVersion is not used here because it works with constant pointers.
|
||||||
|
/// For some reason almost all methods in IDictionarySource interface are
|
||||||
|
/// not constant.
|
||||||
|
SharedDictionarySourcePtr getSourceAndUpdateIfNeeded() const
|
||||||
|
{
|
||||||
|
std::lock_guard lock(source_mutex);
|
||||||
|
if (error_count)
|
||||||
|
{
|
||||||
|
/// Recover after error: we have to clone the source here because
|
||||||
|
/// it could keep connections which should be reset after error.
|
||||||
|
auto new_source_ptr = source_ptr->clone();
|
||||||
|
source_ptr = std::move(new_source_ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
return source_ptr;
|
||||||
|
}
|
||||||
|
|
||||||
struct FindResult
|
struct FindResult
|
||||||
{
|
{
|
||||||
const size_t cell_idx;
|
const size_t cell_idx;
|
||||||
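
getSourceAndUpdateIfNeeded above guards the source pointer with its own source_mutex and re-clones the source after errors, handing back a shared_ptr so an in-flight load keeps its copy alive even if another thread swaps the pointer later. A minimal sketch of that ownership pattern; Source and clone() are hypothetical stand-ins for IDictionarySource.

#include <cstddef>
#include <memory>
#include <mutex>

struct Source
{
    virtual ~Source() = default;
    virtual std::shared_ptr<Source> clone() const = 0;
};

class SourceHolder
{
public:
    std::shared_ptr<Source> getSourceAndUpdateIfNeeded() const
    {
        std::lock_guard lock(source_mutex);
        if (error_count)
        {
            // Recover after error: the old source may hold connections that
            // must not be reused, so replace it with a fresh clone.
            source_ptr = source_ptr->clone();
        }
        return source_ptr;   // shared ownership outlives a later swap
    }

private:
    mutable std::mutex source_mutex;
    mutable std::shared_ptr<Source> source_ptr;
    size_t error_count = 0;  // cleared elsewhere after a successful update
};
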
@ -305,7 +325,11 @@ private:
|
|||||||
const std::string name;
|
const std::string name;
|
||||||
const std::string full_name;
|
const std::string full_name;
|
||||||
const DictionaryStructure dict_struct;
|
const DictionaryStructure dict_struct;
|
||||||
mutable DictionarySourcePtr source_ptr;
|
|
||||||
|
/// Dictionary source should be used with mutex
|
||||||
|
mutable std::mutex source_mutex;
|
||||||
|
mutable SharedDictionarySourcePtr source_ptr;
|
||||||
|
|
||||||
const DictionaryLifetime dict_lifetime;
|
const DictionaryLifetime dict_lifetime;
|
||||||
const size_t strict_max_lifetime_seconds;
|
const size_t strict_max_lifetime_seconds;
|
||||||
const bool allow_read_expired_keys;
|
const bool allow_read_expired_keys;
|
||||||
@ -316,6 +340,9 @@ private:
|
|||||||
|
|
||||||
Poco::Logger * log;
|
Poco::Logger * log;
|
||||||
|
|
||||||
|
/// This lock is used for the inner cache state update function lock it for
|
||||||
|
/// write, when it need to update cache state all other functions just
|
||||||
|
/// readers. Suprisingly this lock is also used for last_exception pointer.
|
||||||
mutable std::shared_mutex rw_lock;
|
mutable std::shared_mutex rw_lock;
|
||||||
|
|
||||||
/// Actual size will be increased to match power of 2
|
/// Actual size will be increased to match power of 2
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <cmath>
|
||||||
#include <type_traits>
|
#include <type_traits>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <DataTypes/NumberTraits.h>
|
#include <DataTypes/NumberTraits.h>
|
||||||
@ -86,8 +87,16 @@ struct ModuloImpl
|
|||||||
template <typename Result = ResultType>
|
template <typename Result = ResultType>
|
||||||
static inline Result apply(A a, B b)
|
static inline Result apply(A a, B b)
|
||||||
{
|
{
|
||||||
throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<A>::Type(a), typename NumberTraits::ToInteger<B>::Type(b));
|
if constexpr (std::is_floating_point_v<ResultType>)
|
||||||
return typename NumberTraits::ToInteger<A>::Type(a) % typename NumberTraits::ToInteger<B>::Type(b);
|
{
|
||||||
|
/// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance.
|
||||||
|
return ResultType(a) - trunc(ResultType(a) / ResultType(b)) * ResultType(b);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<A>::Type(a), typename NumberTraits::ToInteger<B>::Type(b));
|
||||||
|
return typename NumberTraits::ToInteger<A>::Type(a) % typename NumberTraits::ToInteger<B>::Type(b);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if USE_EMBEDDED_COMPILER
|
#if USE_EMBEDDED_COMPILER
|
||||||
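
The floating-point branch added to ModuloImpl above avoids std::fmod (which, per the comment, is not inlined and is far slower here) and computes the remainder as a - trunc(a / b) * b. A small self-contained check of that identity, using the same values as the 01412_mod_float test added further below:

#include <cassert>
#include <cmath>

// Illustration of the trunc-based remainder: it matches std::fmod's sign
// behaviour (the result takes the sign of the dividend).
static double modViaTrunc(double a, double b)
{
    return a - std::trunc(a / b) * b;
}

int main()
{
    assert(modViaTrunc(8.5, 2.5) == 1.0);
    assert(modViaTrunc(-8.5, 2.5) == -1.0);
    assert(modViaTrunc(8.5, -2.5) == 1.0);
    assert(modViaTrunc(-8.5, -2.5) == std::fmod(-8.5, -2.5));   // both give -1
}
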
|
@ -17,6 +17,8 @@ struct DummyJSONParser
|
|||||||
class Array;
|
class Array;
|
||||||
class Object;
|
class Object;
|
||||||
|
|
||||||
|
/// References an element in a JSON document, representing a JSON null, boolean, string, number,
|
||||||
|
/// array or object.
|
||||||
class Element
|
class Element
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -39,6 +41,7 @@ struct DummyJSONParser
|
|||||||
Object getObject() const;
|
Object getObject() const;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// References an array in a JSON document.
|
||||||
class Array
|
class Array
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -46,10 +49,10 @@ struct DummyJSONParser
|
|||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
Element operator*() const { return {}; }
|
Element operator*() const { return {}; }
|
||||||
Iterator & operator ++() { return *this; }
|
Iterator & operator++() { return *this; }
|
||||||
Iterator operator ++(int) { return *this; }
|
Iterator operator++(int) { return *this; }
|
||||||
friend bool operator ==(const Iterator &, const Iterator &) { return true; }
|
friend bool operator==(const Iterator &, const Iterator &) { return true; }
|
||||||
friend bool operator !=(const Iterator &, const Iterator &) { return false; }
|
friend bool operator!=(const Iterator &, const Iterator &) { return false; }
|
||||||
};
|
};
|
||||||
|
|
||||||
Iterator begin() const { return {}; }
|
Iterator begin() const { return {}; }
|
||||||
@ -58,29 +61,40 @@ struct DummyJSONParser
|
|||||||
Element operator[](size_t) const { return {}; }
|
Element operator[](size_t) const { return {}; }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
using KeyValuePair = std::pair<std::string_view, Element>;
|
||||||
|
|
||||||
|
/// References an object in a JSON document.
|
||||||
class Object
|
class Object
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
using KeyValuePair = std::pair<std::string_view, Element>;
|
|
||||||
|
|
||||||
class Iterator
|
class Iterator
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
KeyValuePair operator *() const { return {}; }
|
KeyValuePair operator*() const { return {}; }
|
||||||
Iterator & operator ++() { return *this; }
|
Iterator & operator++() { return *this; }
|
||||||
Iterator operator ++(int) { return *this; }
|
Iterator operator++(int) { return *this; }
|
||||||
friend bool operator ==(const Iterator &, const Iterator &) { return true; }
|
friend bool operator==(const Iterator &, const Iterator &) { return true; }
|
||||||
friend bool operator !=(const Iterator &, const Iterator &) { return false; }
|
friend bool operator!=(const Iterator &, const Iterator &) { return false; }
|
||||||
};
|
};
|
||||||
|
|
||||||
Iterator begin() const { return {}; }
|
Iterator begin() const { return {}; }
|
||||||
Iterator end() const { return {}; }
|
Iterator end() const { return {}; }
|
||||||
size_t size() const { return 0; }
|
size_t size() const { return 0; }
|
||||||
KeyValuePair operator[](size_t) const { return {}; }
|
|
||||||
bool find(const std::string_view &, Element &) const { return false; }
|
bool find(const std::string_view &, Element &) const { return false; }
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
/// Optional: Provides access to an object's element by index.
|
||||||
|
KeyValuePair operator[](size_t) const { return {}; }
|
||||||
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// Parses a JSON document, returns the reference to its root element if succeeded.
|
||||||
bool parse(const std::string_view &, Element &) { throw Exception{"Functions JSON* are not supported", ErrorCodes::NOT_IMPLEMENTED}; }
|
bool parse(const std::string_view &, Element &) { throw Exception{"Functions JSON* are not supported", ErrorCodes::NOT_IMPLEMENTED}; }
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
/// Optional: Allocates memory to parse JSON documents faster.
|
||||||
|
void reserve(size_t max_size);
|
||||||
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -506,10 +506,11 @@ public:
|
|||||||
/// greatest(Date, Date) -> Date
|
/// greatest(Date, Date) -> Date
|
||||||
Case<std::is_same_v<LeftDataType, RightDataType> && (std::is_same_v<Op, LeastBaseImpl<T0, T1>> || std::is_same_v<Op, GreatestBaseImpl<T0, T1>>),
|
Case<std::is_same_v<LeftDataType, RightDataType> && (std::is_same_v<Op, LeastBaseImpl<T0, T1>> || std::is_same_v<Op, GreatestBaseImpl<T0, T1>>),
|
||||||
LeftDataType>,
|
LeftDataType>,
|
||||||
/// Date % Int32 -> int32
|
/// Date % Int32 -> Int32
|
||||||
|
/// Date % Float -> Float64
|
||||||
Case<std::is_same_v<Op, ModuloImpl<T0, T1>>, Switch<
|
Case<std::is_same_v<Op, ModuloImpl<T0, T1>>, Switch<
|
||||||
Case<IsDateOrDateTime<LeftDataType> && IsIntegral<RightDataType>, RightDataType>,
|
Case<IsDateOrDateTime<LeftDataType> && IsIntegral<RightDataType>, RightDataType>,
|
||||||
Case<IsDateOrDateTime<LeftDataType> && IsFloatingPoint<RightDataType>, DataTypeInt32>>>>;
|
Case<IsDateOrDateTime<LeftDataType> && IsFloatingPoint<RightDataType>, DataTypeFloat64>>>>;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
@ -139,6 +139,12 @@ private:
|
|||||||
BOOST_TTI_HAS_MEMBER_FUNCTION(reserve)
|
BOOST_TTI_HAS_MEMBER_FUNCTION(reserve)
|
||||||
BOOST_TTI_HAS_MEMBER_FUNCTION(prepare)
|
BOOST_TTI_HAS_MEMBER_FUNCTION(prepare)
|
||||||
|
|
||||||
|
template <class T, class = void>
|
||||||
|
struct has_index_operator : std::false_type {};
|
||||||
|
|
||||||
|
template <class T>
|
||||||
|
struct has_index_operator<T, std::void_t<decltype(std::declval<T>()[0])>> : std::true_type {};
|
||||||
|
|
||||||
/// Represents a move of a JSON iterator described by a single argument passed to a JSON function.
|
/// Represents a move of a JSON iterator described by a single argument passed to a JSON function.
|
||||||
/// For example, the call JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1)
|
/// For example, the call JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1)
|
||||||
/// contains two moves: {MoveType::ConstKey, "b"} and {MoveType::ConstIndex, 1}.
|
/// contains two moves: {MoveType::ConstKey, "b"} and {MoveType::ConstIndex, 1}.
|
||||||
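
has_index_operator above is the standard std::void_t detection idiom: the partial specialisation is selected only when the expression t[0] is well-formed, which is how the JSON code later decides at compile time whether a parser's Object type supports access by position. A standalone version of the same idiom with a couple of checks:

#include <type_traits>
#include <utility>
#include <vector>

// Detection idiom: true when the expression t[0] is well-formed for a T value.
template <class T, class = void>
struct has_index_operator : std::false_type {};

template <class T>
struct has_index_operator<T, std::void_t<decltype(std::declval<T>()[0])>> : std::true_type {};

struct NoIndexing {};

static_assert(has_index_operator<std::vector<int>>::value);
static_assert(!has_index_operator<NoIndexing>::value);
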
@ -217,38 +223,32 @@ private:
|
|||||||
{
|
{
|
||||||
auto array = element.getArray();
|
auto array = element.getArray();
|
||||||
if (index >= 0)
|
if (index >= 0)
|
||||||
{
|
|
||||||
--index;
|
--index;
|
||||||
if (static_cast<size_t>(index) >= array.size())
|
else
|
||||||
return false;
|
index += array.size();
|
||||||
element = array[index];
|
|
||||||
out_key = {};
|
if (static_cast<size_t>(index) >= array.size())
|
||||||
return true;
|
|
||||||
}
|
|
||||||
index += array.size();
|
|
||||||
if (index < 0)
|
|
||||||
return false;
|
return false;
|
||||||
element = array[index];
|
element = array[index];
|
||||||
out_key = {};
|
out_key = {};
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (element.isObject())
|
if constexpr (has_index_operator<typename JSONParser::Object>::value)
|
||||||
{
|
{
|
||||||
auto object = element.getObject();
|
if (element.isObject())
|
||||||
if (index >= 0)
|
|
||||||
{
|
{
|
||||||
--index;
|
auto object = element.getObject();
|
||||||
|
if (index >= 0)
|
||||||
|
--index;
|
||||||
|
else
|
||||||
|
index += object.size();
|
||||||
|
|
||||||
if (static_cast<size_t>(index) >= object.size())
|
if (static_cast<size_t>(index) >= object.size())
|
||||||
return false;
|
return false;
|
||||||
std::tie(out_key, element) = object[index];
|
std::tie(out_key, element) = object[index];
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
index += object.size();
|
|
||||||
if (index < 0)
|
|
||||||
return false;
|
|
||||||
std::tie(out_key, element) = object[index];
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return {};
|
return {};
|
||||||
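
The rewritten move-by-index logic above treats a positive argument as a 1-based position from the start and a negative one as a position counted from the end, and a single unsigned comparison then rejects both overshoot and still-negative positions. A hypothetical helper reproducing just that normalisation (not the ClickHouse code itself):

#include <cassert>
#include <cstddef>
#include <optional>

// Only the index arithmetic is reproduced here.
static std::optional<size_t> normalizeJsonIndex(long long index, size_t size)
{
    if (index >= 0)
        --index;                                   // 1-based -> 0-based
    else
        index += static_cast<long long>(size);     // -1 means the last element

    if (static_cast<size_t>(index) >= size)
        return std::nullopt;                       // too large, or still negative
    return static_cast<size_t>(index);
}

int main()
{
    assert(normalizeJsonIndex(1, 3) == size_t{0});
    assert(normalizeJsonIndex(-1, 3) == size_t{2});
    assert(!normalizeJsonIndex(4, 3));
    assert(!normalizeJsonIndex(-4, 3));
}
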
|
@ -13,7 +13,7 @@ struct GenericArraySink;
|
|||||||
template <typename ArraySink>
|
template <typename ArraySink>
|
||||||
struct NullableArraySink;
|
struct NullableArraySink;
|
||||||
|
|
||||||
using NumericArraySinks = typename TypeListMap<NumericArraySink, TypeListNumbers>::Type;
|
using NumericArraySinks = typename TypeListMap<NumericArraySink, TypeListNumbersAndUInt128>::Type;
|
||||||
using BasicArraySinks = typename AppendToTypeList<GenericArraySink, NumericArraySinks>::Type;
|
using BasicArraySinks = typename AppendToTypeList<GenericArraySink, NumericArraySinks>::Type;
|
||||||
using NullableArraySinks = typename TypeListMap<NullableArraySink, BasicArraySinks>::Type;
|
using NullableArraySinks = typename TypeListMap<NullableArraySink, BasicArraySinks>::Type;
|
||||||
using TypeListArraySinks = typename TypeListConcat<BasicArraySinks, NullableArraySinks>::Type;
|
using TypeListArraySinks = typename TypeListConcat<BasicArraySinks, NullableArraySinks>::Type;
|
||||||
|
@ -16,7 +16,7 @@ struct NullableArraySource;
|
|||||||
template <typename Base>
|
template <typename Base>
|
||||||
struct ConstSource;
|
struct ConstSource;
|
||||||
|
|
||||||
using NumericArraySources = typename TypeListMap<NumericArraySource, TypeListNumbers>::Type;
|
using NumericArraySources = typename TypeListMap<NumericArraySource, TypeListNumbersAndUInt128>::Type;
|
||||||
using BasicArraySources = typename AppendToTypeList<GenericArraySource, NumericArraySources>::Type;
|
using BasicArraySources = typename AppendToTypeList<GenericArraySource, NumericArraySources>::Type;
|
||||||
using NullableArraySources = typename TypeListMap<NullableArraySource, BasicArraySources>::Type;
|
using NullableArraySources = typename TypeListMap<NullableArraySource, BasicArraySources>::Type;
|
||||||
using BasicAndNullableArraySources = typename TypeListConcat<BasicArraySources, NullableArraySources>::Type;
|
using BasicAndNullableArraySources = typename TypeListConcat<BasicArraySources, NullableArraySources>::Type;
|
||||||
|
@ -16,7 +16,7 @@ struct NullableValueSource;
|
|||||||
template <typename Base>
|
template <typename Base>
|
||||||
struct ConstSource;
|
struct ConstSource;
|
||||||
|
|
||||||
using NumericValueSources = typename TypeListMap<NumericValueSource, TypeListNumbers>::Type;
|
using NumericValueSources = typename TypeListMap<NumericValueSource, TypeListNumbersAndUInt128>::Type;
|
||||||
using BasicValueSources = typename AppendToTypeList<GenericValueSource, NumericValueSources>::Type;
|
using BasicValueSources = typename AppendToTypeList<GenericValueSource, NumericValueSources>::Type;
|
||||||
using NullableValueSources = typename TypeListMap<NullableValueSource, BasicValueSources>::Type;
|
using NullableValueSources = typename TypeListMap<NullableValueSource, BasicValueSources>::Type;
|
||||||
using BasicAndNullableValueSources = typename TypeListConcat<BasicValueSources, NullableValueSources>::Type;
|
using BasicAndNullableValueSources = typename TypeListConcat<BasicValueSources, NullableValueSources>::Type;
|
||||||
|
@ -41,7 +41,7 @@ struct ArraySinkCreator<>
|
|||||||
|
|
||||||
std::unique_ptr<IArraySink> createArraySink(ColumnArray & col, size_t column_size)
|
std::unique_ptr<IArraySink> createArraySink(ColumnArray & col, size_t column_size)
|
||||||
{
|
{
|
||||||
using Creator = ApplyTypeListForClass<ArraySinkCreator, TypeListNumbers>::Type;
|
using Creator = ApplyTypeListForClass<ArraySinkCreator, TypeListNumbersAndUInt128>::Type;
|
||||||
if (auto * column_nullable = typeid_cast<ColumnNullable *>(&col.getData()))
|
if (auto * column_nullable = typeid_cast<ColumnNullable *>(&col.getData()))
|
||||||
{
|
{
|
||||||
auto column = ColumnArray::create(column_nullable->getNestedColumnPtr()->assumeMutable(), col.getOffsetsPtr()->assumeMutable());
|
auto column = ColumnArray::create(column_nullable->getNestedColumnPtr()->assumeMutable(), col.getOffsetsPtr()->assumeMutable());
|
||||||
|
@ -53,7 +53,7 @@ struct ArraySourceCreator<>
|
|||||||
|
|
||||||
std::unique_ptr<IArraySource> createArraySource(const ColumnArray & col, bool is_const, size_t total_rows)
|
std::unique_ptr<IArraySource> createArraySource(const ColumnArray & col, bool is_const, size_t total_rows)
|
||||||
{
|
{
|
||||||
using Creator = typename ApplyTypeListForClass<ArraySourceCreator, TypeListNumbers>::Type;
|
using Creator = typename ApplyTypeListForClass<ArraySourceCreator, TypeListNumbersAndUInt128>::Type;
|
||||||
if (const auto * column_nullable = typeid_cast<const ColumnNullable *>(&col.getData()))
|
if (const auto * column_nullable = typeid_cast<const ColumnNullable *>(&col.getData()))
|
||||||
{
|
{
|
||||||
auto column = ColumnArray::create(column_nullable->getNestedColumnPtr(), col.getOffsetsPtr());
|
auto column = ColumnArray::create(column_nullable->getNestedColumnPtr(), col.getOffsetsPtr());
|
||||||
|
@ -53,7 +53,7 @@ struct ValueSourceCreator<>
|
|||||||
|
|
||||||
std::unique_ptr<IValueSource> createValueSource(const IColumn & col, bool is_const, size_t total_rows)
|
std::unique_ptr<IValueSource> createValueSource(const IColumn & col, bool is_const, size_t total_rows)
|
||||||
{
|
{
|
||||||
using Creator = typename ApplyTypeListForClass<ValueSourceCreator, TypeListNumbers>::Type;
|
using Creator = typename ApplyTypeListForClass<ValueSourceCreator, TypeListNumbersAndUInt128>::Type;
|
||||||
if (const auto * column_nullable = typeid_cast<const ColumnNullable *>(&col))
|
if (const auto * column_nullable = typeid_cast<const ColumnNullable *>(&col))
|
||||||
{
|
{
|
||||||
return Creator::create(column_nullable->getNestedColumn(), &column_nullable->getNullMapData(), is_const, total_rows);
|
return Creator::create(column_nullable->getNestedColumn(), &column_nullable->getNullMapData(), is_const, total_rows);
|
||||||
|
@ -20,6 +20,8 @@ struct RapidJSONParser
|
|||||||
class Array;
|
class Array;
|
||||||
class Object;
|
class Object;
|
||||||
|
|
||||||
|
/// References an element in a JSON document, representing a JSON null, boolean, string, number,
|
||||||
|
/// array or object.
|
||||||
class Element
|
class Element
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -47,6 +49,7 @@ struct RapidJSONParser
|
|||||||
const rapidjson::Value * ptr = nullptr;
|
const rapidjson::Value * ptr = nullptr;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// References an array in a JSON document.
|
||||||
class Array
|
class Array
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -67,17 +70,18 @@ struct RapidJSONParser
|
|||||||
ALWAYS_INLINE Iterator begin() const { return ptr->Begin(); }
|
ALWAYS_INLINE Iterator begin() const { return ptr->Begin(); }
|
||||||
ALWAYS_INLINE Iterator end() const { return ptr->End(); }
|
ALWAYS_INLINE Iterator end() const { return ptr->End(); }
|
||||||
ALWAYS_INLINE size_t size() const { return ptr->Size(); }
|
ALWAYS_INLINE size_t size() const { return ptr->Size(); }
|
||||||
ALWAYS_INLINE Element operator[](size_t index) const { return *(ptr->Begin() + index); }
|
ALWAYS_INLINE Element operator[](size_t index) const { assert(index < size()); return *(ptr->Begin() + index); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const rapidjson::Value * ptr = nullptr;
|
const rapidjson::Value * ptr = nullptr;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
using KeyValuePair = std::pair<std::string_view, Element>;
|
||||||
|
|
||||||
|
/// References an object in a JSON document.
|
||||||
class Object
|
class Object
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
using KeyValuePair = std::pair<std::string_view, Element>;
|
|
||||||
|
|
||||||
class Iterator
|
class Iterator
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -96,14 +100,7 @@ struct RapidJSONParser
|
|||||||
ALWAYS_INLINE Iterator end() const { return ptr->MemberEnd(); }
|
ALWAYS_INLINE Iterator end() const { return ptr->MemberEnd(); }
|
||||||
ALWAYS_INLINE size_t size() const { return ptr->MemberCount(); }
|
ALWAYS_INLINE size_t size() const { return ptr->MemberCount(); }
|
||||||
|
|
||||||
ALWAYS_INLINE KeyValuePair operator[](size_t index) const
|
bool find(const std::string_view & key, Element & result) const
|
||||||
{
|
|
||||||
auto it = ptr->MemberBegin() + index;
|
|
||||||
std::string_view key{it->name.GetString(), it->name.GetStringLength()};
|
|
||||||
return KeyValuePair{key, it->value};
|
|
||||||
}
|
|
||||||
|
|
||||||
ALWAYS_INLINE bool find(const std::string_view & key, Element & result) const
|
|
||||||
{
|
{
|
||||||
auto it = ptr->FindMember(rapidjson::StringRef(key.data(), key.length()));
|
auto it = ptr->FindMember(rapidjson::StringRef(key.data(), key.length()));
|
||||||
if (it == ptr->MemberEnd())
|
if (it == ptr->MemberEnd())
|
||||||
@ -113,10 +110,20 @@ struct RapidJSONParser
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Optional: Provides access to an object's element by index.
|
||||||
|
ALWAYS_INLINE KeyValuePair operator[](size_t index) const
|
||||||
|
{
|
||||||
|
assert (index < size());
|
||||||
|
auto it = ptr->MemberBegin() + index;
|
||||||
|
std::string_view key{it->name.GetString(), it->name.GetStringLength()};
|
||||||
|
return {key, it->value};
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const rapidjson::Value * ptr = nullptr;
|
const rapidjson::Value * ptr = nullptr;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// Parses a JSON document, returns the reference to its root element if succeeded.
|
||||||
bool parse(const std::string_view & json, Element & result)
|
bool parse(const std::string_view & json, Element & result)
|
||||||
{
|
{
|
||||||
rapidjson::MemoryStream ms(json.data(), json.size());
|
rapidjson::MemoryStream ms(json.data(), json.size());
|
||||||
@ -128,6 +135,11 @@ struct RapidJSONParser
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
/// Optional: Allocates memory to parse JSON documents faster.
|
||||||
|
void reserve(size_t max_size);
|
||||||
|
#endif
|
||||||
|
|
||||||
private:
|
private:
|
||||||
rapidjson::Document document;
|
rapidjson::Document document;
|
||||||
};
|
};
|
||||||
|
@ -25,6 +25,8 @@ struct SimdJSONParser
|
|||||||
class Array;
|
class Array;
|
||||||
class Object;
|
class Object;
|
||||||
|
|
||||||
|
/// References an element in a JSON document, representing a JSON null, boolean, string, number,
|
||||||
|
/// array or object.
|
||||||
class Element
|
class Element
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -52,6 +54,7 @@ struct SimdJSONParser
|
|||||||
simdjson::dom::element element;
|
simdjson::dom::element element;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// References an array in a JSON document.
|
||||||
class Array
|
class Array
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -59,11 +62,11 @@ struct SimdJSONParser
|
|||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
ALWAYS_INLINE Iterator(const simdjson::dom::array::iterator & it_) : it(it_) {}
|
ALWAYS_INLINE Iterator(const simdjson::dom::array::iterator & it_) : it(it_) {}
|
||||||
ALWAYS_INLINE Element operator *() const { return *it; }
|
ALWAYS_INLINE Element operator*() const { return *it; }
|
||||||
ALWAYS_INLINE Iterator & operator ++() { ++it; return *this; }
|
ALWAYS_INLINE Iterator & operator++() { ++it; return *this; }
|
||||||
ALWAYS_INLINE Iterator operator ++(int) { auto res = *this; ++it; return res; }
|
ALWAYS_INLINE Iterator operator++(int) { auto res = *this; ++it; return res; }
|
||||||
ALWAYS_INLINE friend bool operator !=(const Iterator & left, const Iterator & right) { return left.it != right.it; }
|
ALWAYS_INLINE friend bool operator!=(const Iterator & left, const Iterator & right) { return left.it != right.it; }
|
||||||
ALWAYS_INLINE friend bool operator ==(const Iterator & left, const Iterator & right) { return !(left != right); }
|
ALWAYS_INLINE friend bool operator==(const Iterator & left, const Iterator & right) { return !(left != right); }
|
||||||
private:
|
private:
|
||||||
simdjson::dom::array::iterator it;
|
simdjson::dom::array::iterator it;
|
||||||
};
|
};
|
||||||
@ -72,26 +75,27 @@ struct SimdJSONParser
|
|||||||
ALWAYS_INLINE Iterator begin() const { return array.begin(); }
|
ALWAYS_INLINE Iterator begin() const { return array.begin(); }
|
||||||
ALWAYS_INLINE Iterator end() const { return array.end(); }
|
ALWAYS_INLINE Iterator end() const { return array.end(); }
|
||||||
ALWAYS_INLINE size_t size() const { return array.size(); }
|
ALWAYS_INLINE size_t size() const { return array.size(); }
|
||||||
ALWAYS_INLINE Element operator[](size_t index) const { return array.at(index).first; }
|
ALWAYS_INLINE Element operator[](size_t index) const { assert(index < size()); return array.at(index).first; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
simdjson::dom::array array;
|
simdjson::dom::array array;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
using KeyValuePair = std::pair<std::string_view, Element>;
|
||||||
|
|
||||||
|
/// References an object in a JSON document.
|
||||||
class Object
|
class Object
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
using KeyValuePair = std::pair<std::string_view, Element>;
|
|
||||||
|
|
||||||
class Iterator
|
class Iterator
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
ALWAYS_INLINE Iterator(const simdjson::dom::object::iterator & it_) : it(it_) {}
|
ALWAYS_INLINE Iterator(const simdjson::dom::object::iterator & it_) : it(it_) {}
|
||||||
ALWAYS_INLINE KeyValuePair operator *() const { const auto & res = *it; return {res.key, res.value}; }
|
ALWAYS_INLINE KeyValuePair operator*() const { const auto & res = *it; return {res.key, res.value}; }
|
||||||
ALWAYS_INLINE Iterator & operator ++() { ++it; return *this; }
|
ALWAYS_INLINE Iterator & operator++() { ++it; return *this; }
|
||||||
ALWAYS_INLINE Iterator operator ++(int) { auto res = *this; ++it; return res; }
|
ALWAYS_INLINE Iterator operator++(int) { auto res = *this; ++it; return res; }
|
||||||
ALWAYS_INLINE friend bool operator !=(const Iterator & left, const Iterator & right) { return left.it != right.it; }
|
ALWAYS_INLINE friend bool operator!=(const Iterator & left, const Iterator & right) { return left.it != right.it; }
|
||||||
ALWAYS_INLINE friend bool operator ==(const Iterator & left, const Iterator & right) { return !(left != right); }
|
ALWAYS_INLINE friend bool operator==(const Iterator & left, const Iterator & right) { return !(left != right); }
|
||||||
private:
|
private:
|
||||||
simdjson::dom::object::iterator it;
|
simdjson::dom::object::iterator it;
|
||||||
};
|
};
|
||||||
@ -101,15 +105,7 @@ struct SimdJSONParser
|
|||||||
ALWAYS_INLINE Iterator end() const { return object.end(); }
|
ALWAYS_INLINE Iterator end() const { return object.end(); }
|
||||||
ALWAYS_INLINE size_t size() const { return object.size(); }
|
ALWAYS_INLINE size_t size() const { return object.size(); }
|
||||||
|
|
||||||
KeyValuePair operator [](size_t index) const
|
bool find(const std::string_view & key, Element & result) const
|
||||||
{
|
|
||||||
Iterator it = begin();
|
|
||||||
while (index--)
|
|
||||||
++it;
|
|
||||||
return *it;
|
|
||||||
}
|
|
||||||
|
|
||||||
ALWAYS_INLINE bool find(const std::string_view & key, Element & result) const
|
|
||||||
{
|
{
|
||||||
auto x = object.at_key(key);
|
auto x = object.at_key(key);
|
||||||
if (x.error())
|
if (x.error())
|
||||||
@ -119,17 +115,22 @@ struct SimdJSONParser
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Optional: Provides access to an object's element by index.
|
||||||
|
KeyValuePair operator[](size_t index) const
|
||||||
|
{
|
||||||
|
assert(index < size());
|
||||||
|
auto it = object.begin();
|
||||||
|
while (index--)
|
||||||
|
++it;
|
||||||
|
const auto & res = *it;
|
||||||
|
return {res.key, res.value};
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
simdjson::dom::object object;
|
simdjson::dom::object object;
|
||||||
};
|
};
|
||||||
|
|
||||||
void reserve(size_t max_size)
|
/// Parses a JSON document, returns the reference to its root element if succeeded.
|
||||||
{
|
|
||||||
if (parser.allocate(max_size) != simdjson::error_code::SUCCESS)
|
|
||||||
throw Exception{"Couldn't allocate " + std::to_string(max_size) + " bytes when parsing JSON",
|
|
||||||
ErrorCodes::CANNOT_ALLOCATE_MEMORY};
|
|
||||||
}
|
|
||||||
|
|
||||||
bool parse(const std::string_view & json, Element & result)
|
bool parse(const std::string_view & json, Element & result)
|
||||||
{
|
{
|
||||||
auto document = parser.parse(json.data(), json.size());
|
auto document = parser.parse(json.data(), json.size());
|
||||||
@ -140,6 +141,14 @@ struct SimdJSONParser
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Optional: Allocates memory to parse JSON documents faster.
|
||||||
|
void reserve(size_t max_size)
|
||||||
|
{
|
||||||
|
if (parser.allocate(max_size) != simdjson::error_code::SUCCESS)
|
||||||
|
throw Exception{"Couldn't allocate " + std::to_string(max_size) + " bytes when parsing JSON",
|
||||||
|
ErrorCodes::CANNOT_ALLOCATE_MEMORY};
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
simdjson::dom::parser parser;
|
simdjson::dom::parser parser;
|
||||||
};
|
};
|
||||||
|
@ -175,9 +175,6 @@ public:
|
|||||||
static FunctionPtr create(const Context &) { return std::make_shared<FunctionIf>(); }
|
static FunctionPtr create(const Context &) { return std::make_shared<FunctionIf>(); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
template <typename T0, typename T1>
|
|
||||||
static constexpr bool allow_arrays = !std::is_same_v<T0, UInt128> && !std::is_same_v<T1, UInt128>;
|
|
||||||
|
|
||||||
template <typename T0, typename T1>
|
template <typename T0, typename T1>
|
||||||
static UInt32 decimalScale(Block & block [[maybe_unused]], const ColumnNumbers & arguments [[maybe_unused]])
|
static UInt32 decimalScale(Block & block [[maybe_unused]], const ColumnNumbers & arguments [[maybe_unused]])
|
||||||
{
|
{
|
||||||
@ -262,7 +259,7 @@ private:
|
|||||||
{
|
{
|
||||||
if constexpr (std::is_same_v<NumberTraits::Error, typename NumberTraits::ResultOfIf<T0, T1>::Type>)
|
if constexpr (std::is_same_v<NumberTraits::Error, typename NumberTraits::ResultOfIf<T0, T1>::Type>)
|
||||||
return false;
|
return false;
|
||||||
else if constexpr (allow_arrays<T0, T1>)
|
else
|
||||||
{
|
{
|
||||||
using ResultType = typename NumberTraits::ResultOfIf<T0, T1>::Type;
|
using ResultType = typename NumberTraits::ResultOfIf<T0, T1>::Type;
|
||||||
|
|
||||||
@ -318,7 +315,7 @@ private:
|
|||||||
{
|
{
|
||||||
if constexpr (std::is_same_v<NumberTraits::Error, typename NumberTraits::ResultOfIf<T0, T1>::Type>)
|
if constexpr (std::is_same_v<NumberTraits::Error, typename NumberTraits::ResultOfIf<T0, T1>::Type>)
|
||||||
return false;
|
return false;
|
||||||
else if constexpr (allow_arrays<T0, T1>)
|
else
|
||||||
{
|
{
|
||||||
using ResultType = typename NumberTraits::ResultOfIf<T0, T1>::Type;
|
using ResultType = typename NumberTraits::ResultOfIf<T0, T1>::Type;
|
||||||
|
|
||||||
|
@ -102,6 +102,7 @@ using FunctionModulo = FunctionBinaryArithmetic<ModuloImpl, NameModulo, false>;
|
|||||||
void registerFunctionModulo(FunctionFactory & factory)
|
void registerFunctionModulo(FunctionFactory & factory)
|
||||||
{
|
{
|
||||||
factory.registerFunction<FunctionModulo>();
|
factory.registerFunction<FunctionModulo>();
|
||||||
|
factory.registerAlias("mod", "modulo", FunctionFactory::CaseInsensitive);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -14,10 +14,18 @@ struct ModuloOrZeroImpl
|
|||||||
template <typename Result = ResultType>
|
template <typename Result = ResultType>
|
||||||
static inline Result apply(A a, B b)
|
static inline Result apply(A a, B b)
|
||||||
{
|
{
|
||||||
if (unlikely(divisionLeadsToFPE(a, b)))
|
if constexpr (std::is_floating_point_v<ResultType>)
|
||||||
return 0;
|
{
|
||||||
|
/// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance.
|
||||||
|
return ResultType(a) - trunc(ResultType(a) / ResultType(b)) * ResultType(b);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (unlikely(divisionLeadsToFPE(a, b)))
|
||||||
|
return 0;
|
||||||
|
|
||||||
return ModuloImpl<A, B>::template apply<Result>(a, b);
|
return ModuloImpl<A, B>::template apply<Result>(a, b);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if USE_EMBEDDED_COMPILER
|
#if USE_EMBEDDED_COMPILER
|
||||||
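
ModuloOrZeroImpl above keeps the trunc-based remainder for floats, where division by zero cannot trap, and for integers returns 0 instead of failing when the division would cause a hardware fault. The sketch below spells out the usual fault conditions explicitly as an assumption; the actual divisionLeadsToFPE helper is not reproduced here.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Integers: return 0 rather than trap on b == 0 or INT64_MIN % -1.
static int64_t moduloOrZeroInt(int64_t a, int64_t b)
{
    bool leads_to_fpe = (b == 0) || (a == std::numeric_limits<int64_t>::min() && b == -1);
    return leads_to_fpe ? 0 : a % b;
}

// Floats: same trunc-based remainder as the plain modulo.
static double moduloOrZeroFloat(double a, double b)
{
    return a - std::trunc(a / b) * b;
}

int main()
{
    assert(moduloOrZeroInt(7, 0) == 0);
    assert(moduloOrZeroInt(7, 3) == 1);
    assert(moduloOrZeroFloat(-8.5, -2.5) == -1.0);
}
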
|
@ -71,10 +71,13 @@ void AnyInputMatcher::visit(ASTPtr & current_ast, Data data)
|
|||||||
&& function_argument && function_argument->as<ASTFunction>())
|
&& function_argument && function_argument->as<ASTFunction>())
|
||||||
{
|
{
|
||||||
auto name = function_node->name;
|
auto name = function_node->name;
|
||||||
|
auto alias = function_node->alias;
|
||||||
|
|
||||||
///cut any or anyLast
|
///cut any or anyLast
|
||||||
if (!function_argument->as<ASTFunction>()->arguments->children.empty())
|
if (!function_argument->as<ASTFunction>()->arguments->children.empty())
|
||||||
{
|
{
|
||||||
current_ast = function_argument->clone();
|
current_ast = function_argument->clone();
|
||||||
|
current_ast->setAlias(alias);
|
||||||
for (size_t i = 0; i < current_ast->as<ASTFunction>()->arguments->children.size(); ++i)
|
for (size_t i = 0; i < current_ast->as<ASTFunction>()->arguments->children.size(); ++i)
|
||||||
changeAllIdentifiers(current_ast, i, name);
|
changeAllIdentifiers(current_ast, i, name);
|
||||||
}
|
}
|
||||||
|
@ -62,6 +62,11 @@ void CollectJoinOnKeysMatcher::visit(const ASTFunction & func, const ASTPtr & as
|
|||||||
|
|
||||||
if (func.name == "equals")
|
if (func.name == "equals")
|
||||||
{
|
{
|
||||||
|
if (func.arguments->children.size() != 2)
|
||||||
|
{
|
||||||
|
throwSyntaxException("Function 'equals' takes two arguments, got '"
|
||||||
|
+ func.formatForErrorMessage() + "' instead.");
|
||||||
|
}
|
||||||
ASTPtr left = func.arguments->children.at(0);
|
ASTPtr left = func.arguments->children.at(0);
|
||||||
ASTPtr right = func.arguments->children.at(1);
|
ASTPtr right = func.arguments->children.at(1);
|
||||||
auto table_numbers = getTableNumbers(ast, left, right, data);
|
auto table_numbers = getTableNumbers(ast, left, right, data);
|
||||||
|
@ -367,15 +367,28 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
|
|||||||
affected_materialized.emplace(mat_column);
|
affected_materialized.emplace(mat_column);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Just to be sure, that we don't change type
|
/// When doing UPDATE column = expression WHERE condition
|
||||||
/// after update expression execution.
|
/// we will replace column to the result of the following expression:
|
||||||
|
///
|
||||||
|
/// CAST(if(condition, CAST(expression, type), column), type)
|
||||||
|
///
|
||||||
|
/// Inner CAST is needed to make 'if' work when branches have no common type,
|
||||||
|
/// example: type is UInt64, UPDATE x = -1 or UPDATE x = x - 1.
|
||||||
|
///
|
||||||
|
/// Outer CAST is added just in case if we don't trust the returning type of 'if'.
|
||||||
|
|
||||||
|
auto type_literal = std::make_shared<ASTLiteral>(columns_desc.getPhysical(column).type->getName());
|
||||||
|
|
||||||
const auto & update_expr = kv.second;
|
const auto & update_expr = kv.second;
|
||||||
auto updated_column = makeASTFunction("CAST",
|
auto updated_column = makeASTFunction("CAST",
|
||||||
makeASTFunction("if",
|
makeASTFunction("if",
|
||||||
command.predicate->clone(),
|
command.predicate->clone(),
|
||||||
update_expr->clone(),
|
makeASTFunction("CAST",
|
||||||
|
update_expr->clone(),
|
||||||
|
type_literal),
|
||||||
std::make_shared<ASTIdentifier>(column)),
|
std::make_shared<ASTIdentifier>(column)),
|
||||||
std::make_shared<ASTLiteral>(columns_desc.getPhysical(column).type->getName()));
|
type_literal);
|
||||||
|
|
||||||
stages.back().column_to_updated.emplace(column, updated_column);
|
stages.back().column_to_updated.emplace(column, updated_column);
|
||||||
}
|
}
|
||||||
|
|
||||||
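
The comment in this hunk describes how an UPDATE is turned into a read-through expression. For the statement exercised below in 01413_alter_update_supertype.sql, ALTER TABLE t UPDATE x = x - 1 WHERE x % 2 = 1 on a UInt64 column, the column is effectively read as CAST(if(x % 2 = 1, CAST(x - 1, 'UInt64'), x), 'UInt64'). The hypothetical helper below only assembles that text to show the nesting; the real code builds an AST with makeASTFunction instead of strings.

#include <iostream>
#include <string>

// Assembles CAST(if(condition, CAST(update_expr, type), column), type).
static std::string buildUpdatedColumnExpr(
    const std::string & column, const std::string & update_expr,
    const std::string & condition, const std::string & type)
{
    std::string inner_cast = "CAST(" + update_expr + ", '" + type + "')";
    std::string branch = "if(" + condition + ", " + inner_cast + ", " + column + ")";
    return "CAST(" + branch + ", '" + type + "')";
}

int main()
{
    std::cout << buildUpdatedColumnExpr("x", "x - 1", "x % 2 = 1", "UInt64") << '\n';
}
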
@ -465,7 +478,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
|
|||||||
throw Exception("Unknown mutation command type: " + DB::toString<int>(command.type), ErrorCodes::UNKNOWN_MUTATION_COMMAND);
|
throw Exception("Unknown mutation command type: " + DB::toString<int>(command.type), ErrorCodes::UNKNOWN_MUTATION_COMMAND);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// We cares about affected indices because we also need to rewrite them
|
/// We care about affected indices because we also need to rewrite them
|
||||||
/// when one of index columns updated or filtered with delete.
|
/// when one of index columns updated or filtered with delete.
|
||||||
/// The same about colums, that are needed for calculation of TTL expressions.
|
/// The same about colums, that are needed for calculation of TTL expressions.
|
||||||
if (!dependencies.empty())
|
if (!dependencies.empty())
|
||||||
|
@ -11,6 +11,7 @@ namespace DB
|
|||||||
|
|
||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
|
extern const int BAD_ARGUMENTS;
|
||||||
extern const int LOGICAL_ERROR;
|
extern const int LOGICAL_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -63,10 +64,15 @@ struct StorageID
|
|||||||
|
|
||||||
void assertNotEmpty() const
|
void assertNotEmpty() const
|
||||||
{
|
{
|
||||||
|
// Can be triggered by user input, e.g. SELECT joinGetOrNull('', 'num', 500)
|
||||||
if (empty())
|
if (empty())
|
||||||
throw Exception("Both table name and UUID are empty", ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Table name cannot be empty. Please specify a valid table name or UUID", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
// This can also be triggered by user input, but we haven't decided what
|
||||||
|
// to do about it: create table "_"(a int) engine Log;
|
||||||
if (table_name == TABLE_WITH_UUID_NAME_PLACEHOLDER && !hasUUID())
|
if (table_name == TABLE_WITH_UUID_NAME_PLACEHOLDER && !hasUUID())
|
||||||
throw Exception("Table name was replaced with placeholder, but UUID is Nil", ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Table name was replaced with placeholder, but UUID is Nil", ErrorCodes::LOGICAL_ERROR);
|
||||||
|
|
||||||
if (table_name.empty() && !database_name.empty())
|
if (table_name.empty() && !database_name.empty())
|
||||||
throw Exception("Table name is empty, but database name is not", ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Table name is empty, but database name is not", ErrorCodes::LOGICAL_ERROR);
|
||||||
}
|
}
|
||||||
|
@ -231,9 +231,8 @@ void MergeSortingTransform::generate()
|
|||||||
ProfileEvents::increment(ProfileEvents::ExternalSortMerge);
|
ProfileEvents::increment(ProfileEvents::ExternalSortMerge);
|
||||||
LOG_INFO(log, "There are {} temporary sorted parts to merge.", temporary_files.size());
|
LOG_INFO(log, "There are {} temporary sorted parts to merge.", temporary_files.size());
|
||||||
|
|
||||||
if (!chunks.empty())
|
processors.emplace_back(std::make_shared<MergeSorterSource>(
|
||||||
processors.emplace_back(std::make_shared<MergeSorterSource>(
|
header_without_constants, std::move(chunks), description, max_merged_block_size, limit));
|
||||||
header_without_constants, std::move(chunks), description, max_merged_block_size, limit));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
generated_prefix = true;
|
generated_prefix = true;
|
||||||
|
@ -3629,7 +3629,7 @@ bool MergeTreeData::canUsePolymorphicParts(const MergeTreeSettings & settings, S
|
|||||||
{
|
{
|
||||||
std::ostringstream message;
|
std::ostringstream message;
|
||||||
message << "Table can't create parts with adaptive granularity, but settings"
|
message << "Table can't create parts with adaptive granularity, but settings"
|
||||||
<< "min_rows_for_wide_part = " << settings.min_rows_for_wide_part
|
<< " min_rows_for_wide_part = " << settings.min_rows_for_wide_part
|
||||||
<< ", min_bytes_for_wide_part = " << settings.min_bytes_for_wide_part
|
<< ", min_bytes_for_wide_part = " << settings.min_bytes_for_wide_part
|
||||||
<< ", min_rows_for_compact_part = " << settings.min_rows_for_compact_part
|
<< ", min_rows_for_compact_part = " << settings.min_rows_for_compact_part
|
||||||
<< ", min_bytes_for_compact_part = " << settings.min_bytes_for_compact_part
|
<< ", min_bytes_for_compact_part = " << settings.min_bytes_for_compact_part
|
||||||
|
3
tests/performance/float_mod.xml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
<test>
|
||||||
|
<query>SELECT count() FROM numbers(100000000) WHERE NOT ignore(number % 1.1)</query>
|
||||||
|
</test>
|
@ -1,9 +1,11 @@
|
|||||||
<test>
|
<test>
|
||||||
<preconditions>
|
<preconditions>
|
||||||
<table_exists>hits_10m_single</table_exists>
|
|
||||||
<table_exists>hits_100m_single</table_exists>
|
<table_exists>hits_100m_single</table_exists>
|
||||||
</preconditions>
|
</preconditions>
|
||||||
|
|
||||||
<query>SELECT length(URL) > 1000 ? 'LONG' : 'SHORT' as x FROM hits_100m_single GROUP BY x FORMAT Null</query>
|
<query>SELECT length(URL) > 1000 ? 'LONG' : 'SHORT' as x FROM hits_100m_single GROUP BY x FORMAT Null</query>
|
||||||
<query>SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') as x FROM numbers(1000000000) GROUP BY x FORMAT Null</query>
|
<query>SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') as x FROM numbers(100000000) GROUP BY x FORMAT Null</query>
|
||||||
|
|
||||||
|
<query>SELECT length(URL) > 1000 ? 'LONG' : 'SHORT' as x FROM hits_100m_single GROUP BY x FORMAT Null SETTINGS optimize_if_transform_strings_to_enum = 1</query>
|
||||||
|
<query>SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') as x FROM numbers(100000000) GROUP BY x FORMAT Null SETTINGS optimize_if_transform_strings_to_enum = 1</query>
|
||||||
</test>
|
</test>
|
||||||
|
@ -8,3 +8,14 @@
|
|||||||
8140551
|
8140551
|
||||||
5427034
|
5427034
|
||||||
2713517
|
2713517
|
||||||
|
-
|
||||||
|
440516
|
||||||
|
1540521
|
||||||
|
733765
|
||||||
|
1833770
|
||||||
|
1027014
|
||||||
|
220258
|
||||||
|
1320263
|
||||||
|
513507
|
||||||
|
1613512
|
||||||
|
806756
|
||||||
|
@ -1,3 +1,9 @@
|
|||||||
SET max_memory_usage = 300000000;
|
SET max_memory_usage = 300000000;
|
||||||
SET max_bytes_before_external_sort = 20000000;
|
SET max_bytes_before_external_sort = 20000000;
|
||||||
SELECT number FROM (SELECT number FROM system.numbers LIMIT 10000000) ORDER BY number * 1234567890123456789 LIMIT 9999990, 10;
|
SELECT number FROM (SELECT number FROM system.numbers LIMIT 10000000) ORDER BY number * 1234567890123456789 LIMIT 9999990, 10;
|
||||||
|
|
||||||
|
SELECT '-';
|
||||||
|
|
||||||
|
SET max_bytes_before_external_sort = 33554432;
|
||||||
|
set max_block_size = 1048576;
|
||||||
|
SELECT number FROM (SELECT number FROM numbers(2097152)) ORDER BY number * 1234567890123456789 LIMIT 2097142, 10;
|
||||||
|
8
tests/queries/0_stateless/01398_any_with_alias.reference
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
"n"
|
||||||
|
0
|
||||||
|
SELECT any(number) * any(number) AS n
|
||||||
|
FROM numbers(100)
|
||||||
|
"n"
|
||||||
|
0,0
|
||||||
|
SELECT (any(number), any(number) * 2) AS n
|
||||||
|
FROM numbers(100)
|
5
tests/queries/0_stateless/01398_any_with_alias.sql
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
SELECT any(number * number) AS n FROM numbers(100) FORMAT CSVWithNames;
|
||||||
|
EXPLAIN SYNTAX SELECT any(number * number) AS n FROM numbers(100);
|
||||||
|
|
||||||
|
SELECT any((number, number * 2)) as n FROM numbers(100) FORMAT CSVWithNames;
|
||||||
|
EXPLAIN SYNTAX SELECT any((number, number * 2)) as n FROM numbers(100);
|
71
tests/queries/0_stateless/01412_cache_dictionary_race.sh
Executable file
@ -0,0 +1,71 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||||
|
. $CURDIR/../shell_config.sh
|
||||||
|
|
||||||
|
|
||||||
|
$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db"
|
||||||
|
|
||||||
|
$CLICKHOUSE_CLIENT --query "CREATE DATABASE ordinary_db"
|
||||||
|
|
||||||
|
$CLICKHOUSE_CLIENT -n -q "
|
||||||
|
|
||||||
|
CREATE DICTIONARY ordinary_db.dict1
|
||||||
|
(
|
||||||
|
key_column UInt64 DEFAULT 0,
|
||||||
|
second_column UInt8 DEFAULT 1,
|
||||||
|
third_column String DEFAULT 'qqq'
|
||||||
|
)
|
||||||
|
PRIMARY KEY key_column
|
||||||
|
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'view_for_dict' PASSWORD '' DB 'ordinary_db'))
|
||||||
|
LIFETIME(MIN 1 MAX 3)
|
||||||
|
LAYOUT(CACHE(SIZE_IN_CELLS 3));
|
||||||
|
"
|
||||||
|
|
||||||
|
function dict_get_thread()
|
||||||
|
{
|
||||||
|
while true; do
|
||||||
|
$CLICKHOUSE_CLIENT --query "SELECT dictGetString('ordinary_db.dict1', 'third_column', toUInt64(rand() % 1000)) from numbers(2)" &>/dev/null
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function drop_create_table_thread()
|
||||||
|
{
|
||||||
|
while true; do
|
||||||
|
$CLICKHOUSE_CLIENT -n --query "CREATE TABLE ordinary_db.table_for_dict_real (
|
||||||
|
key_column UInt64,
|
||||||
|
second_column UInt8,
|
||||||
|
third_column String
|
||||||
|
)
|
||||||
|
ENGINE MergeTree() ORDER BY tuple();
|
||||||
|
INSERT INTO ordinary_db.table_for_dict_real SELECT number, number, toString(number) from numbers(2);
|
||||||
|
CREATE VIEW ordinary_db.view_for_dict AS SELECT key_column, second_column, third_column from ordinary_db.table_for_dict_real WHERE sleepEachRow(1) == 0;
|
||||||
|
"
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ordinary_db.table_for_dict_real"
|
||||||
|
sleep 10
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
export -f dict_get_thread;
|
||||||
|
export -f drop_create_table_thread;
|
||||||
|
|
||||||
|
TIMEOUT=30
|
||||||
|
|
||||||
|
timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null &
|
||||||
|
timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null &
|
||||||
|
timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null &
|
||||||
|
timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null &
|
||||||
|
timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null &
|
||||||
|
timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null &
|
||||||
|
|
||||||
|
|
||||||
|
timeout $TIMEOUT bash -c drop_create_table_thread 2> /dev/null &
|
||||||
|
|
||||||
|
wait
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db"
|
3
tests/queries/0_stateless/01412_mod_float.reference
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
1 -1 1 -1
|
||||||
|
0.125 -0.125 0.125 -0.125
|
||||||
|
1 -1 1 -1
|
3
tests/queries/0_stateless/01412_mod_float.sql
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
WITH 8.5 AS a, 2.5 AS b SELECT a % b, -a % b, a % -b, -a % -b;
|
||||||
|
WITH 10.125 AS a, 2.5 AS b SELECT a % b, -a % b, a % -b, -a % -b;
|
||||||
|
WITH 8.5 AS a, 2.5 AS b SELECT mod(a, b), MOD(-a, b), modulo(a, -b), moduloOrZero(-a, -b);
|
@ -0,0 +1,21 @@
|
|||||||
|
0
|
||||||
|
1
|
||||||
|
2
|
||||||
|
3
|
||||||
|
4
|
||||||
|
5
|
||||||
|
6
|
||||||
|
7
|
||||||
|
8
|
||||||
|
9
|
||||||
|
---
|
||||||
|
0
|
||||||
|
0
|
||||||
|
2
|
||||||
|
2
|
||||||
|
4
|
||||||
|
4
|
||||||
|
6
|
||||||
|
6
|
||||||
|
8
|
||||||
|
8
|
13
tests/queries/0_stateless/01413_alter_update_supertype.sql
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
DROP TABLE IF EXISTS t;
|
||||||
|
CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY tuple();
|
||||||
|
INSERT INTO t SELECT number FROM numbers(10);
|
||||||
|
|
||||||
|
SELECT * FROM t;
|
||||||
|
|
||||||
|
SET mutations_sync = 1;
|
||||||
|
ALTER TABLE t UPDATE x = x - 1 WHERE x % 2 = 1;
|
||||||
|
|
||||||
|
SELECT '---';
|
||||||
|
SELECT * FROM t;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
5
tests/queries/0_stateless/01413_if_array_uuid.reference
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
['00000000-e1fe-11e9-bb8f-853d60c00749']
|
||||||
|
['11111111-e1fe-11e9-bb8f-853d60c00749']
|
||||||
|
['00000000-e1fe-11e9-bb8f-853d60c00749']
|
||||||
|
['11111111-e1fe-11e9-bb8f-853d60c00749']
|
||||||
|
['00000000-e1fe-11e9-bb8f-853d60c00749']
|
1
tests/queries/0_stateless/01413_if_array_uuid.sql
Normal file
@ -0,0 +1 @@
|
|||||||
|
SELECT if(number % 2 = 0, [toUUID('00000000-e1fe-11e9-bb8f-853d60c00749')], [toUUID('11111111-e1fe-11e9-bb8f-853d60c00749')]) FROM numbers(5);
|
@ -1,8 +1,8 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
from query import Query as RemoteRepo
|
from clickhouse.utils.github.cherrypick import CherryPick
|
||||||
from local import BareRepository as LocalRepo
|
from clickhouse.utils.github.query import Query as RemoteRepo
|
||||||
import cherrypick
|
from clickhouse.utils.github.local import Repository as LocalRepo
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import logging
|
import logging
|
||||||
@ -12,83 +12,78 @@ import sys
|
|||||||
|
|
||||||
class Backport:
|
class Backport:
|
||||||
def __init__(self, token, owner, name, team):
|
def __init__(self, token, owner, name, team):
|
||||||
'''
|
|
||||||
`refs` is a list of (ref_path, base_commit) sorted by ancestry starting from the least recent ref.
|
|
||||||
'''
|
|
||||||
self._gh = RemoteRepo(token, owner=owner, name=name, team=team, max_page_size=30)
|
self._gh = RemoteRepo(token, owner=owner, name=name, team=team, max_page_size=30)
|
||||||
|
self._token = token
|
||||||
self.default_branch_name = self._gh.default_branch
|
self.default_branch_name = self._gh.default_branch
|
||||||
|
self.ssh_url = self._gh.ssh_url
|
||||||
|
|
||||||
     def getPullRequests(self, from_commit):
         return self._gh.get_pull_requests(from_commit)
 
-
-def run(token, repo_bare, til, number, run_cherrypick):
-    bp = Backport(token, 'ClickHouse', 'ClickHouse', 'core')
-    repo = LocalRepo(repo_bare, bp.default_branch_name)
-    branches = repo.get_release_branches()[-number:]  # [(branch_name, base_commit)]
-
-    if not branches:
-        logging.info('No release branches found!')
-        return
-
-    for branch in branches:
-        logging.info('Found release branch: %s', branch[0])
-
-    if not til:
-        til = branches[0][1]
-    prs = bp.getPullRequests(til)
-
-    backport_map = {}
-
-    RE_MUST_BACKPORT = re.compile(r'^v(\d+\.\d+)-must-backport$')
-    RE_NO_BACKPORT = re.compile(r'^v(\d+\.\d+)-no-backport$')
-
-    # pull-requests are sorted by ancestry from the least recent.
-    for pr in prs:
-        while repo.comparator(branches[-1][1]) >= repo.comparator(pr['mergeCommit']['oid']):
-            branches.pop()
-
-        assert len(branches)
-
-        branch_set = set([branch[0] for branch in branches])
-
-        # First pass. Find all must-backports
-        for label in pr['labels']['nodes']:
-            if label['name'].startswith('pr-') and label['color'] == 'ff0000':
-                backport_map[pr['number']] = branch_set.copy()
-                continue
-            m = RE_MUST_BACKPORT.match(label['name'])
-            if m:
-                if pr['number'] not in backport_map:
-                    backport_map[pr['number']] = set()
-                backport_map[pr['number']].add(m.group(1))
-
-        # Second pass. Find all no-backports
-        for label in pr['labels']['nodes']:
-            if label['name'] == 'pr-no-backport' and pr['number'] in backport_map:
-                del backport_map[pr['number']]
-                break
-            m = RE_NO_BACKPORT.match(label['name'])
-            if m and pr['number'] in backport_map and m.group(1) in backport_map[pr['number']]:
-                backport_map[pr['number']].remove(m.group(1))
-
-    for pr, branches in backport_map.items():
-        logging.info('PR #%s needs to be backported to:', pr)
-        for branch in branches:
-            logging.info('\t%s %s', branch, run_cherrypick(token, pr, branch))
-
-    # print API costs
-    logging.info('\nGitHub API total costs per query:')
-    for name, value in bp._gh.api_costs.items():
-        logging.info('%s : %s', name, value)
+    def execute(self, repo, til, number, run_cherrypick):
+        repo = LocalRepo(repo, 'origin', self.default_branch_name)
+        branches = repo.get_release_branches()[-number:]  # [(branch_name, base_commit)]
+
+        if not branches:
+            logging.info('No release branches found!')
+            return
+
+        for branch in branches:
+            logging.info('Found release branch: %s', branch[0])
+
+        if not til:
+            til = branches[0][1]
+        prs = self.getPullRequests(til)
+
+        backport_map = {}
+
+        RE_MUST_BACKPORT = re.compile(r'^v(\d+\.\d+)-must-backport$')
+        RE_NO_BACKPORT = re.compile(r'^v(\d+\.\d+)-no-backport$')
+
+        # pull-requests are sorted by ancestry from the least recent.
+        for pr in prs:
+            while repo.comparator(branches[-1][1]) >= repo.comparator(pr['mergeCommit']['oid']):
+                branches.pop()
+
+            assert len(branches)
+
+            branch_set = set([branch[0] for branch in branches])
+
+            # First pass. Find all must-backports
+            for label in pr['labels']['nodes']:
+                if label['name'].startswith('pr-') and label['color'] == 'ff0000':
+                    backport_map[pr['number']] = branch_set.copy()
+                    continue
+                m = RE_MUST_BACKPORT.match(label['name'])
+                if m:
+                    if pr['number'] not in backport_map:
+                        backport_map[pr['number']] = set()
+                    backport_map[pr['number']].add(m.group(1))
+
+            # Second pass. Find all no-backports
+            for label in pr['labels']['nodes']:
+                if label['name'] == 'pr-no-backport' and pr['number'] in backport_map:
+                    del backport_map[pr['number']]
+                    break
+                m = RE_NO_BACKPORT.match(label['name'])
+                if m and pr['number'] in backport_map and m.group(1) in backport_map[pr['number']]:
+                    backport_map[pr['number']].remove(m.group(1))
+
+        for pr, branches in backport_map.items():
+            logging.info('PR #%s needs to be backported to:', pr)
+            for branch in branches:
+                logging.info('\t%s %s', branch, run_cherrypick(self._token, pr, branch))
+
+        # print API costs
+        logging.info('\nGitHub API total costs per query:')
+        for name, value in self._gh.api_costs.items():
+            logging.info('%s : %s', name, value)
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument('--token', type=str, required=True, help='token for Github access')
-    parser.add_argument('--repo-bare', type=str, required=True, help='path to bare repository', metavar='PATH')
-    parser.add_argument('--repo-full', type=str, required=True, help='path to full repository', metavar='PATH')
+    parser.add_argument('--repo', type=str, required=True, help='path to full repository', metavar='PATH')
     parser.add_argument('--til', type=str, help='check PRs from HEAD til this commit', metavar='COMMIT')
     parser.add_argument('-n', type=int, dest='number', help='number of last release branches to consider')
     parser.add_argument('--dry-run', action='store_true', help='do not create or merge any PRs', default=False)
@@ -100,5 +95,6 @@ if __name__ == "__main__":
     else:
         logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
 
-    cherrypick_run = lambda token, pr, branch: cherrypick.run(token, pr, branch, args.repo_full, args.dry_run)
-    run(args.token, args.repo_bare, args.til, args.number, cherrypick_run)
+    cherrypick_run = lambda token, pr, branch: CherryPick(token, 'ClickHouse', 'ClickHouse', 'core', pr, branch).execute(args.repo, args.dry_run)
+    bp = Backport(args.token, 'ClickHouse', 'ClickHouse', 'core')
+    bp.execute(args.repo, args.til, args.number, cherrypick_run)
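For reference, the label handling inside execute() above can be read in isolation. The following is a minimal sketch of the same two-pass rule, not part of the commit; the helper name branches_for_pr and the sample labels and branch names are invented for illustration.

import re

RE_MUST_BACKPORT = re.compile(r'^v(\d+\.\d+)-must-backport$')
RE_NO_BACKPORT = re.compile(r'^v(\d+\.\d+)-no-backport$')

def branches_for_pr(labels, release_branches):
    '''Return the release branches a PR should be backported to,
    following the two-pass rule from Backport.execute() above.'''
    targets = set()

    # First pass: a red 'pr-*' label requests backport to every open release branch,
    # a vX.Y-must-backport label requests a single branch.
    for label in labels:
        if label['name'].startswith('pr-') and label['color'] == 'ff0000':
            targets = set(release_branches)
            continue
        m = RE_MUST_BACKPORT.match(label['name'])
        if m:
            targets.add(m.group(1))

    # Second pass: 'pr-no-backport' cancels everything,
    # a vX.Y-no-backport label removes a single branch.
    for label in labels:
        if label['name'] == 'pr-no-backport':
            return set()
        m = RE_NO_BACKPORT.match(label['name'])
        if m:
            targets.discard(m.group(1))

    return targets

# Made-up example: a PR marked for 20.3 and explicitly excluded from 20.1.
print(branches_for_pr(
    [{'name': 'v20.3-must-backport', 'color': 'd4c5f9'},
     {'name': 'v20.1-no-backport', 'color': 'd4c5f9'}],
    ['20.1', '20.3']))  # -> {'20.3'}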
@@ -14,12 +14,13 @@ Second run checks PR from previous run to be merged or at least being mergeable.
 Third run creates PR from backport branch (with merged previous PR) to release branch.
 '''
 
-from query import Query as RemoteRepo
+from clickhouse.utils.github.query import Query as RemoteRepo
 
 import argparse
 from enum import Enum
 import logging
 import os
+import subprocess
 import sys
 
 
@@ -33,10 +34,15 @@ class CherryPick:
         SECOND_CONFLICTS = 'conflicts on 2nd stage'
         MERGED = 'backported'
 
+    def _run(self, args):
+        logging.info(subprocess.check_output(args))
+
     def __init__(self, token, owner, name, team, pr_number, target_branch):
         self._gh = RemoteRepo(token, owner=owner, name=name, team=team)
         self._pr = self._gh.get_pull_request(pr_number)
 
+        self.ssh_url = self._gh.ssh_url
+
         # TODO: check if pull-request is merged.
 
         self.merge_commit_oid = self._pr['mergeCommit']['oid']
@@ -60,25 +66,26 @@ class CherryPick:
         )
 
         # FIXME: replace with something better than os.system()
-        git_prefix = 'git -C {} -c "user.email=robot-clickhouse@yandex-team.ru" -c "user.name=robot-clickhouse" '.format(repo_path)
+        git_prefix = ['git', '-C', repo_path, '-c', 'user.email=robot-clickhouse@yandex-team.ru', '-c', 'user.name=robot-clickhouse']
         base_commit_oid = self._pr['mergeCommit']['parents']['nodes'][0]['oid']
 
         # Create separate branch for backporting, and make it look like real cherry-pick.
-        os.system(git_prefix + 'checkout -f ' + self.target_branch)
-        os.system(git_prefix + 'checkout -B ' + self.backport_branch)
-        os.system(git_prefix + 'merge -s ours --no-edit ' + base_commit_oid)
+        self._run(git_prefix + ['checkout', '-f', self.target_branch])
+        self._run(git_prefix + ['checkout', '-B', self.backport_branch])
+        self._run(git_prefix + ['merge', '-s', 'ours', '--no-edit', base_commit_oid])
 
         # Create secondary branch to allow pull request with cherry-picked commit.
-        os.system(git_prefix + 'branch -f {} {}'.format(self.cherrypick_branch, self.merge_commit_oid))
+        self._run(git_prefix + ['branch', '-f', self.cherrypick_branch, self.merge_commit_oid])
 
-        os.system(git_prefix + 'push -f origin {branch}:{branch}'.format(branch=self.backport_branch))
-        os.system(git_prefix + 'push -f origin {branch}:{branch}'.format(branch=self.cherrypick_branch))
+        self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.backport_branch)])
+        self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.cherrypick_branch)])
 
         # Create pull-request like a local cherry-pick
         pr = self._gh.create_pull_request(source=self.cherrypick_branch, target=self.backport_branch,
                                           title='Cherry pick #{number} to {target}: {title}'.format(
-                                              number=self._pr['number'], target=self.target_branch, title=self._pr['title'].replace('"', '\\"')),
+                                              number=self._pr['number'], target=self.target_branch,
+                                              title=self._pr['title'].replace('"', '\\"')),
                                           description='Original pull-request #{}\n\n{}'.format(self._pr['number'], DESCRIPTION))
 
         # FIXME: use `team` to leave a single eligible assignee.
         self._gh.add_assignee(pr, self._pr['author'])
@@ -102,18 +109,20 @@ class CherryPick:
             'Merge it only if you intend to backport changes to the target branch, otherwise just close it.\n'
         )
 
-        git_prefix = 'git -C {} -c "user.email=robot-clickhouse@yandex-team.ru" -c "user.name=robot-clickhouse" '.format(repo_path)
+        git_prefix = ['git', '-C', repo_path, '-c', 'user.email=robot-clickhouse@yandex-team.ru', '-c', 'user.name=robot-clickhouse']
 
-        os.system(git_prefix + 'checkout -f ' + self.backport_branch)
-        os.system(git_prefix + 'pull --ff-only origin ' + self.backport_branch)
-        os.system(git_prefix + 'reset --soft `{git} merge-base {target} {backport}`'.format(git=git_prefix, target=self.target_branch, backport=self.backport_branch))
-        os.system(git_prefix + 'commit -a -m "Squash backport branch"')
-        os.system(git_prefix + 'push -f origin {branch}:{branch}'.format(branch=self.backport_branch))
+        pr_title = 'Backport #{number} to {target}: {title}'.format(
+            number=self._pr['number'], target=self.target_branch,
+            title=self._pr['title'].replace('"', '\\"'))
 
-        pr = self._gh.create_pull_request(source=self.backport_branch, target=self.target_branch,
-                                          title='Backport #{number} to {target}: {title}'.format(
-                                              number=self._pr['number'], target=self.target_branch, title=self._pr['title'].replace('"', '\\"')),
-                                          description='Original pull-request #{}\nCherry-pick pull-request #{}\n\n{}'.format(self._pr['number'], cherrypick_pr['number'], DESCRIPTION))
+        self._run(git_prefix + ['checkout', '-f', self.backport_branch])
+        self._run(git_prefix + ['pull', '--ff-only', 'origin', self.backport_branch])
+        self._run(git_prefix + ['reset', '--soft', self._run(git_prefix + ['merge-base', self.target_branch, self.backport_branch])])
+        self._run(git_prefix + ['commit', '-a', '-m', pr_title])
+        self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.backport_branch)])
+
+        pr = self._gh.create_pull_request(source=self.backport_branch, target=self.target_branch, title=pr_title,
+                                          description='Original pull-request #{}\nCherry-pick pull-request #{}\n\n{}'.format(self._pr['number'], cherrypick_pr['number'], DESCRIPTION))
 
         # FIXME: use `team` to leave a single eligible assignee.
         self._gh.add_assignee(pr, self._pr['author'])
@@ -123,53 +132,50 @@ class CherryPick:
 
         return pr
 
-
-def run(token, pr, branch, repo, dry_run=False):
-    cp = CherryPick(token, 'ClickHouse', 'ClickHouse', 'core', pr, branch)
-
-    pr1 = cp.getCherryPickPullRequest()
-    if not pr1:
-        if not dry_run:
-            pr1 = cp.createCherryPickPullRequest(repo)
-            logging.debug('Created PR with cherry-pick of %s to %s: %s', pr, branch, pr1['url'])
-        else:
-            return CherryPick.Status.NOT_INITIATED
-    else:
-        logging.debug('Found PR with cherry-pick of %s to %s: %s', pr, branch, pr1['url'])
-
-    if not pr1['merged'] and pr1['mergeable'] == 'MERGEABLE' and not pr1['closed']:
-        if not dry_run:
-            pr1 = cp.mergeCherryPickPullRequest(pr1)
-            logging.debug('Merged PR with cherry-pick of %s to %s: %s', pr, branch, pr1['url'])
-
-    if not pr1['merged']:
-        logging.debug('Waiting for PR with cherry-pick of %s to %s: %s', pr, branch, pr1['url'])
-
-        if pr1['closed']:
-            return CherryPick.Status.DISCARDED
-        elif pr1['mergeable'] == 'CONFLICTING':
-            return CherryPick.Status.FIRST_CONFLICTS
-        else:
-            return CherryPick.Status.FIRST_MERGEABLE
-
-    pr2 = cp.getBackportPullRequest()
-    if not pr2:
-        if not dry_run:
-            pr2 = cp.createBackportPullRequest(pr1, repo)
-            logging.debug('Created PR with backport of %s to %s: %s', pr, branch, pr2['url'])
-        else:
-            return CherryPick.Status.FIRST_MERGEABLE
-    else:
-        logging.debug('Found PR with backport of %s to %s: %s', pr, branch, pr2['url'])
-
-    if pr2['merged']:
-        return CherryPick.Status.MERGED
-    elif pr2['closed']:
-        return CherryPick.Status.DISCARDED
-    elif pr2['mergeable'] == 'CONFLICTING':
-        return CherryPick.Status.SECOND_CONFLICTS
-    else:
-        return CherryPick.Status.SECOND_MERGEABLE
+    def execute(self, repo_path, dry_run=False):
+        pr1 = self.getCherryPickPullRequest()
+        if not pr1:
+            if not dry_run:
+                pr1 = self.createCherryPickPullRequest(repo_path)
+                logging.debug('Created PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url'])
+            else:
+                return CherryPick.Status.NOT_INITIATED
+        else:
+            logging.debug('Found PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url'])
+
+        if not pr1['merged'] and pr1['mergeable'] == 'MERGEABLE' and not pr1['closed']:
+            if not dry_run:
+                pr1 = self.mergeCherryPickPullRequest(pr1)
+                logging.debug('Merged PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url'])
+
+        if not pr1['merged']:
+            logging.debug('Waiting for PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url'])
+
+            if pr1['closed']:
+                return CherryPick.Status.DISCARDED
+            elif pr1['mergeable'] == 'CONFLICTING':
+                return CherryPick.Status.FIRST_CONFLICTS
+            else:
+                return CherryPick.Status.FIRST_MERGEABLE
+
+        pr2 = self.getBackportPullRequest()
+        if not pr2:
+            if not dry_run:
+                pr2 = self.createBackportPullRequest(pr1, repo_path)
+                logging.debug('Created PR with backport of %s to %s: %s', self._pr['number'], self.target_branch, pr2['url'])
+            else:
+                return CherryPick.Status.FIRST_MERGEABLE
+        else:
+            logging.debug('Found PR with backport of %s to %s: %s', self._pr['number'], self.target_branch, pr2['url'])
+
+        if pr2['merged']:
+            return CherryPick.Status.MERGED
+        elif pr2['closed']:
+            return CherryPick.Status.DISCARDED
+        elif pr2['mergeable'] == 'CONFLICTING':
+            return CherryPick.Status.SECOND_CONFLICTS
+        else:
+            return CherryPick.Status.SECOND_MERGEABLE
 
 
 if __name__ == "__main__":
@@ -182,4 +188,5 @@ if __name__ == "__main__":
     parser.add_argument('--repo', '-r', type=str, required=True, help='path to full repository', metavar='PATH')
     args = parser.parse_args()
 
-    run(args.token, args.pr, args.branch, args.repo)
+    cp = CherryPick(args.token, 'ClickHouse', 'ClickHouse', 'core', args.pr, args.branch)
+    cp.execute(args.repo)
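The switch from os.system() with an interpolated command string to _run() with an argument list is the usual quoting and shell-injection fix. A hedged sketch of the difference, separate from the commit itself (the repository path and branch name below are invented):

import subprocess

repo_path = '/tmp/clickhouse'            # invented path
backport_branch = 'backport/20.3/1234'   # invented branch name

git_prefix = ['git', '-C', repo_path,
              '-c', 'user.email=robot-clickhouse@yandex-team.ru',
              '-c', 'user.name=robot-clickhouse']

# Old style: one string handed to the shell; a branch name containing spaces
# or shell metacharacters would break the command.
# os.system('git -C {} checkout -f {}'.format(repo_path, backport_branch))

# New style: an argument list, no shell involved; check_output() also raises
# CalledProcessError on a non-zero exit code instead of silently continuing.
out = subprocess.check_output(git_prefix + ['checkout', '-f', backport_branch])
print(out)

One caveat with this pattern: when one command's output must feed a later command (as git merge-base feeds git reset --soft above), the helper has to return the captured output rather than only logging it, as the sketch does with out.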
@@ -1,11 +1,5 @@
 # -*- coding: utf-8 -*-
 
-try:
-    import git  # `pip install gitpython`
-except ImportError:
-    import sys
-    sys.exit("Package 'gitpython' not found. Try run: `pip install [--user] gitpython`")
-
 import functools
 import logging
 import os
@@ -14,6 +8,8 @@ import re
 
 class RepositoryBase(object):
     def __init__(self, repo_path):
+        import git
+
         self._repo = git.Repo(repo_path, search_parent_directories=(not repo_path))
 
         # commit comparator
@@ -34,10 +30,12 @@ class RepositoryBase(object):
         for commit in self._repo.iter_commits(rev_range, first_parent=True):
             yield commit
 
 
 class Repository(RepositoryBase):
     def __init__(self, repo_path, remote_name, default_branch_name):
         super(Repository, self).__init__(repo_path)
         self._remote = self._repo.remotes[remote_name]
+        self._remote.fetch()
         self._default = self._remote.refs[default_branch_name]
 
     def get_release_branches(self):
@@ -63,6 +61,7 @@ class Repository(RepositoryBase):
 
         return sorted(release_branches, key=lambda x : self.comparator(x[1]))
 
 
 class BareRepository(RepositoryBase):
     def __init__(self, repo_path, default_branch_name):
         super(BareRepository, self).__init__(repo_path)
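Moving `import git` into RepositoryBase.__init__ defers the gitpython dependency to the moment a local repository is actually opened. A minimal, self-contained sketch of the same gitpython calls used above, not taken from the commit (the repository path and the list_remote_branches helper are placeholders):

import git  # pip install gitpython

def list_remote_branches(repo_path, remote_name='origin'):
    '''Open a working copy, refresh the remote, and list its branches,
    mirroring Repository.__init__ and get_release_branches above.'''
    repo = git.Repo(repo_path, search_parent_directories=(not repo_path))
    remote = repo.remotes[remote_name]
    remote.fetch()  # the call added in this commit: refresh refs before reading them
    return [(ref.name, ref.commit.hexsha) for ref in remote.refs]

if __name__ == '__main__':
    for name, commit in list_remote_branches('.'):
        print(name, commit)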
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 
 import requests
-import time
 
 
 class Query:
@@ -136,7 +135,7 @@ class Query:
         '''
 
         query = _QUERY.format(owner=self._owner, name=self._name, number=number,
-                              pull_request_data = self._PULL_REQUEST, min_page_size=self._min_page_size)
+                              pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size)
         return self._run(query)['repository']['pullRequest']
 
     def find_pull_request(self, base, head):
@@ -152,7 +151,7 @@ class Query:
         '''
 
         query = _QUERY.format(owner=self._owner, name=self._name, base=base, head=head,
-                              pull_request_data = self._PULL_REQUEST, min_page_size=self._min_page_size)
+                              pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size)
         result = self._run(query)['repository']['pullRequests']
         if result['totalCount'] > 0:
             return result['nodes'][0]
@@ -257,7 +256,7 @@ class Query:
 
         query = _QUERY.format(target=target, source=source, id=self._id, title=title, body=description,
                               draft="true" if draft else "false", modify="true" if can_modify else "false",
-                              pull_request_data = self._PULL_REQUEST)
+                              pull_request_data=self._PULL_REQUEST)
         return self._run(query, is_mutation=True)['createPullRequest']['pullRequest']
 
     def merge_pull_request(self, id):
@@ -271,7 +270,7 @@ class Query:
         }}
         '''
 
-        query = _QUERY.format(id=id, pull_request_data = self._PULL_REQUEST)
+        query = _QUERY.format(id=id, pull_request_data=self._PULL_REQUEST)
         return self._run(query, is_mutation=True)['mergePullRequest']['pullRequest']
 
     # FIXME: figure out how to add more assignees at once
@@ -340,10 +339,10 @@ class Query:
         if not labels:
             return
 
-        query = _SET_LABEL.format(pr_id = pull_request['id'], label_id = labels[0]['id'])
+        query = _SET_LABEL.format(pr_id=pull_request['id'], label_id=labels[0]['id'])
         self._run(query, is_mutation=True)
 
-    ### OLD METHODS
+    # OLD METHODS
 
     # _LABELS = '''
     # repository(owner: "ClickHouse" name: "ClickHouse") {{
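These hunks only drop an unused import and normalize keyword-argument spacing to PEP 8 (name=value, no spaces around =). For context, a hedged sketch of how a GraphQL call like Query._run() can be made with requests; the endpoint, error handling, and run_graphql helper here are assumptions for illustration, not the repository's actual implementation:

import requests

def run_graphql(token, query):
    '''Sketch of a GitHub GraphQL v4 call; error handling is an assumption.'''
    headers = {'Authorization': 'bearer {}'.format(token)}
    response = requests.post('https://api.github.com/graphql',
                             json={'query': query}, headers=headers)
    response.raise_for_status()
    data = response.json()
    if 'errors' in data:
        raise RuntimeError(data['errors'])
    return data['data']

# Keyword arguments follow the spacing fixed above: name=value, no spaces.
_QUERY = '{{ repository(owner: "{owner}", name: "{name}") {{ nameWithOwner }} }}'
# print(run_graphql('<token>', _QUERY.format(owner='ClickHouse', name='ClickHouse')))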
@@ -1,4 +1,4 @@
-PY_LIBRARY()
+PY23_LIBRARY()
 
 PY_SRCS(
     __init__.py
@@ -67,7 +67,8 @@ Xeon Gold 6230 server is using 4 x SAMSUNG datacenter class SSD in RAID-10.<br/>
 Results for Yandex Managed ClickHouse for "cold cache" are biased and should not be compared, because cache was not flushed for every next query.<br/>
 Results for AWS Lightsail is from <b>Vamsi Krishna B.</b><br/>
 Results for Dell XPS laptop and Google Pixel phone is from <b>Alexander Kuzmenkov</b>.<br/>
-Results for Android phones for "cold cache" are done without cache flushing, so they are not "cold" and cannot be compared.
+Results for Android phones for "cold cache" are done without cache flushing, so they are not "cold" and cannot be compared.<br/>
+Results for Digital Ocean are from <b>Zimin Aleksey</b>.
 </p>
 </div>
 </div>
website/benchmark/hardware/results/050_xeon_e5_2650l_v3.json (new file, 54 lines)
@@ -0,0 +1,54 @@
+[
+    {
+        "system": "DO Xeon E5-2650Lv3",
+        "system_full": "Digital Ocean, Xeon E5-2650L v3 @ 1.80GHz, 16 cores, 64 GB, SSD Disk 1.28 TB",
+        "time": "2020-01-21 00:00:00",
+        "kind": "cloud",
+        "result":
+        [
+            [0.004, 0.004, 0.004],
+            [0.056, 0.037, 0.036],
+            [0.111, 0.079, 0.078],
+            [0.151, 0.092, 0.095],
+            [0.302, 0.262, 0.233],
+            [0.602, 0.541, 0.552],
+            [0.063, 0.049, 0.049],
+            [0.039, 0.036, 0.036],
+            [0.829, 0.762, 0.746],
+            [0.939, 0.883, 0.862],
+            [0.391, 0.311, 0.316],
+            [0.445, 0.358, 0.356],
+            [1.227, 1.024, 0.986],
+            [1.455, 1.276, 1.235],
+            [1.285, 1.146, 1.104],
+            [1.229, 1.119, 1.062],
+            [2.750, 2.555, 2.497],
+            [1.798, 1.587, 1.527],
+            [5.485, 5.167, 5.165],
+            [0.184, 0.142, 0.106],
+            [2.054, 1.350, 1.302],
+            [2.229, 1.399, 1.290],
+            [4.673, 3.013, 2.946],
+            [3.984, 1.656, 1.566],
+            [0.736, 0.492, 0.461],
+            [0.479, 0.382, 0.378],
+            [0.682, 0.456, 0.448],
+            [1.974, 1.296, 1.146],
+            [2.295, 1.847, 1.694],
+            [2.232, 2.199, 2.213],
+            [1.123, 0.953, 0.944],
+            [1.814, 1.385, 1.279],
+            [7.367, 7.127, 7.355],
+            [4.973, 4.595, 4.775],
+            [5.127, 4.639, 4.612],
+            [1.794, 1.630, 1.633],
+            [0.522, 0.420, 0.431],
+            [0.216, 0.193, 0.202],
+            [0.204, 0.171, 0.163],
+            [0.888, 0.823, 0.789],
+            [0.098, 0.077, 0.058],
+            [0.078, 0.042, 0.041],
+            [0.025, 0.015, 0.020]
+        ]
+    }
+]
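The result file above holds one entry with 43 query timings, three runs per query. A small hedged sketch for loading and sanity-checking a file of this shape (the path is the one introduced by this commit; the checks themselves are illustrative):

import json

with open('website/benchmark/hardware/results/050_xeon_e5_2650l_v3.json') as f:
    entries = json.load(f)

for entry in entries:
    runs = entry['result']
    # Every query is expected to carry three timed runs.
    assert all(len(r) == 3 for r in runs), 'three timed runs per query expected'
    print(entry['system'], len(runs), 'queries,',
          'best-run total: %.3f s' % sum(min(r) for r in runs))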
@@ -74,7 +74,7 @@ body[data-spy] #content {
 
 #content pre {
-    background: #eee;
+    background: #efefef;
     padding: 1rem;
 }
 