Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into hdfs-idisk

Author: kssenii
Date: 2021-04-22 07:41:55 +00:00
Commit: 7d1e5e6a08
126 changed files with 5277 additions and 437 deletions


@ -516,9 +516,9 @@ include (cmake/find/fast_float.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
include (cmake/find/nanodbc.cmake)
include (cmake/find/rocksdb.cmake)
include (cmake/find/libpqxx.cmake)
include (cmake/find/nanodbc.cmake)
include (cmake/find/nuraft.cmake)


@ -1,35 +1,16 @@
option(ENABLE_NANODBC "Enable nanodbc" ${ENABLE_LIBRARIES})
if (NOT ENABLE_NANODBC)
set (USE_ODBC 0)
return()
endif()
if (NOT ENABLE_ODBC)
set (USE_NANODBC 0)
message (STATUS "Using nanodbc=${USE_NANODBC}")
return()
endif()
return ()
endif ()
if (NOT USE_INTERNAL_NANODBC_LIBRARY)
message (FATAL_ERROR "Only the bundled nanodbc library can be used")
endif ()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/CMakeLists.txt")
message (WARNING "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal nanodbc library")
set (USE_NANODBC 0)
return()
message (FATAL_ERROR "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init --recursive")
endif()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/include")
message (ERROR "submodule contrib/unixodbc is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal unixodbc needed for nanodbc")
set (USE_NANODBC 0)
return()
endif()
set (USE_NANODBC 1)
set (NANODBC_LIBRARY nanodbc)
set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbc")
set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbce")
message (STATUS "Using nanodbc=${USE_NANODBC}: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}")
message (STATUS "Using unixodbc")
message (STATUS "Using nanodbc: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}")


@ -50,4 +50,6 @@ if (NOT EXTERNAL_ODBC_LIBRARY_FOUND)
set (USE_INTERNAL_ODBC_LIBRARY 1)
endif ()
set (USE_INTERNAL_NANODBC_LIBRARY 1)
message (STATUS "Using unixodbc")


@ -47,6 +47,7 @@ add_subdirectory (lz4-cmake)
add_subdirectory (murmurhash)
add_subdirectory (replxx-cmake)
add_subdirectory (unixodbc-cmake)
add_subdirectory (nanodbc-cmake)
if (USE_INTERNAL_XZ_LIBRARY)
add_subdirectory (xz)
@ -320,10 +321,6 @@ if (USE_LIBPQXX)
add_subdirectory (libpqxx-cmake)
endif()
if (USE_NANODBC)
add_subdirectory (nanodbc-cmake)
endif()
if (USE_NURAFT)
add_subdirectory(nuraft-cmake)
endif()


@ -1,3 +1,7 @@
if (NOT USE_INTERNAL_NANODBC_LIBRARY)
return ()
endif ()
set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/nanodbc)
if (NOT TARGET unixodbc)

contrib/zlib-ng vendored

@ -1 +1 @@
Subproject commit 7f254522fd676ff4e906c6d4e9b30d4df4214c2d
Subproject commit 5cc4d232020dc66d1d6c5438834457e2a2f6127b


@ -42,15 +42,12 @@ RUN apt-get update \
clang-tidy-10 \
clang-tidy-11 \
cmake \
curl \
g++-9 \
gcc-9 \
gdb \
git \
gperf \
intel-opencl-icd \
libicu-dev \
libreadline-dev \
lld-10 \
@ -61,10 +58,7 @@ RUN apt-get update \
llvm-11-dev \
moreutils \
ninja-build \
ocl-icd-libopencl1 \
opencl-headers \
pigz \
pixz \
rename \
tzdata \
--yes --no-install-recommends


@ -35,9 +35,6 @@ RUN apt-get update \
libjemalloc-dev \
libmsgpack-dev \
libcurl4-openssl-dev \
opencl-headers \
ocl-icd-libopencl1 \
intel-opencl-icd \
unixodbc-dev \
odbcinst \
tzdata \


@ -308,10 +308,8 @@ function run_tests
01354_order_by_tuple_collate_const
01355_ilike
01411_bayesian_ab_testing
01532_collate_in_low_cardinality
01533_collate_in_nullable
01542_collate_in_array
01543_collate_in_tuple
collate
collation
_orc_
arrow
avro


@ -14,9 +14,7 @@ RUN apt-get --allow-unauthenticated update -y \
expect \
gdb \
gperf \
heimdal-multidev \
intel-opencl-icd \
libboost-filesystem-dev \
libboost-iostreams-dev \
libboost-program-options-dev \
@ -50,9 +48,7 @@ RUN apt-get --allow-unauthenticated update -y \
moreutils \
ncdu \
netcat-openbsd \
ocl-icd-libopencl1 \
odbcinst \
opencl-headers \
openssl \
perl \
pigz \


@ -27,53 +27,20 @@ Or cmake3 instead of cmake on older systems.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
For other Linux distributions, check the availability of [prebuilt packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).
#### Use clang-11 for Builds {#use-gcc-10-for-builds}
#### Use clang-11 for Builds
``` bash
$ export CC=clang-11
$ export CXX=clang++-11
```
### Install GCC 10 {#install-gcc-10}
We recommend building ClickHouse with clang-11; GCC-10 is also supported, but it is not used for production builds.
If you want to use GCC-10 there are several ways to install it.
#### Install from Repository {#install-from-repository}
On Ubuntu 19.10 or newer:
$ sudo apt-get update
$ sudo apt-get install gcc-10 g++-10
#### Install from a PPA Package {#install-from-a-ppa-package}
On older Ubuntu:
``` bash
$ sudo apt-get install software-properties-common
$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
$ sudo apt-get update
$ sudo apt-get install gcc-10 g++-10
```
#### Install from Sources {#install-from-sources}
See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
#### Use GCC 10 for Builds {#use-gcc-10-for-builds}
``` bash
$ export CC=gcc-10
$ export CXX=g++-10
```
GCC can also be used, though it is discouraged.
### Checkout ClickHouse Sources {#checkout-clickhouse-sources}


@ -131,17 +131,18 @@ ClickHouse uses several external libraries for building. All of them do not need
## C++ Compiler {#c-compiler}
Compilers GCC starting from version 10 and Clang version 8 or above are supported for building ClickHouse.
Clang starting from version 11 is supported for building ClickHouse.
Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). And Clang is more convenient for development usually. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations.
Clang should be used instead of gcc. That said, our continuous integration (CI) platform runs checks for about a dozen build combinations.
To install GCC on Ubuntu run: `sudo apt install gcc g++`
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
Check the version of gcc: `gcc --version`. If it is below 10, then follow the instruction here: https://clickhouse.tech/docs/en/development/build/#install-gcc-10.
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
Mac OS X build is supported only for Clang. Just run `brew install llvm`
Mac OS X build is also supported. Just run `brew install llvm`
If you decide to use Clang, you can also install `libc++` and `lld`, if you know what it is. Using `ccache` is also recommended.
## The Building Process {#the-building-process}
@ -152,14 +153,7 @@ Now that you are ready to build ClickHouse we recommend you to create a separate
You can have several different directories (build_release, build_debug, etc.) for different types of build.
While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify compiler (version 10 gcc compiler in this example).
Linux:
export CC=gcc-10 CXX=g++-10
cmake ..
Mac OS X:
While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify the compiler.
export CC=clang CXX=clang++
cmake ..


@ -701,7 +701,7 @@ But other things being equal, cross-platform or portable code is preferred.
**2.** Language: C++20 (see the list of available [C++20 features](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).
**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled using version 9.3. (It can also be compiled using `clang 8`.)
**3.** Compiler: `clang`. At this time (April 2021), the code is compiled using clang version 11. (It can also be compiled using `gcc` version 10, but it's untested and not suitable for production usage).
The standard library is used (`libc++`).
@ -711,7 +711,7 @@ The standard library is used (`libc++`).
The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2.
**6.** Use `-Wall -Wextra -Werror` compilation flags.
**6.** Use `-Wall -Wextra -Werror` compilation flags. `-Weverything` is also used, with a few exceptions.
**7.** Use static linking with all libraries except those that are difficult to connect to statically (see the output of the `ldd` command).


@ -12,6 +12,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
3. Copy the link to `clickhouse` binary for amd64 or aarch64.
4. ssh to the server and download it with wget:
```bash
# These links are outdated, please obtain the fresh link from the "commits" page.
# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse
# For aarch64:


@ -854,8 +854,6 @@ For example, when reading from a table, if it is possible to evaluate expression
Default value: the number of physical CPU cores.
If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.
For queries that are completed quickly because of a LIMIT, you can set a lower max_threads. For example, if the necessary number of entries are located in every block and max_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.
The smaller the `max_threads` value, the less memory is consumed.
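For illustration, a minimal sketch of this advice (the table name `hits` is hypothetical):

``` sql
-- A small LIMIT query: one thread is enough here, and fewer threads
-- mean fewer blocks read and less memory consumed.
SELECT UserID
FROM hits
LIMIT 10
SETTINGS max_threads = 1;
```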


@ -15,16 +15,16 @@ Columns:
- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue, one of:
- `GET_PART` - Get the part from another replica.
- `ATTACH_PART` - Attach the part, possibly from our own replica (if found in `detached` folder). You may think of it as a `GET_PART` with some optimisations as they're nearly identical.
- `MERGE_PARTS` - Merge the parts.
- `DROP_RANGE` - Delete the parts in the specified partition in the specified number range.
- `CLEAR_COLUMN` - NOTE: Deprecated. Drop specific column from specified partition.
- `CLEAR_INDEX` - NOTE: Deprecated. Drop specific index from specified partition.
- `REPLACE_RANGE` - Drop certain range of partitions and replace them by new ones.
- `MUTATE_PART` - Apply one or several mutations to the part.
- `ALTER_METADATA` - Apply alter modification according to global /metadata and /columns paths.
- `GET_PART` — Get the part from another replica.
- `ATTACH_PART` — Attach the part, possibly from our own replica (if found in the `detached` folder). You may think of it as a `GET_PART` with some optimizations as they're nearly identical.
- `MERGE_PARTS` — Merge the parts.
- `DROP_RANGE` — Delete the parts in the specified partition in the specified number range.
- `CLEAR_COLUMN` — NOTE: Deprecated. Drop specific column from specified partition.
- `CLEAR_INDEX` — NOTE: Deprecated. Drop specific index from specified partition.
- `REPLACE_RANGE` — Drop a certain range of parts and replace them with new ones.
- `MUTATE_PART` — Apply one or several mutations to the part.
- `ALTER_METADATA` — Apply alter modification according to global /metadata and /columns paths.
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
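A quick way to see which of these task types are currently queued (a sketch; it assumes a replicated setup is running):

``` sql
SELECT type, count()
FROM system.replication_queue
GROUP BY type
ORDER BY count() DESC;
```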


Example 2: `uniqArray(arr)` – Counts the number of unique elements in all 'arr' arguments.
## -SimpleState {#agg-functions-combinator-simplestate}
If you apply this combinator, the aggregate function returns the same value but with a different type. This is an `SimpleAggregateFunction(...)` that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines.
If you apply this combinator, the aggregate function returns the same value but with a different type. This is a [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md) that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) tables.
**Syntax**
``` sql
<aggFunction>SimpleState(x)
```
**Arguments**
- `x` — Aggregate function parameters.
**Returned values**
The value of an aggregate function with the `SimpleAggregateFunction(...)` type.
**Example**
Query:
``` sql
WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
```
Result:
``` text
┌─toTypeName(c)────────────────────────┬─c─┐
│ SimpleAggregateFunction(any, UInt64) │ 0 │
└──────────────────────────────────────┴───┘
```
## -State {#agg-functions-combinator-state}
@ -249,4 +279,3 @@ FROM people
└────────┴───────────────────────────┘
```


@ -2,6 +2,8 @@
`SimpleAggregateFunction(name, types_of_arguments…)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we don't have to store and process any extra data.
The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix.
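For illustration, a minimal sketch of both halves of that workflow (the table and column names are hypothetical):

``` sql
-- Store only the current maximum per key instead of a full aggregation state.
CREATE TABLE simple_agg_demo
(
    key UInt64,
    max_value SimpleAggregateFunction(max, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY key;

-- Values produced with the -SimpleState suffix have the matching type.
INSERT INTO simple_agg_demo SELECT 1, maxSimpleState(number) FROM numbers(10);
```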
The following aggregate functions are supported:
- [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any)


@ -1192,6 +1192,109 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
└──────────────────────────────────────────┘
```
## indexHint {#indexhint}
The function is intended for debugging and introspection. It ignores its argument and always returns 1; the arguments are not even evaluated.
But for the purpose of index analysis, the argument of this function is analyzed as if it were present directly, without being wrapped inside the `indexHint` function. This allows selecting data in index ranges by the corresponding condition, but without further filtering by this condition. Since the index in ClickHouse is sparse, using `indexHint` will yield more data than specifying the same condition directly.
**Syntax**
```sql
SELECT * FROM table WHERE indexHint(<expression>)
```
**Returned value**
1. Type: [UInt8](https://clickhouse.yandex/docs/en/data_types/int_uint/#diapazony-uint).
**Example**
Here is an example using the test data of the [ontime](../../getting-started/example-datasets/ontime.md) table.
Input table:
```sql
SELECT count() FROM ontime
```
```text
┌─count()─┐
│ 4276457 │
└─────────┘
```
The table has indexes on the fields `(FlightDate, (Year, FlightDate))`.
Run a query where the index is not used.
Query:
```sql
SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
```
ClickHouse processed the entire table (`Processed 4.28 million rows`).
Result:
```text
┌──────────k─┬─count()─┐
│ 2017-01-01 │ 13970 │
│ 2017-01-02 │ 15882 │
........................
│ 2017-09-28 │ 16411 │
│ 2017-09-29 │ 16384 │
│ 2017-09-30 │ 12520 │
└────────────┴─────────┘
```
To apply the index, select a specific date.
Query:
```sql
SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
```
By using the index, ClickHouse processed a significantly smaller number of rows (`Processed 32.74 thousand rows`).
Result:
```text
┌──────────k─┬─count()─┐
│ 2017-09-15 │ 16428 │
└────────────┴─────────┘
```
Now wrap the expression `k = '2017-09-15'` in the `indexHint` function.
Query:
```sql
SELECT
FlightDate AS k,
count()
FROM ontime
WHERE indexHint(k = '2017-09-15')
GROUP BY k
ORDER BY k ASC
```
ClickHouse used the index in the same way as the previous time (`Processed 32.74 thousand rows`).
The expression `k = '2017-09-15'` was not used when generating the result.
In this example, the `indexHint` function makes it possible to see the adjacent dates.
Result:
```text
┌──────────k─┬─count()─┐
│ 2017-09-14 │ 7071 │
│ 2017-09-15 │ 16428 │
│ 2017-09-16 │ 1077 │
│ 2017-09-30 │ 8167 │
└────────────┴─────────┘
```
## replicate {#other-functions-replicate}
Creates an array with a single value.


@ -88,12 +88,10 @@ Read more about setting the partition expression in a section [How to specify th
This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.
If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own
`detached` folder, it attaches the data without fetching it from other replicas.
If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own `detached` folder, it attaches the data without fetching it from other replicas.
If there is no part with the correct checksums, the data is downloaded from any replica having the part.
You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the
table on all replicas.
You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
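A sketch of that flow (the table name and partition are hypothetical):

``` sql
-- On one replica, after placing the part data into its detached directory:
ALTER TABLE visits ATTACH PARTITION 201901;
-- The other replicas then pick up the attached data as described above.
```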
## ATTACH PARTITION FROM {#alter_attach-partition-from}
@ -101,8 +99,8 @@ table on all replicas.
ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
```
This query copies the data partition from the `table1` to `table2`.
Note that data won't be deleted neither from `table1` nor from `table2`.
This query copies the data partition from `table1` to `table2`.
Note that data will be deleted from neither `table1` nor `table2`.
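For example (hypothetical tables; both must have the same structure and partition key):

``` sql
ALTER TABLE visits2 ATTACH PARTITION 201902 FROM visits1;
```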
For the query to run successfully, the following conditions must be met:


@ -264,9 +264,7 @@ Wait until a `ReplicatedMergeTree` table will be synced with other replicas in a
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
```
After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from
the common replicated log into its own replication queue, and then the query waits till the replica processes all
of the fetched commands.
After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands.
### RESTART REPLICA {#query_language-system-restart-replica}


@ -19,28 +19,17 @@ $ sudo apt-get install git cmake python ninja-build
Or use cmake3 instead of cmake on older systems.
## Install GCC 9 {#install-gcc-10}
## Install Clang 11
There are several ways to do this.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
### Install from a PPA Package {#install-from-a-ppa-package}
``` bash
$ sudo apt-get install software-properties-common
$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
$ sudo apt-get update
$ sudo apt-get install gcc-10 g++-10
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
### Install from Sources {#install-from-sources}
See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
## Use GCC 9 for Builds {#use-gcc-10-for-builds}
``` bash
$ export CC=gcc-10
$ export CXX=g++-10
$ export CC=clang
$ export CXX=clang++
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
@ -76,7 +65,7 @@ $ cd ..
- Git (used only to check out the sources; not needed for the build)
- CMake 3.10 or newer
- Ninja (recommended) or Make
- C++ compiler: gcc 9 or clang 8 or newer
- C++ compiler: clang 11 or newer
- Linker: lld or gold (the classic GNU ld will not work)
- Python (only used inside the LLVM build, and optional)


@ -133,19 +133,19 @@ If you use Arch or Gentoo, you probably know how to install CMake yourself.
ClickHouse uses several external libraries for building. All of them are built together with ClickHouse from the sources located in the submodules, so you do not need to install them separately. The list can be checked in `contrib`.
# C++ Compiler {#c-compiler}
## C++ Compiler {#c-compiler}
Compilers GCC starting from version 9 and Clang version 8 or above are supported for building ClickHouse.
Clang starting from version 11 is supported for building ClickHouse.
Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). Clang is usually more convenient for development. Our continuous integration (CI) platform runs checks for about a dozen build combinations.
Clang should be used instead of gcc. That said, our continuous integration (CI) platform runs checks for about a dozen build combinations.
To install GCC on Ubuntu, run: `sudo apt install gcc g++`
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
Check the version of gcc: `gcc --version`. If it is below 9, follow the instructions here: https://clickhouse.tech/docs/ja/development/build/#install-gcc-10.
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
Mac OS X builds are supported only for Clang. Just run `brew install llvm`
If you decide to use Clang, you can also install `libc++` and `lld`, if you know what they are. Using `ccache` is also recommended.
Mac OS X build is also supported. Just run `brew install llvm`
# The Building Process {#the-building-process}
@ -158,13 +158,6 @@ Now that you are ready to build ClickHouse, we recommend creating a separate directory
While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify the compiler (gcc version 9 in this example).
Linux:
export CC=gcc-10 CXX=g++-10
cmake ..
Mac OS X:
export CC=clang CXX=clang++
cmake ..


@ -136,18 +136,18 @@ ClickHouse uses a number of external libraries for the build
## C++ Compiler {#kompiliator-c}
GCC starting from version 9 or Clang starting from version 8 is supported as the C++ compiler.
Clang starting from version 11 is supported as the C++ compiler.
Official Yandex builds currently use GCC, since it generates slightly more performant machine code (up to a few percent difference on average, according to our benchmarks). Clang is usually more convenient for development. That said, our continuous integration environment checks about a dozen build variants.
That said, our continuous integration environment checks about a dozen build variants, including gcc, but builds made with gcc are unsuitable for production use.
To install GCC on Ubuntu, run: `sudo apt install gcc g++`.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
Check the gcc version: `gcc --version`. If it is below 10, follow the instructions: https://clickhouse.tech/docs/ru/development/build/#install-gcc-10.
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
Builds for Mac OS X are supported only for the Clang compiler. To install it, run `brew install llvm`
If you decide to use Clang, you can also install `libc++` and `lld`, if you know what they are. If you like, install `ccache` as well.
## The Build Process {#protsess-sborki}
Now you are ready to build ClickHouse. To hold the build output, it is recommended to create a separate build directory inside the ClickHouse directory:
@ -158,14 +158,7 @@ ClickHouse uses a number of external libraries for the build
You can have several different directories (build_release, build_debug) for different build variants.
While inside the build directory, configure the build by running CMake.
Before the first run, you need to set the environment variables that select the compiler (in this example, gcc version 9).
Linux:
export CC=gcc-10 CXX=g++-10
cmake ..
Mac OS X:
Before the first run, you need to set the environment variables that select the compiler.
export CC=clang CXX=clang++
cmake ..


@ -747,7 +747,7 @@ The dictionary is configured incorrectly.
There are two main ways to check for such errors:
* An exception with the `LOGICAL_ERROR` code. It can be used for important checks that are performed even in release builds.
* `assert`. Such conditions are not checked in release builds; they can be used for heavy and optional checks.
An example of a message that should have the `LOGICAL_ERROR` code:
`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
@ -780,7 +780,7 @@ The dictionary is configured incorrectly.
**2.** Language: C++20 (see the list of available [C++20 features](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).
**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled with version 9.3. (It can also be compiled with `clang` versions 10 and 9.)
**3.** Compiler: `clang`. At this time (April 2021), the code is compiled with clang version 11. (It can also be compiled with `gcc` version 10, but such builds are untested and unsuitable for production.)
The standard library is used (the `libc++` implementation).


@ -844,8 +844,6 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'
Default value: the number of CPU cores, not counting Hyper-Threading.
If fewer than one SELECT query normally runs on the server at a time, set this parameter to a value slightly below the actual number of CPU cores.
For queries that finish quickly because of a LIMIT, it makes sense to set a lower max_threads. For example, if the required number of entries is present in every block, then with max_threads = 8, 8 blocks will be read, although reading just one would have been enough.
The smaller `max_threads` is, the less memory is consumed.


@ -14,7 +14,17 @@
- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue:
    - `GET_PART` — Fetch the part from another replica.
    - `ATTACH_PART` — Attach the part, possibly from our own replica (if it is found in the `detached` folder). This task is almost identical to `GET_PART`, just slightly optimized.
    - `MERGE_PARTS` — Merge the parts.
    - `DROP_RANGE` — Delete the parts in the specified partition within the specified number range.
    - `CLEAR_COLUMN` — Delete the specified column from the specified partition. Note: deprecated since 20.4.
    - `CLEAR_INDEX` — Delete the specified index from the specified partition. Note: deprecated since 20.4.
    - `REPLACE_RANGE` — Delete the specified range of parts and replace them with new ones.
    - `MUTATE_PART` — Apply one or several mutations to a part.
    - `ALTER_METADATA` — Apply table structure changes resulting from `ALTER` queries.
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
@ -77,4 +87,3 @@ last_postpone_time: 1970-01-01 03:00:00
**See also**
- [Managing ReplicatedMergeTree tables](../../sql-reference/statements/system.md#query-language-system-replicated)


@ -18,10 +18,12 @@ ClickHouse creates this table when the corresponding server setting is set
When connecting to the server via `clickhouse-client`, you see a string like `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the number after `revision`, but not the string after `version`.
- `timer_type`([Enum8](../../sql-reference/data-types/enum.md)) — Timer type:
- `trace_type`([Enum8](../../sql-reference/data-types/enum.md)) — Trace type:
    - `Real` means wall-clock time.
    - `CPU` means relative CPU time.
    - `Real` — collecting call-stack traces by wall-clock time.
    - `CPU` — collecting call-stack traces by CPU time.
    - `Memory` — collecting allocations when the amount of allocated memory exceeds the relative increment.
    - `MemorySample` — collecting random memory allocations.
- `thread_number`([UInt32](../../sql-reference/data-types/int-uint.md)) — Thread identifier.
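For example, to see which trace types have been collected (a sketch; it assumes the trace log is enabled on the server):

``` sql
SELECT trace_type, count()
FROM system.trace_log
GROUP BY trace_type;
```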


@ -27,6 +27,40 @@ toc_title: "Aggregate function combinators"
The -If and -Array combinators can be combined. In this case, Array must come first, then If. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Because of this order, the cond argument must not be an array.
## -SimpleState {#agg-functions-combinator-simplestate}
If you apply this combinator, the aggregate function returns the same value but of the type [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md). The current value of the function can be stored in a table for later work with tables of the [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) family.
**Syntax**
``` sql
<aggFunction>SimpleState(x)
```
**Arguments**
- `x` — Aggregate function parameters.
**Returned value**
The value of an aggregate function with the `SimpleAggregateFunction(...)` type.
**Example**
Query:
``` sql
WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
```
Result:
``` text
┌─toTypeName(c)────────────────────────┬─c─┐
│ SimpleAggregateFunction(any, UInt64) │ 0 │
└──────────────────────────────────────┴───┘
```
## -State {#state}
If you apply this combinator, the aggregate function returns not the final value (for example, for the [uniq](reference/uniq.md#agg_function-uniq) function, the number of unique values) but an intermediate aggregation state (for `uniq`, the hash table used to calculate the number of unique values). The result has the type `AggregateFunction(...)` and can be used for further processing or stored in a table for later re-aggregation.
@ -247,4 +281,3 @@ FROM people
│ [3,2] │ [11.5,12.949999809265137] │
└────────┴───────────────────────────┘
```


@ -3,6 +3,8 @@
Stores only the current value of the aggregate function and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions with the following property: the result of applying the function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately,
and then applying `f` again to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so no extra data has to be stored or processed.
To produce such an intermediate value, aggregate functions with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix are typically used.
The following aggregate functions are supported:
- [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any)


@ -1133,6 +1133,111 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
└──────────────────────────────────────────┘
```
## indexHint {#indexhint}
Returns all data from the range that contains the data matching the given expression.
The passed expression is not evaluated. The range is selected using the index.
The index in ClickHouse is sparse, so when a range is read, "extra" neighboring data ends up in the response.
**Syntax**
```sql
SELECT * FROM table WHERE indexHint(<expression>)
```
**Returned value**
Returns the index range in which the given condition holds.
Type: [UInt8](https://clickhouse.yandex/docs/ru/data_types/int_uint/#diapazony-uint).
**Example**
Consider an example using the test data of the [ontime](../../getting-started/example-datasets/ontime.md) table.
Input table:
```sql
SELECT count() FROM ontime
```
```text
┌─count()─┐
│ 4276457 │
└─────────┘
```
The table has indexes on the fields `(FlightDate, (Year, FlightDate))`.
Run a selection by date where the index is not used.
Query:
```sql
SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
```
ClickHouse processed the entire table (`Processed 4.28 million rows`).
Result:
```text
┌──────────k─┬─count()─┐
│ 2017-01-01 │ 13970 │
│ 2017-01-02 │ 15882 │
........................
│ 2017-09-28 │ 16411 │
│ 2017-09-29 │ 16384 │
│ 2017-09-30 │ 12520 │
└────────────┴─────────┘
```
To engage the index, select a specific date.
Query:
```sql
SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
```
Using the index, ClickHouse processed significantly fewer rows (`Processed 32.74 thousand rows`).
Result:
```text
┌──────────k─┬─count()─┐
│ 2017-09-15 │ 16428 │
└────────────┴─────────┘
```
Now pass the expression `k = '2017-09-15'` to the `indexHint` function.
Query:
```sql
SELECT
FlightDate AS k,
count()
FROM ontime
WHERE indexHint(k = '2017-09-15')
GROUP BY k
ORDER BY k ASC
```
ClickHouse used the index in the same way as in the example above (`Processed 32.74 thousand rows`).
The expression `k = '2017-09-15'` is not used when generating the result.
The `indexHint` function makes it possible to see the adjacent data.
Result:
```text
┌──────────k─┬─count()─┐
│ 2017-09-14 │ 7071 │
│ 2017-09-15 │ 16428 │
│ 2017-09-16 │ 1077 │
│ 2017-09-30 │ 8167 │
└────────────┴─────────┘
```
## replicate {#other-functions-replicate}
Creates an array filled with a single value.


@ -38,7 +38,7 @@ ALTER TABLE mt DETACH PART 'all_2_2_0';
After the query is executed, you can do whatever you want with the data in the `detached` directory. For example, you can delete it from the file system.
The query is replicated — the data will be moved to the `detached` directory and forgotten on all replicas. Note that the query can only be sent to a leader replica. To find out whether a replica is a leader, run a `SELECT` query against the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) system table. Alternatively, you can run a `DETACH` query on all replicas — then on all replicas except the leader replica, the query will return an error.
The query is replicated — the data will be moved to the `detached` directory and forgotten on all replicas. Note that the query can only be sent to a leader replica. To find out whether a replica is a leader, run a `SELECT` query against the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) system table. Alternatively, you can run a `DETACH` query on all replicas — then on all replicas except the leader replicas (since multiple leaders are allowed), the query will return an error.
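To check leadership directly, one can query the same system table; a minimal sketch (the table name `mt` is illustrative):

``` sql
SELECT replica_name, is_leader
FROM system.replicas
WHERE table = 'mt';
```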
## DROP PARTITION\|PART {#alter_drop-partition}
@ -83,9 +83,13 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;
For how to correctly specify the name of a partition or part, see [How to set the partition expression in ALTER queries](#alter-how-to-specify-part-expr).
This query is replicated. The initiator replica checks whether there is data in the `detached` directory. If the data exists, the query checks its integrity. If everything is correct, the data is added to the table. All other replicas download the data from the initiator replica.
This query is replicated. The initiator replica checks whether there is data in the `detached` directory.
If the data exists, the query checks its integrity. If everything is correct, the data is added to the table.
This means that you can put data into the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
If a replica that is not the initiator of the query, upon receiving the attach command, finds a part with the correct checksums in its own `detached` folder, it attaches the data without downloading it from other replicas.
If there is no part with the correct checksums, the data is downloaded from any replica that has the part.
You can put data into the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
## ATTACH PARTITION FROM {#alter_attach-partition-from}
@ -93,7 +97,8 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;
ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
```
Copies the data partition from `table1` to `table2` and adds it to the existing data of `table2`. Data in `table1` is not deleted.
Copies the data partition from `table1` to `table2`.
Note that data is deleted from neither `table1` nor `table2`.
Keep in mind:
@ -305,4 +310,3 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
`IN PARTITION` specifies the partition to which the [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) or [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) expressions are applied as a result of an `ALTER TABLE` query. New parts are created only in the specified partition. This makes `IN PARTITION` useful for reducing the load when the table is split into many partitions and you only need to update the data point by point.
Examples of `ALTER ... PARTITION` queries can be found in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).


@ -204,6 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]
ClickHouse can manage background processes related to replication for tables of the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) family.
### STOP FETCHES {#query_language-system-stop-fetches}
Allows stopping the background processes that fetch newly inserted data parts from other replicas in the cluster, for tables of the `ReplicatedMergeTree` family:
Always returns `Ok.`, regardless of the table type and even if the table or database does not exist.
@ -212,6 +213,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name]
```
### START FETCHES {#query_language-system-start-fetches}
Allows starting the background processes that fetch newly inserted data parts from other replicas in the cluster, for tables of the `ReplicatedMergeTree` family:
Always returns `Ok.`, regardless of the table type and even if the table or database does not exist.
@ -220,6 +222,7 @@ SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name]
```
### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends}
Allows stopping the background processes that send newly inserted data parts to other replicas in the cluster, for tables of the `ReplicatedMergeTree` family:
``` sql
@ -227,6 +230,7 @@ SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
```
### START REPLICATED SENDS {#query_language-system-start-replicated-sends}
Allows starting the background processes that send newly inserted data parts to other replicas in the cluster, for tables of the `ReplicatedMergeTree` family:
``` sql
@ -234,6 +238,7 @@ SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
```
### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues}
Stops the background processing of tasks from the replication queue stored in ZooKeeper for tables of the `ReplicatedMergeTree` family. The possible task types are merges, fetches, mutations, and DDL queries with ON CLUSTER:
``` sql
@ -241,6 +246,7 @@ SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
```
### START REPLICATION QUEUES {#query_language-system-start-replication-queues}
Starts the background processing of tasks from the replication queue stored in ZooKeeper for tables of the `ReplicatedMergeTree` family. The possible task types are merges, fetches, mutations, and DDL queries with ON CLUSTER:
``` sql
@ -248,20 +254,24 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
```
### SYNC REPLICA {#query_language-system-sync-replica}
Waits until a table of the `ReplicatedMergeTree` family is synchronized with the other replicas in the cluster; if fetches are currently disabled for the table, it runs until `receive_timeout` is reached:
``` sql
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
```
After this statement is executed, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue, and the query then waits until the replica processes all of the fetched commands.
### RESTART REPLICA {#query_language-system-restart-replica}
Reinitializes the ZooKeeper session state for a table of the `ReplicatedMergeTree` family, compares the current state against ZooKeeper as the source of truth, and adds tasks to the ZooKeeper queue if necessary.
The replication queue is initialized from the ZooKeeper data in the same way as for attach table. For a short time the table becomes unavailable for any operations.
Reinitializes the ZooKeeper session state for a table of the `ReplicatedMergeTree` family. Compares the current state against what is stored in ZooKeeper, as the source of truth, and adds tasks to the replication queue in ZooKeeper if necessary.
The replication queue is initialized from the ZooKeeper data in the same way as for attach table. For a short time the table becomes unavailable for any operations.
``` sql
SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
```
### RESTART REPLICAS {#query_language-system-restart-replicas}
Reinitializes the ZooKeeper session state for all `ReplicatedMergeTree` tables, compares the current state against ZooKeeper as the source of truth, and adds tasks to the ZooKeeper queue if necessary.
Reinitializes the ZooKeeper session state for all `ReplicatedMergeTree` tables. Compares the current replica state against what is stored in ZooKeeper, as the source of truth, and adds tasks to the replication queue in ZooKeeper if necessary.


@ -35,28 +35,12 @@ sudo apt-get install git cmake ninja-build
Or cmake3 instead of cmake on older systems.
Or use cmake3 instead of cmake on systems with earlier versions.
## Install GCC 10 {#an-zhuang-gcc-10}
## Install Clang
There are several ways to do this.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
### Install from a PPA Package {#an-zhuang-ppa-bao}
``` bash
sudo apt-get install software-properties-common
sudo apt-add-repository ppa:ubuntu-toolchain-r/test
sudo apt-get update
sudo apt-get install gcc-10 g++-10
```
### Install GCC from Sources {#yuan-ma-an-zhuang-gcc}
See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
## Use GCC 10 for Builds {#shi-yong-gcc-10-lai-bian-yi}
``` bash
export CC=gcc-10
export CXX=g++-10
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
## Checkout ClickHouse Sources {#la-qu-clickhouse-yuan-ma-1}


@ -123,17 +123,13 @@ ClickHouse uses several external libraries for building. Most of them do not need to be installed separately
# C++ Compiler {#c-bian-yi-qi}
GCC starting from version 9 and Clang version \>= 8 are supported for building ClickHouse.
We support clang starting from version 11.
Yandex officially builds ClickHouse with GCC because the machine code it generates performs slightly better (up to a few percent difference according to our benchmarks). Clang is usually more convenient for development. Our continuous integration (CI) platform runs checks for about a dozen build combinations.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
To install GCC on Ubuntu, run: `sudo apt install gcc g++`
Check the gcc version with `gcc --version`. If it is below 9, follow the instructions here: https://clickhouse.tech/docs/zh/development/build/#an-zhuang-gcc-10 .
To install GCC on Mac OS X, run `brew install gcc`
If you decide to use Clang, you can also install `libc++` and `lld`, provided you are familiar with them. Using `ccache` is also recommended.
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
# The Building Process {#gou-jian-de-guo-cheng}
@ -146,7 +142,7 @@ Yandex officially builds ClickHouse with GCC because the machine code it generates
While inside the `build` directory, configure the build by running CMake. Before the first run, define the environment variables that specify the compiler (gcc 9 in this example).
export CC=gcc-10 CXX=g++-10
export CC=clang CXX=clang++
cmake ..
The `CC` variable specifies the C compiler (short for C Compiler), and the `CXX` variable specifies which C++ compiler to use for the build.


@ -696,7 +696,7 @@ auto s = std::string{"Hello"};
**2.** Language: C++20.
**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled with version 9.3. (It can also be compiled with `clang 8`.)
**3.** Compiler: `clang`. At this time (March 2021), the code is compiled with clang version 11. (It can also be compiled with `gcc`, but it is not suitable for production.)
The standard library is used (`libc++`).


@ -477,6 +477,103 @@ FROM
1 rows in set. Elapsed: 0.002 sec.
## indexHint {#indexhint}
Outputs all data in the range selected by the index, without filtering by the expression given as the argument.
The expression passed to the function is not evaluated, but ClickHouse uses the expression for index filtering.
**Returned value**
- 1.
**Example**
Here is a test table with the [ontime](../../getting-started/example-datasets/ontime.md) test dataset.
```
SELECT count() FROM ontime
┌─count()─┐
│ 4276457 │
└─────────┘
```
The table uses `(FlightDate, (Year, FlightDate))` as its index.
Run the following query against the table:
```
:) SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
SELECT
FlightDate AS k,
count()
FROM ontime
GROUP BY k
ORDER BY k ASC
┌──────────k─┬─count()─┐
│ 2017-01-01 │ 13970 │
│ 2017-01-02 │ 15882 │
........................
│ 2017-09-28 │ 16411 │
│ 2017-09-29 │ 16384 │
│ 2017-09-30 │ 12520 │
└────────────┴─────────┘
273 rows in set. Elapsed: 0.072 sec. Processed 4.28 million rows, 8.55 MB (59.00 million rows/s., 118.01 MB/s.)
```
In this query, since no index is used, ClickHouse processes all the data of the entire table (`Processed 4.28 million rows`). Use the following query to try engaging the index:
```
:) SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
SELECT
FlightDate AS k,
count()
FROM ontime
WHERE k = '2017-09-15'
GROUP BY k
ORDER BY k ASC
┌──────────k─┬─count()─┐
│ 2017-09-15 │ 16428 │
└────────────┴─────────┘
1 rows in set. Elapsed: 0.014 sec. Processed 32.74 thousand rows, 65.49 KB (2.31 million rows/s., 4.63 MB/s.)
```
As shown in the last line, by using the index ClickHouse processed noticeably fewer rows (`Processed 32.74 thousand rows`).
Now pass the expression `k = '2017-09-15'` to the `indexHint` function:
```
:) SELECT FlightDate AS k, count() FROM ontime WHERE indexHint(k = '2017-09-15') GROUP BY k ORDER BY k
SELECT
FlightDate AS k,
count()
FROM ontime
WHERE indexHint(k = '2017-09-15')
GROUP BY k
ORDER BY k ASC
┌──────────k─┬─count()─┐
│ 2017-09-14 │ 7071 │
│ 2017-09-15 │ 16428 │
│ 2017-09-16 │ 1077 │
│ 2017-09-30 │ 8167 │
└────────────┴─────────┘
4 rows in set. Elapsed: 0.004 sec. Processed 32.74 thousand rows, 65.49 KB (8.97 million rows/s., 17.94 MB/s.)
```
For this query, ClickHouse reports that it applied the index in the same way as before (`Processed 32.74 thousand rows`). However, the final result set was not filtered by the expression `k = '2017-09-15'`.
Because ClickHouse uses a sparse index, "extra" data (the adjacent dates in this example) is included when reading a range. Use the `indexHint` function to see it.
## replicate {#replicate}
Creates an array filled with a single value.


@ -33,8 +33,12 @@ option (ENABLE_CLICKHOUSE_OBFUSCATOR "Table data obfuscator (convert real data t
${ENABLE_CLICKHOUSE_ALL})
# https://clickhouse.tech/docs/en/operations/utilities/odbc-bridge/
option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver"
${ENABLE_CLICKHOUSE_ALL})
if (ENABLE_ODBC)
option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver"
${ENABLE_CLICKHOUSE_ALL})
else ()
option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" OFF)
endif ()
option (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE "HTTP-server working like a proxy to Library dictionary source"
${ENABLE_CLICKHOUSE_ALL})


@ -1,8 +1,11 @@
/access
/dictionaries_lib
/flags
/format_schemas
/metadata
/metadata_dropped
/data
/store
/access
/flags
/dictionaries_lib
/format_schemas
/preprocessed_configs
/shadow
/tmp


@ -1,3 +0,0 @@
*.txt
*.dat
*.idx


@ -1 +0,0 @@
*.sql


@ -79,7 +79,7 @@ public:
place_data->sum += rhs_data->sum + (rhs_data->first - place_data->last);
place_data->last = rhs_data->last;
}
else if ((rhs_data->last < place_data->first && rhs_data->seen_last && place_data->seen_first))
else if ((rhs_data->first < place_data->last && rhs_data->seen_last && place_data->seen_first))
{
// In the opposite scenario, the lhs comes after the rhs, e.g. [4, 6] [1, 2]. Since we
// assume the input interval states are sorted by time, we assume this is a counter
@ -87,9 +87,9 @@ public:
// rhs last value.
place_data->sum += rhs_data->sum;
place_data->first = rhs_data->first;
place_data->last = rhs_data->last;
}
else if (rhs_data->seen_first)
else if (rhs_data->seen_first && !place_data->seen_first)
{
// If we're here then the lhs is an empty state and the rhs does have some state, so
// we'll just take that state.


@ -13,6 +13,7 @@ namespace ErrorCodes
extern const int MISMATCH_REPLICAS_DATA_SOURCES;
extern const int NO_AVAILABLE_REPLICA;
extern const int TIMEOUT_EXCEEDED;
extern const int UNKNOWN_PACKET_FROM_SERVER;
}
@ -278,7 +279,22 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac
Packet packet;
{
AsyncCallbackSetter async_setter(current_connection, std::move(async_callback));
packet = current_connection->receivePacket();
try
{
packet = current_connection->receivePacket();
}
catch (Exception & e)
{
if (e.code() == ErrorCodes::UNKNOWN_PACKET_FROM_SERVER)
{
/// An exception may be thrown while receiving a packet, e.g. when an unknown packet arrives.
/// In this case, invalidate the replica so that we do not read from it anymore.
current_connection->disconnect();
invalidateReplica(state);
}
throw;
}
}
switch (packet.type)


@ -484,7 +484,7 @@ DataTypes Block::getDataTypes() const
template <typename ReturnType>
static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, const std::string & context_description)
static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, const std::string & context_description, bool allow_remove_constants)
{
auto on_error = [](const std::string & message [[maybe_unused]], int code [[maybe_unused]])
{
@ -515,7 +515,16 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, cons
if (!actual.column || !expected.column)
continue;
if (actual.column->getName() != expected.column->getName())
const IColumn * actual_column = actual.column.get();
/// If removing constants is allowed and the expected column is not const, then unwrap the actual constant column.
if (allow_remove_constants && !isColumnConst(*expected.column))
{
if (const auto * column_const = typeid_cast<const ColumnConst *>(actual_column))
actual_column = &column_const->getDataColumn();
}
if (actual_column->getName() != expected.column->getName())
return on_error("Block structure mismatch in " + context_description + " stream: different columns:\n"
+ lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::LOGICAL_ERROR);
@ -537,13 +546,25 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, cons
bool blocksHaveEqualStructure(const Block & lhs, const Block & rhs)
{
return checkBlockStructure<bool>(lhs, rhs, {});
return checkBlockStructure<bool>(lhs, rhs, {}, false);
}
void assertBlocksHaveEqualStructure(const Block & lhs, const Block & rhs, const std::string & context_description)
{
checkBlockStructure<void>(lhs, rhs, context_description);
checkBlockStructure<void>(lhs, rhs, context_description, false);
}
bool isCompatibleHeader(const Block & actual, const Block & desired)
{
return checkBlockStructure<bool>(actual, desired, {}, true);
}
void assertCompatibleHeader(const Block & actual, const Block & desired, const std::string & context_description)
{
checkBlockStructure<void>(actual, desired, context_description, true);
}


@ -184,6 +184,12 @@ bool blocksHaveEqualStructure(const Block & lhs, const Block & rhs);
/// Throw exception when blocks are different.
void assertBlocksHaveEqualStructure(const Block & lhs, const Block & rhs, const std::string & context_description);
/// The actual header is compatible with the desired one if the blocks have equal structure except for constants.
/// A column is allowed to be constant in the actual header while non-constant in the desired one.
/// If both columns are constant, it is checked that they have the same value.
bool isCompatibleHeader(const Block & actual, const Block & desired);
void assertCompatibleHeader(const Block & actual, const Block & desired, const std::string & context_description);
/// Calculate difference in structure of blocks and write description into output strings. NOTE It doesn't compare values of constant columns.
void getBlocksDifference(const Block & lhs, const Block & rhs, std::string & out_lhs_diff, std::string & out_rhs_diff);


@ -225,6 +225,7 @@ class IColumn;
/** Settings for testing hedged requests */ \
M(Milliseconds, sleep_in_send_tables_status_ms, 0, "Time to sleep in sending tables status response in TCPHandler", 0) \
M(Milliseconds, sleep_in_send_data_ms, 0, "Time to sleep in sending data in TCPHandler", 0) \
M(UInt64, unknown_packet_in_send_data, 0, "Send an unknown packet instead of the Nth data packet", 0) \
\
M(Bool, insert_allow_materialized_columns, 0, "If setting is enabled, Allow materialized columns in INSERT.", 0) \
M(Seconds, http_connection_timeout, DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT, "HTTP connection timeout.", 0) \


@ -19,9 +19,12 @@ public:
TimezoneMixin(const TimezoneMixin &) = default;
const DateLUTImpl & getTimeZone() const { return time_zone; }
bool hasExplicitTimeZone() const { return has_explicit_time_zone; }
protected:
/// true if time zone name was provided in data type parameters, false if it's using default time zone.
bool has_explicit_time_zone;
const DateLUTImpl & time_zone;
const DateLUTImpl & utc_time_zone;
};


@ -360,7 +360,8 @@ inline bool isEnum(const DataTypePtr & data_type) { return WhichDataType(data_ty
inline bool isDecimal(const DataTypePtr & data_type) { return WhichDataType(data_type).isDecimal(); }
inline bool isTuple(const DataTypePtr & data_type) { return WhichDataType(data_type).isTuple(); }
inline bool isArray(const DataTypePtr & data_type) { return WhichDataType(data_type).isArray(); }
inline bool isMap(const DataTypePtr & data_type) {return WhichDataType(data_type).isMap(); }
inline bool isMap(const DataTypePtr & data_type) { return WhichDataType(data_type).isMap(); }
inline bool isNothing(const DataTypePtr & data_type) { return WhichDataType(data_type).isNothing(); }
template <typename T>
inline bool isUInt8(const T & data_type)


@ -5,7 +5,6 @@
#include <Functions/CustomWeekTransforms.h>
#include <Functions/IFunctionImpl.h>
#include <Functions/TransformDateTime64.h>
#include <Functions/extractTimeZoneFromFunctionArguments.h>
#include <IO/WriteHelpers.h>


@ -2496,7 +2496,7 @@ private:
}
}
WrapperType createArrayWrapper(const DataTypePtr & from_type_untyped, const DataTypeArray * to_type) const
WrapperType createArrayWrapper(const DataTypePtr & from_type_untyped, const DataTypeArray & to_type) const
{
/// Conversion from String through parsing.
if (checkAndGetDataType<DataTypeString>(from_type_untyped.get()))
@ -2507,24 +2507,23 @@ private:
};
}
DataTypePtr from_nested_type;
DataTypePtr to_nested_type;
const auto * from_type = checkAndGetDataType<DataTypeArray>(from_type_untyped.get());
/// get the most nested type
if (from_type && to_type)
if (!from_type)
{
from_nested_type = from_type->getNestedType();
to_nested_type = to_type->getNestedType();
from_type = checkAndGetDataType<DataTypeArray>(from_nested_type.get());
to_type = checkAndGetDataType<DataTypeArray>(to_nested_type.get());
throw Exception(ErrorCodes::TYPE_MISMATCH,
"CAST AS Array can only be perforamed between same-dimensional Array or String types");
}
/// both from_type and to_type should be nullptr now if array types had same dimensions
if ((from_type == nullptr) != (to_type == nullptr))
throw Exception{"CAST AS Array can only be performed between same-dimensional array types or from String",
ErrorCodes::TYPE_MISMATCH};
DataTypePtr from_nested_type = from_type->getNestedType();
/// In query SELECT CAST([] AS Array(Array(String))) from type is Array(Nothing)
bool from_empty_array = isNothing(from_nested_type);
if (from_type->getNumberOfDimensions() != to_type.getNumberOfDimensions() && !from_empty_array)
throw Exception(ErrorCodes::TYPE_MISMATCH,
"CAST AS Array can only be perforamed between same-dimensional array types");
const DataTypePtr & to_nested_type = to_type.getNestedType();
/// Prepare nested type conversion
const auto nested_function = prepareUnpackDictionaries(from_nested_type, to_nested_type);
@ -3090,14 +3089,12 @@ private:
return createStringWrapper(from_type);
case TypeIndex::FixedString:
return createFixedStringWrapper(from_type, checkAndGetDataType<DataTypeFixedString>(to_type.get())->getN());
case TypeIndex::Array:
return createArrayWrapper(from_type, checkAndGetDataType<DataTypeArray>(to_type.get()));
return createArrayWrapper(from_type, static_cast<const DataTypeArray &>(*to_type));
case TypeIndex::Tuple:
return createTupleWrapper(from_type, checkAndGetDataType<DataTypeTuple>(to_type.get()));
case TypeIndex::Map:
return createMapWrapper(from_type, checkAndGetDataType<DataTypeMap>(to_type.get()));
case TypeIndex::AggregateFunction:
return createAggregateFunctionWrapper(from_type, checkAndGetDataType<DataTypeAggregateFunction>(to_type.get()));
default:

View File

@ -44,9 +44,9 @@ std::string extractTimeZoneNameFromFunctionArguments(const ColumnsWithTypeAndNam
/// If time zone is attached to an argument of type DateTime.
if (const auto * type = checkAndGetDataType<DataTypeDateTime>(arguments[datetime_arg_num].type.get()))
return type->getTimeZone().getTimeZone();
return type->hasExplicitTimeZone() ? type->getTimeZone().getTimeZone() : std::string();
if (const auto * type = checkAndGetDataType<DataTypeDateTime64>(arguments[datetime_arg_num].type.get()))
return type->getTimeZone().getTimeZone();
return type->hasExplicitTimeZone() ? type->getTimeZone().getTimeZone() : std::string();
return {};
}

View File

@ -13,6 +13,7 @@ namespace DB
class Block;
/// Determine working timezone either from optional argument with time zone name or from time zone in DateTime type of argument.
/// Returns empty string if default time zone should be used.
std::string extractTimeZoneNameFromFunctionArguments(
const ColumnsWithTypeAndName & arguments, size_t time_zone_arg_num, size_t datetime_arg_num);

View File

@ -0,0 +1,70 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/DataTypesNumber.h>
namespace DB
{
/** The `indexHint` function takes any number of any arguments and always returns one.
*
* This function has a special meaning (see ExpressionAnalyzer, KeyCondition)
* - the expressions inside it are not evaluated;
* - but when analyzing the index (selecting ranges for reading), this function is treated the same way
* as if the expression inside it were used directly.
*
* Example: WHERE something AND indexHint(CounterID = 34)
* - do not read or calculate CounterID = 34, but select ranges in which the CounterID = 34 expression can be true.
*
* The function can be used for debugging purposes, as well as for (hidden from the user) query conversions.
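*
* Note: since the hinted expression is not actually evaluated, the result may contain rows for which
* it is false; indexHint only narrows the set of ranges to read.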
*/
class FunctionIndexHint : public IFunction
{
public:
static constexpr auto name = "indexHint";
static FunctionPtr create(ContextPtr)
{
return std::make_shared<FunctionIndexHint>();
}
bool isVariadic() const override
{
return true;
}
size_t getNumberOfArguments() const override
{
return 0;
}
bool useDefaultImplementationForNulls() const override { return false; }
bool isSuitableForConstantFolding() const override { return false; }
String getName() const override
{
return name;
}
DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
{
return std::make_shared<DataTypeUInt8>();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName &, const DataTypePtr &, size_t input_rows_count) const override
{
return DataTypeUInt8().createColumnConst(input_rows_count, 1u);
}
ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName &) const override
{
return DataTypeUInt8().createColumnConst(1, 1u);
}
};
void registerFunctionIndexHint(FunctionFactory & factory)
{
factory.registerFunction<FunctionIndexHint>();
}
}

View File

@ -39,7 +39,7 @@ struct DivideIntegralByConstantImpl
static ResultType process(A a, B b) { return Op::template apply<ResultType>(a, b); }
static NO_INLINE void vectorConstant(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
static void NO_INLINE NO_SANITIZE_UNDEFINED vectorConstant(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"

View File

@ -43,7 +43,7 @@ struct ModuloByConstantImpl
static ResultType process(A a, B b) { return Op::template apply<ResultType>(a, b); }
static void NO_INLINE vectorConstant(const A * __restrict src, B b, ResultType * __restrict dst, size_t size)
static void NO_INLINE NO_SANITIZE_UNDEFINED vectorConstant(const A * __restrict src, B b, ResultType * __restrict dst, size_t size)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"

View File

@ -28,6 +28,7 @@ void registerFunctionSleep(FunctionFactory &);
void registerFunctionSleepEachRow(FunctionFactory &);
void registerFunctionMaterialize(FunctionFactory &);
void registerFunctionIgnore(FunctionFactory &);
void registerFunctionIndexHint(FunctionFactory &);
void registerFunctionIdentity(FunctionFactory &);
void registerFunctionArrayJoin(FunctionFactory &);
void registerFunctionReplicate(FunctionFactory &);
@ -101,6 +102,7 @@ void registerFunctionsMiscellaneous(FunctionFactory & factory)
registerFunctionSleepEachRow(factory);
registerFunctionMaterialize(factory);
registerFunctionIgnore(factory);
registerFunctionIndexHint(factory);
registerFunctionIdentity(factory);
registerFunctionArrayJoin(factory);
registerFunctionReplicate(factory);

View File

@ -303,6 +303,7 @@ SRCS(
ignore.cpp
ilike.cpp
in.cpp
indexHint.cpp
initializeAggregation.cpp
intDiv.cpp
intDivOrZero.cpp

View File

@ -181,7 +181,7 @@ const ActionsDAG::Node & ActionsDAG::addFunction(
}
}
/// Some functions like ignore() or getTypeName() always return constant result even if arguments are not constant.
/// Some functions like ignore(), indexHint() or getTypeName() always return constant result even if arguments are not constant.
/// We can't do constant folding, but can specify in sample block that function result is constant to avoid
/// unnecessary materialization.
if (!node.column && node.function_base->isSuitableForConstantFolding())

View File

@ -811,6 +811,14 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
}
}
/// A special function `indexHint`. Everything that is inside it is not calculated
if (node.name == "indexHint")
{
// Arguments are removed. We add function instead of constant column to avoid constant folding.
data.addFunction(FunctionFactory::instance().get("indexHint", data.getContext()), {}, column_name);
return;
}
if (node.is_window_function)
{
// Also add columns from PARTITION BY and ORDER BY of window functions.

View File

@ -1510,7 +1510,8 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
optimize_read_in_order =
settings.optimize_read_in_order
&& storage && query.orderBy()
&& storage
&& query.orderBy()
&& !query_analyzer.hasAggregation()
&& !query_analyzer.hasWindow()
&& !query.final()

View File

@ -15,15 +15,22 @@ namespace ErrorCodes
extern const int MULTIPLE_EXPRESSIONS_FOR_ALIAS;
}
static String wrongAliasMessage(const ASTPtr & ast, const ASTPtr & prev_ast, const String & alias)
namespace
{
WriteBufferFromOwnString message;
message << "Different expressions with the same alias " << backQuoteIfNeed(alias) << ":\n";
formatAST(*ast, message, false, true);
message << "\nand\n";
formatAST(*prev_ast, message, false, true);
message << '\n';
return message.str();
constexpr auto dummy_subquery_name_prefix = "_subquery";
String wrongAliasMessage(const ASTPtr & ast, const ASTPtr & prev_ast, const String & alias)
{
WriteBufferFromOwnString message;
message << "Different expressions with the same alias " << backQuoteIfNeed(alias) << ":\n";
formatAST(*ast, message, false, true);
message << "\nand\n";
formatAST(*prev_ast, message, false, true);
message << '\n';
return message.str();
}
}
@ -99,7 +106,7 @@ void QueryAliasesMatcher<T>::visit(const ASTSubquery & const_subquery, const AST
String alias;
do
{
alias = "_subquery" + std::to_string(++subquery_index);
alias = dummy_subquery_name_prefix + std::to_string(++subquery_index);
}
while (aliases.count(alias));
@ -124,6 +131,30 @@ void QueryAliasesMatcher<T>::visitOther(const ASTPtr & ast, Data & data)
aliases[alias] = ast;
}
/** QueryAliasesVisitor is executed before ExecuteScalarSubqueriesVisitor.
For example, suppose the query contains the subquery (SELECT sum(number) FROM numbers(10)).
After running QueryAliasesVisitor it becomes (SELECT sum(number) FROM numbers(10)) AS _subquery1
and prefer_alias_to_column_name for this subquery is true.
After running ExecuteScalarSubqueriesVisitor it is converted to (45 AS _subquery1)
and prefer_alias_to_column_name for the AST literal is true.
But if we send such a query to a remote host (with the Distributed engine, for example), we cannot pass the
prefer_alias_to_column_name flag for our AST node along with the query string, and the alias would be dropped,
because prefer_alias_to_column_name is false by default for ASTWithAlias.
It is important that the subquery can be converted to a literal during ExecuteScalarSubqueriesVisitor.
The code below checks whether the alias was previously generated with the _subquery prefix, and if so,
sets prefer_alias_to_column_name = true for the node that was optimized during ExecuteScalarSubqueriesVisitor.
*/
if (auto * ast_with_alias = dynamic_cast<ASTWithAlias *>(ast.get()))
{
if (startsWith(alias, dummy_subquery_name_prefix))
ast_with_alias->prefer_alias_to_column_name = true;
}
}
/// Explicit template instantiations

View File

@ -51,8 +51,10 @@ bool RequiredSourceColumnsMatcher::needChildVisit(const ASTPtr & node, const AST
if (const auto * f = node->as<ASTFunction>())
{
/// "indexHint" is a special function for index analysis.
/// Everything that is inside it is not calculated. See KeyCondition
/// "lambda" visit children itself.
if (f->name == "lambda")
if (f->name == "indexHint" || f->name == "lambda")
return false;
}

View File

@ -24,7 +24,6 @@ namespace ErrorCodes
ArrowBlockInputFormat::ArrowBlockInputFormat(ReadBuffer & in_, const Block & header_, bool stream_)
: IInputFormat(header_, in_), stream{stream_}
{
prepareReader();
}
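/// Note: the reader is now created lazily on the first call to generate(), not in the constructor.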
Chunk ArrowBlockInputFormat::generate()
@ -35,12 +34,18 @@ Chunk ArrowBlockInputFormat::generate()
if (stream)
{
if (!stream_reader)
prepareReader();
batch_result = stream_reader->Next();
if (batch_result.ok() && !(*batch_result))
return res;
}
else
{
if (!file_reader)
prepareReader();
if (record_batch_current >= record_batch_total)
return res;
@ -71,7 +76,7 @@ void ArrowBlockInputFormat::resetParser()
stream_reader.reset();
else
file_reader.reset();
prepareReader();
record_batch_current = 0;
}
void ArrowBlockInputFormat::prepareReader()

View File

@ -8,6 +8,7 @@
#include <Processors/Transforms/ExtremesTransform.h>
#include <Processors/Formats/IOutputFormat.h>
#include <Processors/Sources/NullSource.h>
#include <Columns/ColumnConst.h>
namespace DB
{
@ -250,6 +251,47 @@ static Pipes removeEmptyPipes(Pipes pipes)
return res;
}
/// Calculate common header for pipes.
/// This function is needed only to remove ColumnConst from the common header when some columns are const and some are not.
/// E.g. if the first header is `x, const y, const z` and the second is `const x, y, const z`, the common header will be `x, y, const z`.
static Block getCommonHeader(const Pipes & pipes)
{
Block res;
for (const auto & pipe : pipes)
{
if (const auto & header = pipe.getHeader())
{
res = header;
break;
}
}
for (const auto & pipe : pipes)
{
const auto & header = pipe.getHeader();
for (size_t i = 0; i < res.columns(); ++i)
{
/// We do not check that headers are compatible here. Will do it later.
if (i >= header.columns())
break;
auto & common = res.getByPosition(i).column;
const auto & cur = header.getByPosition(i).column;
/// Only remove const from common header if it is not const for current pipe.
if (cur && common && !isColumnConst(*cur))
{
if (const auto * column_const = typeid_cast<const ColumnConst *>(common.get()))
common = column_const->getDataColumnPtr();
}
}
}
return res;
}
Pipe Pipe::unitePipes(Pipes pipes)
{
return Pipe::unitePipes(std::move(pipes), nullptr, false);
@ -276,23 +318,12 @@ Pipe Pipe::unitePipes(Pipes pipes, Processors * collected_processors, bool allow
OutputPortRawPtrs totals;
OutputPortRawPtrs extremes;
res.collected_processors = collected_processors;
res.header = pipes.front().header;
if (allow_empty_header && !res.header)
{
for (const auto & pipe : pipes)
{
if (const auto & header = pipe.getHeader())
{
res.header = header;
break;
}
}
}
res.header = getCommonHeader(pipes);
for (auto & pipe : pipes)
{
if (!allow_empty_header || pipe.header)
assertBlocksHaveEqualStructure(res.header, pipe.header, "Pipe::unitePipes");
assertCompatibleHeader(pipe.header, res.header, "Pipe::unitePipes");
res.processors.insert(res.processors.end(), pipe.processors.begin(), pipe.processors.end());
res.output_ports.insert(res.output_ports.end(), pipe.output_ports.begin(), pipe.output_ports.end());

View File

@ -16,7 +16,7 @@ void connect(OutputPort & output, InputPort & input)
auto out_name = output.getProcessor().getName();
auto in_name = input.getProcessor().getName();
assertBlocksHaveEqualStructure(input.getHeader(), output.getHeader(), " function connect between " + out_name + " and " + in_name);
assertCompatibleHeader(output.getHeader(), input.getHeader(), " function connect between " + out_name + " and " + in_name);
input.output_port = &output;
output.input_port = &input;

View File

@ -232,8 +232,6 @@ QueryPipeline QueryPipeline::unitePipelines(
pipeline.checkInitialized();
pipeline.pipe.collected_processors = collected_processors;
assertBlocksHaveEqualStructure(pipeline.getHeader(), common_header, "QueryPipeline::unitePipelines");
pipes.emplace_back(std::move(pipeline.pipe));
max_threads += pipeline.max_threads;

View File

@ -311,6 +311,8 @@ void TCPHandler::runImpl()
/// Processing Query
state.io = executeQuery(state.query, query_context, false, state.stage, may_have_embedded_data);
unknown_packet_in_send_data = query_context->getSettingsRef().unknown_packet_in_send_data;
after_check_cancelled.restart();
after_send_progress.restart();
@ -1472,6 +1474,14 @@ void TCPHandler::sendData(const Block & block)
try
{
/// For testing hedged requests
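/// After unknown_packet_in_send_data packets, an invalid packet type (UInt64(-1)) is written
/// instead of Protocol::Server::Data, so that client handling of unknown packets can be exercised.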
if (unknown_packet_in_send_data)
{
--unknown_packet_in_send_data;
if (unknown_packet_in_send_data == 0)
writeVarUInt(UInt64(-1), *out);
}
writeVarUInt(Protocol::Server::Data, *out);
/// Send external table name (empty name is the main table)
writeStringBinary("", *out);

View File

@ -135,6 +135,8 @@ private:
ContextPtr connection_context;
ContextPtr query_context;
size_t unknown_packet_in_send_data = 0;
/// Streams for reading/writing from/to client connection socket.
std::shared_ptr<ReadBuffer> in;
std::shared_ptr<WriteBuffer> out;

View File

@ -309,11 +309,11 @@ static const std::map<std::string, std::string> inverse_relations = {
bool isLogicalOperator(const String & func_name)
{
return (func_name == "and" || func_name == "or" || func_name == "not");
return (func_name == "and" || func_name == "or" || func_name == "not" || func_name == "indexHint");
}
/// The node can be one of:
/// - Logical operator (AND, OR, NOT)
/// - Logical operator (AND, OR, NOT and indexHint() - logical NOOP)
/// - An "atom" (relational operator, constant, expression)
/// - A logical constant expression
/// - Any other function
@ -330,7 +330,8 @@ ASTPtr cloneASTWithInversionPushDown(const ASTPtr node, const bool need_inversio
const auto result_node = makeASTFunction(func->name);
if (need_inversion)
/// indexHint() is a special case - logical NOOP function
if (result_node->name != "indexHint" && need_inversion)
{
result_node->name = (result_node->name == "and") ? "or" : "and";
}
@ -965,6 +966,8 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctions(
{
const auto & args = (*it)->arguments->children;
auto func_builder = FunctionFactory::instance().tryGet((*it)->name, context);
if (!func_builder)
return false;
ColumnsWithTypeAndName arguments;
ColumnWithTypeAndName const_arg;
FunctionWithOptionalConstArg::Kind kind = FunctionWithOptionalConstArg::Kind::NO_CONST;
@ -1277,6 +1280,8 @@ bool KeyCondition::tryParseAtomFromAST(const ASTPtr & node, ContextPtr context,
bool KeyCondition::tryParseLogicalOperatorFromAST(const ASTFunction * func, RPNElement & out)
{
/// Functions AND, OR, NOT.
/// Also the special function `indexHint` - it works as if the function call were just parentheses
/// (or, equivalently, a call to the function `and` with a single argument).
const ASTs & args = func->arguments->children;
if (func->name == "not")
@ -1288,7 +1293,7 @@ bool KeyCondition::tryParseLogicalOperatorFromAST(const ASTFunction * func, RPNE
}
else
{
if (func->name == "and")
if (func->name == "and" || func->name == "indexHint")
out.function = RPNElement::FUNCTION_AND;
else if (func->name == "or")
out.function = RPNElement::FUNCTION_OR;

View File

@ -39,7 +39,6 @@
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypesNumber.h>
#include <Storages/VirtualColumnUtils.h>
#include <DataStreams/materializeBlock.h>
namespace ProfileEvents
{

View File

@ -392,7 +392,7 @@ bool MergeTreeIndexConditionSet::operatorFromAST(ASTPtr & node)
func->name = "__bitSwapLastTwo";
}
else if (func->name == "and")
else if (func->name == "and" || func->name == "indexHint")
{
auto last_arg = args.back();
args.pop_back();
@ -448,7 +448,7 @@ bool MergeTreeIndexConditionSet::checkASTUseless(const ASTPtr & node, bool atomi
const ASTs & args = func->arguments->children;
if (func->name == "and")
if (func->name == "and" || func->name == "indexHint")
return checkASTUseless(args[0], atomic) && checkASTUseless(args[1], atomic);
else if (func->name == "or")
return checkASTUseless(args[0], atomic) || checkASTUseless(args[1], atomic);

View File

@ -339,6 +339,10 @@ bool MergeTreeWhereOptimizer::cannotBeMoved(const ASTPtr & ptr, bool is_final) c
if ("globalIn" == function_ptr->name
|| "globalNotIn" == function_ptr->name)
return true;
/// indexHint is a special function that does not make sense to move to PREWHERE
if ("indexHint" == function_ptr->name)
return true;
}
else if (auto opt_name = IdentifierSemantic::getColumnName(ptr))
{

View File

@ -90,6 +90,8 @@ private:
bool operatorFromAST(const ASTFunction * func, RPNElement & out)
{
/// Functions AND, OR, NOT.
/// Also the special function `indexHint` - it works as if the function call were just parentheses
/// (or, equivalently, a call to the function `and` with a single argument).
const ASTs & args = typeid_cast<const ASTExpressionList &>(*func->arguments).children;
if (func->name == "not")
@ -101,7 +103,7 @@ private:
}
else
{
if (func->name == "and")
if (func->name == "and" || func->name == "indexHint")
out.function = RPNElement::FUNCTION_AND;
else if (func->name == "or")
out.function = RPNElement::FUNCTION_OR;

View File

@ -44,7 +44,7 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrder(const StorageMetadataPtr &
int read_direction = required_sort_description.at(0).direction;
size_t prefix_size = std::min(required_sort_description.size(), sorting_key_columns.size());
auto aliase_columns = metadata_snapshot->getColumns().getAliases();
auto aliased_columns = metadata_snapshot->getColumns().getAliases();
for (size_t i = 0; i < prefix_size; ++i)
{
@ -55,13 +55,18 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrder(const StorageMetadataPtr &
/// or in some simple cases when order key element is wrapped into monotonic function.
auto apply_order_judge = [&] (const ExpressionActions::Actions & actions, const String & sort_column)
{
/// If the required order depends on collation, it cannot be matched with the primary key order,
/// because primary keys cannot have collations.
if (required_sort_description[i].collator)
return false;
int current_direction = required_sort_description[i].direction;
/// For the path: order by (sort_column, ...)
/// For the path: order by (sort_column, ...)
if (sort_column == sorting_key_columns[i] && current_direction == read_direction)
{
return true;
}
/// For the path: order by (function(sort_column), ...)
/// For the path: order by (function(sort_column), ...)
/// Allow only one simple monotonic function with one argument
/// Why not allow multiple monotonic functions?
else
@ -125,7 +130,7 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrder(const StorageMetadataPtr &
/// currently we only support alias column without any function wrapper
/// i.e. `order by aliased_column` can have this optimization, but `order by function(aliased_column)` cannot.
/// This suits most cases.
if (context->getSettingsRef().optimize_respect_aliases && aliase_columns.contains(required_sort_description[i].column_name))
if (context->getSettingsRef().optimize_respect_aliases && aliased_columns.contains(required_sort_description[i].column_name))
{
auto column_expr = metadata_snapshot->getColumns().get(required_sort_description[i].column_name).default_desc.expression->clone();
replaceAliasColumnsInQuery(column_expr, metadata_snapshot->getColumns(), forbidden_columns, context);

View File

@ -585,42 +585,24 @@ bool StorageReplicatedMergeTree::createTableIfNotExists(const StorageMetadataPtr
/// This is Ok because another replica is definitely going to drop the table.
LOG_WARNING(log, "Removing leftovers from table {} (this might take several minutes)", zookeeper_path);
String drop_lock_path = zookeeper_path + "/dropped/lock";
Coordination::Error code = zookeeper->tryCreate(drop_lock_path, "", zkutil::CreateMode::Ephemeral);
Strings children;
Coordination::Error code = zookeeper->tryGetChildren(zookeeper_path, children);
if (code == Coordination::Error::ZNONODE)
if (code == Coordination::Error::ZNONODE || code == Coordination::Error::ZNODEEXISTS)
{
LOG_WARNING(log, "Table {} is already finished removing by another replica right now", replica_path);
LOG_WARNING(log, "The leftovers from table {} were removed by another replica", zookeeper_path);
}
else if (code != Coordination::Error::ZOK)
{
throw Coordination::Exception(code, drop_lock_path);
}
else
{
for (const auto & child : children)
if (child != "dropped")
zookeeper->tryRemoveRecursive(zookeeper_path + "/" + child);
Coordination::Requests ops;
Coordination::Responses responses;
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path + "/dropped", -1));
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path, -1));
code = zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZNONODE)
auto metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *zookeeper);
if (!removeTableNodesFromZooKeeper(zookeeper, zookeeper_path, metadata_drop_lock, log))
{
LOG_WARNING(log, "Table {} is already finished removing by another replica right now", replica_path);
}
else if (code == Coordination::Error::ZNOTEMPTY)
{
throw Exception(fmt::format(
"The old table was not completely removed from ZooKeeper, {} still exists and may contain some garbage. But it should never happen according to the logic of operations (it's a bug).", zookeeper_path), ErrorCodes::LOGICAL_ERROR);
}
else if (code != Coordination::Error::ZOK)
{
/// It is still possible that ZooKeeper session is expired or server is killed in the middle of the delete operation.
zkutil::KeeperMultiException::check(code, ops, responses);
}
else
{
LOG_WARNING(log, "The leftovers from table {} was successfully removed from ZooKeeper", zookeeper_path);
/// Someone is recursively removing table right now, we cannot create new table until old one is removed
continue;
}
}
}
@ -633,10 +615,6 @@ bool StorageReplicatedMergeTree::createTableIfNotExists(const StorageMetadataPtr
Coordination::Requests ops;
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path, "", zkutil::CreateMode::Persistent));
/// Check that the table is not being dropped right now.
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/dropped", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path + "/dropped", -1));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/metadata", metadata_str,
zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/columns", metadata_snapshot->getColumns().toString(),
@ -824,10 +802,18 @@ void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, con
* because table creation is executed in single transaction that will conflict with remaining nodes.
*/
/// Node /dropped works like a lock that protects from concurrent removal of old table and creation of new table.
/// But recursive removal may fail in the middle of the operation, leaving some garbage in zookeeper_path, so
/// we remove the leftovers on table creation if the /dropped node exists. The creating thread may then remove the
/// /dropped node created by the removing thread, which causes a race condition if the removing thread has not finished yet.
/// To avoid this, we also create an ephemeral child before starting the recursive removal
/// (the existence of a child node prevents removal of the parent node).
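///
/// The resulting protocol, as implemented below and in removeTableNodesFromZooKeeper():
///   1. remove /replicas, create the persistent /dropped node and the ephemeral /dropped/lock in one transaction;
///   2. recursively remove all children of zookeeper_path except /dropped;
///   3. remove /dropped/lock, /dropped and zookeeper_path itself in one transaction.
/// If the removing server dies, the ephemeral lock disappears and table creation can take over the cleanup.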
Coordination::Requests ops;
Coordination::Responses responses;
String drop_lock_path = zookeeper_path + "/dropped/lock";
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path + "/replicas", -1));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/dropped", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(drop_lock_path, "", zkutil::CreateMode::Ephemeral));
Coordination::Error code = zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZNONODE || code == Coordination::Error::ZNODEEXISTS)
@ -844,48 +830,57 @@ void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, con
}
else
{
auto metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *zookeeper);
LOG_INFO(logger, "Removing table {} (this might take several minutes)", zookeeper_path);
Strings children;
code = zookeeper->tryGetChildren(zookeeper_path, children);
if (code == Coordination::Error::ZNONODE)
{
LOG_WARNING(logger, "Table {} is already finished removing by another replica right now", remote_replica_path);
}
else
{
for (const auto & child : children)
if (child != "dropped")
zookeeper->tryRemoveRecursive(zookeeper_path + "/" + child);
ops.clear();
responses.clear();
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path + "/dropped", -1));
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path, -1));
code = zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZNONODE)
{
LOG_WARNING(logger, "Table {} is already finished removing by another replica right now", remote_replica_path);
}
else if (code == Coordination::Error::ZNOTEMPTY)
{
LOG_ERROR(logger, "Table was not completely removed from ZooKeeper, {} still exists and may contain some garbage.",
zookeeper_path);
}
else if (code != Coordination::Error::ZOK)
{
/// It is still possible that ZooKeeper session is expired or server is killed in the middle of the delete operation.
zkutil::KeeperMultiException::check(code, ops, responses);
}
else
{
LOG_INFO(logger, "Table {} was successfully removed from ZooKeeper", zookeeper_path);
}
}
removeTableNodesFromZooKeeper(zookeeper, zookeeper_path, metadata_drop_lock, logger);
}
}
bool StorageReplicatedMergeTree::removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr zookeeper,
const String & zookeeper_path, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, Poco::Logger * logger)
{
bool completely_removed = false;
Strings children;
Coordination::Error code = zookeeper->tryGetChildren(zookeeper_path, children);
if (code == Coordination::Error::ZNONODE)
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is a race condition between creation and removal of replicated table. It's a bug");
for (const auto & child : children)
if (child != "dropped")
zookeeper->tryRemoveRecursive(zookeeper_path + "/" + child);
Coordination::Requests ops;
Coordination::Responses responses;
ops.emplace_back(zkutil::makeRemoveRequest(metadata_drop_lock->getPath(), -1));
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path + "/dropped", -1));
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path, -1));
code = zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZNONODE)
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is a race condition between creation and removal of replicated table. It's a bug");
}
else if (code == Coordination::Error::ZNOTEMPTY)
{
LOG_ERROR(logger, "Table was not completely removed from ZooKeeper, {} still exists and may contain some garbage,"
"but someone is removing it right now.", zookeeper_path);
}
else if (code != Coordination::Error::ZOK)
{
/// It is still possible that ZooKeeper session is expired or server is killed in the middle of the delete operation.
zkutil::KeeperMultiException::check(code, ops, responses);
}
else
{
metadata_drop_lock->setAlreadyRemoved();
completely_removed = true;
LOG_INFO(logger, "Table {} was successfully removed from ZooKeeper", zookeeper_path);
}
return completely_removed;
}
/** Verify that list of columns and table storage_settings_ptr match those specified in ZK (/metadata).
* If not, throw an exception.

View File

@ -208,6 +208,10 @@ public:
*/
static void dropReplica(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const String & replica, Poco::Logger * logger);
/// Removes table from ZooKeeper after the last replica was dropped
static bool removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path,
const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, Poco::Logger * logger);
/// Get job to execute in background pool (merge, mutate, drop range and so on)
std::optional<JobAndPool> getDataProcessingJob() override;

View File

@ -12,7 +12,16 @@ namespace DB
class Context;
/** Base class for system tables whose all columns have String type.
/** IStorageSystemOneBlock is the base class for system tables whose columns can all be fetched synchronously.
*
* A client class must provide a static method NamesAndTypesList getNamesAndTypes() that returns the list of column names and
* their types. During read, IStorageSystemOneBlock creates the result columns in the same order as the result of getNamesAndTypes
* and passes them to the fillData method.
*
* The client must also override fillData and fill the result columns.
*
* If a subclass wants to support virtual columns, it should override the getVirtuals method of the IStorage interface.
* IStorageSystemOneBlock adds the virtual columns at the end of the result columns passed to fillData.
*/
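/// A minimal hypothetical subclass sketch (the class name and column here are illustrative only):
///
///     class StorageSystemExample final : public IStorageSystemOneBlock<StorageSystemExample>
///     {
///     public:
///         static NamesAndTypesList getNamesAndTypes() { return {{"message", std::make_shared<DataTypeString>()}}; }
///         String getName() const override { return "SystemExample"; }
///     protected:
///         using IStorageSystemOneBlock::IStorageSystemOneBlock;
///         void fillData(MutableColumns & res_columns, ContextPtr, const SelectQueryInfo &) const override
///         {
///             res_columns[0]->insert(String{"hello"});
///         }
///     };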
template <typename Self>
class IStorageSystemOneBlock : public IStorage
@ -41,9 +50,10 @@ public:
size_t /*max_block_size*/,
unsigned /*num_streams*/) override
{
metadata_snapshot->check(column_names, getVirtuals(), getStorageID());
auto virtuals_names_and_types = getVirtuals();
metadata_snapshot->check(column_names, virtuals_names_and_types, getStorageID());
Block sample_block = metadata_snapshot->getSampleBlock();
Block sample_block = metadata_snapshot->getSampleBlockWithVirtuals(virtuals_names_and_types);
MutableColumns res_columns = sample_block.cloneEmptyColumns();
fillData(res_columns, context, query_info);

View File

@ -50,6 +50,13 @@ NamesAndTypesList StorageSystemDictionaries::getNamesAndTypes()
};
}
NamesAndTypesList StorageSystemDictionaries::getVirtuals() const
{
return {
{"key", std::make_shared<DataTypeString>()}
};
}
void StorageSystemDictionaries::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & /*query_info*/) const
{
const auto access = context->getAccess();
@ -128,6 +135,9 @@ void StorageSystemDictionaries::fillData(MutableColumns & res_columns, ContextPt
else
res_columns[i++]->insertDefault();
/// Start filling virtual columns
res_columns[i++]->insert(dictionary_structure.getKeyDescription());
}
}
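
A hedged usage sketch (not part of this commit): the new `key` virtual column is returned only when selected explicitly, for example:

SELECT name, key FROM system.dictionaries;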

View File

@ -18,6 +18,8 @@ public:
static NamesAndTypesList getNamesAndTypes();
NamesAndTypesList getVirtuals() const override;
protected:
using IStorageSystemOneBlock::IStorageSystemOneBlock;

File diff suppressed because one or more lines are too long

View File

@ -11,11 +11,10 @@ function thread()
while true; do
$CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 SYNC;
CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 |
grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now|is already finished removing by another replica right now|Removing leftovers from table|Another replica was suddenly created|was successfully removed from ZooKeeper|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time'
grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now| were removed by another replica|Removing leftovers from table|Another replica was suddenly created|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time'
done
}
# https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout
export -f thread;

View File

@ -7,3 +7,4 @@
2
2.25
6.5
7

View File

@ -7,3 +7,4 @@ select deltaSumMerge(rows) from (select deltaSumState(arrayJoin([0, 1])) as rows
select deltaSumMerge(rows) from (select deltaSumState(arrayJoin([4, 5])) as rows union all select deltaSumState(arrayJoin([0, 1])) as rows);
select deltaSum(arrayJoin([2.25, 3, 4.5]));
select deltaSumMerge(rows) from (select deltaSumState(arrayJoin([0.1, 0.3, 0.5])) as rows union all select deltaSumState(arrayJoin([4.1, 5.1, 6.6])) as rows);
select deltaSumMerge(rows) from (select deltaSumState(arrayJoin([3, 5])) as rows union all select deltaSumState(arrayJoin([1, 2])) as rows union all select deltaSumState(arrayJoin([4, 6])) as rows);

View File

@ -0,0 +1,35 @@
-- { echo }
drop table if exists tbl;
create table tbl (p Int64, t Int64, f Float64) Engine=MergeTree partition by p order by t settings index_granularity=1;
insert into tbl select number / 4, number, 0 from numbers(16);
select * from tbl WHERE indexHint(t = 1) order by t;
0 0 0
0 1 0
select * from tbl WHERE indexHint(t in (select toInt64(number) + 2 from numbers(3))) order by t;
0 1 0
0 2 0
0 3 0
1 4 0
select * from tbl WHERE indexHint(p = 2) order by t;
2 8 0
2 9 0
2 10 0
2 11 0
select * from tbl WHERE indexHint(p in (select toInt64(number) - 2 from numbers(3))) order by t;
0 0 0
0 1 0
0 2 0
0 3 0
drop table tbl;
drop table if exists XXXX;
create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=128;
insert into XXXX select number*60, 0 from numbers(100000);
SELECT count() FROM XXXX WHERE indexHint(t = 42);
128
drop table if exists XXXX;
create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=8192;
insert into XXXX select number*60, 0 from numbers(100000);
SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
100000
drop table XXXX;

View File

@ -0,0 +1,35 @@
-- { echo }
drop table if exists tbl;
create table tbl (p Int64, t Int64, f Float64) Engine=MergeTree partition by p order by t settings index_granularity=1;
insert into tbl select number / 4, number, 0 from numbers(16);
select * from tbl WHERE indexHint(t = 1) order by t;
select * from tbl WHERE indexHint(t in (select toInt64(number) + 2 from numbers(3))) order by t;
select * from tbl WHERE indexHint(p = 2) order by t;
select * from tbl WHERE indexHint(p in (select toInt64(number) - 2 from numbers(3))) order by t;
drop table tbl;
drop table if exists XXXX;
create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=128;
insert into XXXX select number*60, 0 from numbers(100000);
SELECT count() FROM XXXX WHERE indexHint(t = 42);
drop table if exists XXXX;
create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=8192;
insert into XXXX select number*60, 0 from numbers(100000);
SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
drop table XXXX;

View File

@ -0,0 +1,18 @@
DROP TABLE IF EXISTS data;
CREATE TABLE data (a Int64, b Int64) ENGINE = TinyLog();
DROP TABLE IF EXISTS data_distributed;
CREATE TABLE data_distributed (a Int64, b Int64) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'data');
INSERT INTO data VALUES (0, 0);
SET prefer_localhost_replica = 1;
SELECT a / (SELECT sum(number) FROM numbers(10)) FROM data_distributed;
SELECT a < (SELECT 1) FROM data_distributed;
SET prefer_localhost_replica = 0;
SELECT a / (SELECT sum(number) FROM numbers(10)) FROM data_distributed;
SELECT a < (SELECT 1) FROM data_distributed;
DROP TABLE data_distributed;
DROP TABLE data;

View File

@ -0,0 +1,9 @@
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
for _ in {1..10}; do $CLICKHOUSE_CLIENT -q "select number from remote('127.0.0.{2,3}', numbers(20)) limit 8 settings max_block_size = 2, unknown_packet_in_send_data=4, sleep_in_send_data_ms=100, async_socket_for_remote=1 format Null" > /dev/null 2>&1 || true; done

View File

@ -0,0 +1,20 @@
drop table if exists t0;
CREATE TABLE t0 (c0 String) ENGINE = Log();
SELECT isNull(t0.c0) OR COUNT('\n?pVa')
FROM t0
GROUP BY t0.c0
HAVING isNull(t0.c0)
UNION ALL
SELECT isNull(t0.c0) OR COUNT('\n?pVa')
FROM t0
GROUP BY t0.c0
HAVING NOT isNull(t0.c0)
UNION ALL
SELECT isNull(t0.c0) OR COUNT('\n?pVa')
FROM t0
GROUP BY t0.c0
HAVING isNull(isNull(t0.c0))
SETTINGS aggregate_functions_null_for_empty = 1, enable_optimize_predicate_expression = 0;
drop table if exists t0;

View File

@ -0,0 +1,6 @@
a a
A A
b b
B B
c c
C C

View File

@ -0,0 +1,21 @@
DROP TABLE IF EXISTS test_collation;
CREATE TABLE test_collation
(
`v` String,
`v2` String
)
ENGINE = MergeTree
ORDER BY v
SETTINGS index_granularity = 8192;
insert into test_collation values ('A', 'A');
insert into test_collation values ('B', 'B');
insert into test_collation values ('C', 'C');
insert into test_collation values ('a', 'a');
insert into test_collation values ('b', 'b');
insert into test_collation values ('c', 'c');
SELECT * FROM test_collation ORDER BY v ASC COLLATE 'en';
DROP TABLE test_collation;

View File

@ -0,0 +1,27 @@
#!/usr/bin/env bash
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery --query "
drop table if exists aliases_lazyness;
create table aliases_lazyness (x UInt32, y ALIAS sleepEachRow(0.1)) Engine=MergeTree ORDER BY x;
insert into aliases_lazyness(x) select * from numbers(40);
"
# In very old ClickHouse versions alias column was calculated for every row.
# If it works this way, the query will take at least 0.1 * 40 = 4 seconds.
# If the issue does not exist, the query should take slightly more than 0.1 seconds.
# The exact time is not guaranteed, so we check in a loop that at least once
# the query will complete in less than one second, which proves that the behaviour is not like it was a long time ago.
while true
do
timeout 1 ${CLICKHOUSE_CLIENT} --query "SELECT x, y FROM aliases_lazyness WHERE x = 1 FORMAT Null" && break
done
${CLICKHOUSE_CLIENT} --multiquery --query "
drop table aliases_lazyness;
SELECT 'Ok';
"

View File

@ -0,0 +1,2 @@
2017-12-15 1 1
2017-12-15 1 1

View File

@ -0,0 +1,21 @@
DROP TABLE IF EXISTS tb;
CREATE TABLE tb
(
date Date,
`index` Int32,
value Int32,
idx Int32 ALIAS `index`
)
ENGINE = MergeTree
PARTITION BY date
ORDER BY (date, `index`);
insert into tb values ('2017-12-15', 1, 1);
SET force_primary_key = 1;
select * from tb where `index` >= 0 AND `index` <= 2;
select * from tb where idx >= 0 AND idx <= 2;
DROP TABLE tb;

View File

@ -0,0 +1,6 @@
DateTime
DateTime
DateTime(\'UTC\')
DateTime64(3)
DateTime64(3)
DateTime64(3, \'UTC\')

View File

@ -0,0 +1,26 @@
SELECT toTypeName(now());
SELECT toTypeName(now() - 1);
SELECT toTypeName(now('UTC') - 1);
SELECT toTypeName(now64(3));
SELECT toTypeName(now64(3) - 1);
SELECT toTypeName(toTimeZone(now64(3), 'UTC') - 1);
DROP TABLE IF EXISTS tt_null;
DROP TABLE IF EXISTS tt;
DROP TABLE IF EXISTS tt_mv;
create table tt_null(p String) engine = Null;
create table tt(p String,tmin AggregateFunction(min, DateTime))
engine = AggregatingMergeTree order by p;
create materialized view tt_mv to tt as
select p, minState(now() - interval 30 minute) as tmin
from tt_null group by p;
insert into tt_null values('x');
DROP TABLE tt_null;
DROP TABLE tt;
DROP TABLE tt_mv;

View File

@ -0,0 +1,2 @@
[]
[]

Some files were not shown because too many files have changed in this diff