Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit a6510cc4b7: Merge branch 'master' into fix_terminate_when_not_enough_memory
@@ -3,7 +3,7 @@ SET(VERSION_REVISION 54445)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 1)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 53d0c9fa7255aa1dc48991d19f4246ff71cc2fd7)
SET(VERSION_DESCRIBE v21.1.1.5643-prestable)
SET(VERSION_STRING 21.1.1.5643)
SET(VERSION_GITHASH 667dd0cf0ccecdaa6f334177b7ece2f53bd196a1)
SET(VERSION_DESCRIBE v21.1.1.5646-prestable)
SET(VERSION_STRING 21.1.1.5646)
# end of autochange

@@ -1,5 +1,4 @@
# Freebsd: contrib/cppkafka/include/cppkafka/detail/endianness.h:53:23: error: 'betoh16' was not declared in this scope
if (NOT ARCH_ARM AND NOT OS_FREEBSD AND OPENSSL_FOUND)
if (NOT ARCH_ARM AND OPENSSL_FOUND)
option (ENABLE_RDKAFKA "Enable kafka" ${ENABLE_LIBRARIES})
elseif(ENABLE_RDKAFKA AND NOT OPENSSL_FOUND)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use librdkafka without SSL")

@@ -1,2 +1,2 @@
wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz
tar xJf MacOSX10.14.sdk.tar.xz --strip-components=1
wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz
tar xJf MacOSX10.15.sdk.tar.xz --strip-components=1
contrib/libcxx (vendored)
@@ -1 +1 @@
Subproject commit 95650a0db4399ee871d5fd698ad12384fe9fa964
Subproject commit 8b80a151d12b98ffe2d0c22f7cec12c3b9ff88d7
@@ -5,6 +5,8 @@ set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx)
set(SRCS
${LIBCXX_SOURCE_DIR}/src/algorithm.cpp
${LIBCXX_SOURCE_DIR}/src/any.cpp
${LIBCXX_SOURCE_DIR}/src/atomic.cpp
${LIBCXX_SOURCE_DIR}/src/barrier.cpp
${LIBCXX_SOURCE_DIR}/src/bind.cpp
${LIBCXX_SOURCE_DIR}/src/charconv.cpp
${LIBCXX_SOURCE_DIR}/src/chrono.cpp

@@ -20,6 +22,7 @@ ${LIBCXX_SOURCE_DIR}/src/functional.cpp
${LIBCXX_SOURCE_DIR}/src/future.cpp
${LIBCXX_SOURCE_DIR}/src/hash.cpp
${LIBCXX_SOURCE_DIR}/src/ios.cpp
${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp
${LIBCXX_SOURCE_DIR}/src/iostream.cpp
${LIBCXX_SOURCE_DIR}/src/locale.cpp
${LIBCXX_SOURCE_DIR}/src/memory.cpp

@@ -28,6 +31,7 @@ ${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp
${LIBCXX_SOURCE_DIR}/src/new.cpp
${LIBCXX_SOURCE_DIR}/src/optional.cpp
${LIBCXX_SOURCE_DIR}/src/random.cpp
${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp
${LIBCXX_SOURCE_DIR}/src/regex.cpp
${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
contrib/libcxxabi (vendored)
@@ -1 +1 @@
Subproject commit 1ebc83af4c06dbcd56b4d166c1314a7d4c1173f9
Subproject commit df8f1e727dbc9e2bedf2282096fa189dc3fe0076
@@ -11,7 +11,6 @@ ${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp
${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp
${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_demangle.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_unexpected.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_exception.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp

@@ -83,7 +83,8 @@
#if (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ <= 101400)
#define _TTHREAD_EMULATE_TIMESPEC_GET_
#endif

#elif defined(__FreeBSD__)
#define HAVE_PTHREAD_SETNAME_FREEBSD 1
#else
// pthread_setname_gnu
#define HAVE_PTHREAD_SETNAME_GNU 1
@@ -82,7 +82,7 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
&& rm -rf cctools-port

# Download toolchain for Darwin
RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz
RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz

# Download toolchain for ARM
# It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling.

@@ -3,7 +3,7 @@
set -x -e

mkdir -p build/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
tar xJf MacOSX10.15.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1

mkdir -p build/cmake/toolchain/linux-aarch64
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1
@@ -42,9 +42,9 @@ Also, we need to download macOS X SDK into the working tree.

``` bash
cd ClickHouse
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz'
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz'
mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
tar xJf MacOSX10.15.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
```

## Build ClickHouse {#build-clickhouse}
@@ -23,6 +23,7 @@ The following actions are supported:
- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
- [MODIFY COLUMN](#alter_modify-column) — Changes column’s type, default expression and TTL.
- [MODIFY COLUMN REMOVE](#modify-remove) — Removes one of the column properties.

These actions are described in detail below.

@@ -145,6 +146,26 @@ The `ALTER` query is atomic. For MergeTree tables it is also lock-free.

The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously.

## MODIFY COLUMN REMOVE {#modify-remove}

Removes one of the column properties: `DEFAULT`, `ALIAS`, `MATERIALIZED`, `CODEC`, `COMMENT`, `TTL`.

Syntax:

```sql
ALTER TABLE table_name MODIFY COLUMN column_name REMOVE property;
```

**Example**

```sql
ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
```

## See Also

- [REMOVE TTL](ttl.md).

## Limitations {#alter-query-limitations}

The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.
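As an illustration of the `MODIFY COLUMN REMOVE` section above: the same statement form applies to the other listed properties, not only `TTL`. A minimal sketch, with a hypothetical table and column that are not part of this commit:

```sql
-- Drop the DEFAULT expression from a column; the column itself is kept.
CREATE TABLE example_defaults (id UInt64, status String DEFAULT 'new') ENGINE = MergeTree() ORDER BY id;
ALTER TABLE example_defaults MODIFY COLUMN status REMOVE DEFAULT;
```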
@@ -3,10 +3,83 @@ toc_priority: 44
toc_title: TTL
---

### Manipulations with Table TTL {#manipulations-with-table-ttl}
# Manipulations with Table TTL {#manipulations-with-table-ttl}

## MODIFY TTL {#modify-ttl}

You can change [table TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) with a request of the following form:

``` sql
ALTER TABLE table-name MODIFY TTL ttl-expression
ALTER TABLE table_name MODIFY TTL ttl_expression;
```

## REMOVE TTL {#remove-ttl}

The table `TTL` property can be removed with the following query:

```sql
ALTER TABLE table_name REMOVE TTL
```

**Example**

Consider a table with a table-level `TTL`:

```sql
CREATE TABLE table_with_ttl
(
    event_time DateTime,
    UserID UInt64,
    Comment String
)
ENGINE MergeTree()
ORDER BY tuple()
TTL event_time + INTERVAL 3 MONTH
SETTINGS min_bytes_for_wide_part = 0;

INSERT INTO table_with_ttl VALUES (now(), 1, 'username1');

INSERT INTO table_with_ttl VALUES (now() - INTERVAL 4 MONTH, 2, 'username2');
```

Run `OPTIMIZE` to force `TTL` cleanup:

```sql
OPTIMIZE TABLE table_with_ttl FINAL;
SELECT * FROM table_with_ttl FORMAT PrettyCompact;
```

The second row has been deleted from the table.

```text
┌─────────event_time────┬──UserID─┬─────Comment──┐
│ 2020-12-11 12:44:57 │ 1 │ username1 │
└───────────────────────┴─────────┴──────────────┘
```

Now remove table `TTL` with the following query:

```sql
ALTER TABLE table_with_ttl REMOVE TTL;
```

Re-insert the deleted row and force the `TTL` cleanup again with `OPTIMIZE`:

```sql
INSERT INTO table_with_ttl VALUES (now() - INTERVAL 4 MONTH, 2, 'username2');
OPTIMIZE TABLE table_with_ttl FINAL;
SELECT * FROM table_with_ttl FORMAT PrettyCompact;
```

The `TTL` is no longer there, so the second row is not deleted:

```text
┌─────────event_time────┬──UserID─┬─────Comment──┐
│ 2020-12-11 12:44:57 │ 1 │ username1 │
│ 2020-08-11 12:44:57 │ 2 │ username2 │
└───────────────────────┴─────────┴──────────────┘
```

### See Also

- More about the [TTL-expression](../../../sql-reference/statements/create/table#ttl-expression).
- Modify column [with TTL](../../../sql-reference/statements/alter/column#alter_modify-column).
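The `MODIFY TTL` subsection above only shows the general form of the statement. A minimal sketch of a concrete call, reusing the `table_with_ttl` table from the example (illustrative, not part of this commit):

```sql
-- Replace the table-level TTL with a longer retention period.
ALTER TABLE table_with_ttl MODIFY TTL event_time + INTERVAL 6 MONTH;
```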
@@ -18,10 +18,6 @@ It is possible to obtain the same result by applying [GROUP BY](../../../sql-ref
- When [ORDER BY](../../../sql-reference/statements/select/order-by.md) is omitted and [LIMIT](../../../sql-reference/statements/select/limit.md) is defined, the query stops running immediately after the required number of different rows has been read.
- Data blocks are output as they are processed, without waiting for the entire query to finish running.

## Limitations {#limitations}

`DISTINCT` is not supported if `SELECT` has at least one array column.

## Examples {#examples}

ClickHouse supports using the `DISTINCT` and `ORDER BY` clauses for different columns in one query. The `DISTINCT` clause is executed before the `ORDER BY` clause.
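A minimal sketch of the behaviour described in the last paragraph, using a hypothetical table `t` that is not part of this commit:

```sql
CREATE TABLE t (a Int32, b Int32) ENGINE = Memory;
INSERT INTO t VALUES (1, 3), (1, 1), (2, 2);

-- DISTINCT is applied to column a first; only the surviving rows are then sorted by b.
-- Which duplicate row for a = 1 survives (and therefore which b value drives its position)
-- is not guaranteed, which is why the execution order matters.
SELECT DISTINCT a FROM t ORDER BY b ASC;
```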
@@ -44,9 +44,9 @@ Also, we need to download the macOS X SDK into the working tree.

``` bash
cd ClickHouse
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz'
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz'
mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
tar xJf MacOSX10.15.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
```

# Build ClickHouse {#build-clickhouse}
@@ -44,9 +44,9 @@ Also, we need to download the macOS X SDK into the working tree.

``` bash
cd ClickHouse
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz'
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz'
mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
tar xJf MacOSX10.15.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
```

# Build ClickHouse {#build-clickhouse}
@@ -45,9 +45,9 @@ make install

``` bash
cd ClickHouse
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz'
wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz'
mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
tar xJf MacOSX10.15.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
```

# Build ClickHouse {#build-clickhouse}
@@ -12,6 +12,7 @@ toc_title: "\u041c\u0430\u043d\u0438\u043f\u0443\u043b\u044f\u0446\u0438\u0438\u
- [CLEAR COLUMN](#alter_clear-column) — resets all values in the column for the specified partition;
- [COMMENT COLUMN](#alter_comment-column) — adds a comment to the column;
- [MODIFY COLUMN](#alter_modify-column) — changes the column's type, default-value expression and TTL.
- [MODIFY COLUMN REMOVE](#modify-remove) — removes one of the column properties.

A detailed description of each action is given below.

@@ -135,6 +136,28 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String)

The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, and then each replica applies them. All `ALTER` queries are executed in the same order. The query waits for the corresponding actions to be completed on all replicas. However, a query that changes columns in a replicated table can be interrupted, and all actions will then be performed asynchronously.

## MODIFY COLUMN REMOVE {#modify-remove}

Removes one of the column properties: `DEFAULT`, `ALIAS`, `MATERIALIZED`, `CODEC`, `COMMENT`, `TTL`.

Syntax:

```sql
ALTER TABLE table_name MODIFY COLUMN column_name REMOVE property;
```

**Example**

Removing the TTL property:

```sql
ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
```

## See Also

- [REMOVE TTL](ttl.md).

## ALTER query limitations {#ogranicheniia-zaprosa-alter}

The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`; a nested data structure is fully equivalent to multiple array columns whose names share the same prefix before the dot.
@@ -5,10 +5,82 @@ toc_title: TTL

# Manipulations with Table TTL {#manipuliatsii-s-ttl-tablitsy}

## MODIFY TTL {#modify-ttl}

You can change the [table TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl) with a query of the following form:

``` sql
ALTER TABLE table-name MODIFY TTL ttl-expression
```

[Original article](https://clickhouse.tech/docs/ru/query_language/alter/ttl/) <!--hide-->
## REMOVE TTL {#remove-ttl}

The table-level TTL can be removed with a query of the following form:

```sql
ALTER TABLE table_name REMOVE TTL
```

**Example**

Create a table with a table-level `TTL` and fill it with data:

```sql
CREATE TABLE table_with_ttl
(
    event_time DateTime,
    UserID UInt64,
    Comment String
)
ENGINE MergeTree()
ORDER BY tuple()
TTL event_time + INTERVAL 3 MONTH
SETTINGS min_bytes_for_wide_part = 0;

INSERT INTO table_with_ttl VALUES (now(), 1, 'username1');

INSERT INTO table_with_ttl VALUES (now() - INTERVAL 4 MONTH, 2, 'username2');
```

Run `OPTIMIZE` to force the `TTL` cleanup:

```sql
OPTIMIZE TABLE table_with_ttl FINAL;
SELECT * FROM table_with_ttl;
```

As a result, you can see that the second row has been deleted.

```text
┌─────────event_time────┬──UserID─┬─────Comment──┐
│ 2020-12-11 12:44:57 │ 1 │ username1 │
└───────────────────────┴─────────┴──────────────┘
```

Remove the table-level `TTL`:

```sql
ALTER TABLE table_with_ttl REMOVE TTL;
```

Re-insert the deleted row and force the `TTL` cleanup again with `OPTIMIZE`:

```sql
INSERT INTO table_with_ttl VALUES (now() - INTERVAL 4 MONTH, 2, 'username2');
OPTIMIZE TABLE table_with_ttl FINAL;
SELECT * FROM table_with_ttl;
```

The `TTL` is no longer there, so the data is not deleted:

```text
┌─────────event_time────┬──UserID─┬─────Comment──┐
│ 2020-12-11 12:44:57 │ 1 │ username1 │
│ 2020-08-11 12:44:57 │ 2 │ username2 │
└───────────────────────┴─────────┴──────────────┘
```

### See Also

- More about the [TTL property](../../../engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl).

[Original article](https://clickhouse.tech/docs/ru/query_language/alter/ttl/) <!--hide-->
@@ -18,10 +18,6 @@ toc_title: DISTINCT
- When the [ORDER BY](order-by.md) clause is omitted and the [LIMIT](limit.md) clause is present, the query stops running as soon as the required number of distinct rows has been read.
- Data blocks are output as they are processed, without waiting for the entire query to finish running.

## Limitations {#limitations}

`DISTINCT` is not supported if `SELECT` has at least one array column.

## Examples {#examples}

ClickHouse supports using the `DISTINCT` and `ORDER BY` clauses for different columns in one query. The `DISTINCT` clause is executed before the `ORDER BY` clause.
@@ -33,8 +33,8 @@ cd cctools-port/cctools
make install

cd ${CCTOOLS}
wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz
tar xJf MacOSX10.14.sdk.tar.xz
wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz
tar xJf MacOSX10.15.sdk.tar.xz
```

# Build ClickHouse {#bian-yi-clickhouse}

@@ -46,7 +46,7 @@ CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_SYSTEM_NAME=Darwin \
-DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \
-DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \
-DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld \
-DSDK_PATH=${CCTOOLS}/MacOSX10.14.sdk
-DSDK_PATH=${CCTOOLS}/MacOSX10.15.sdk
ninja -C build-osx
```
@@ -61,9 +61,12 @@ Block ColumnGathererStream::readImpl()

MutableColumnPtr output_column = column.column->cloneEmpty();
output_block = Block{column.cloneEmpty()};
/// Surprisingly this call may directly change output_block, bypassing
/// output_column. See ColumnGathererStream::gather.
output_column->gather(*this);
if (!output_column->empty())
output_block.getByPosition(0).column = std::move(output_column);

return output_block;
}
@@ -436,12 +436,6 @@ void ActionsDAG::project(const NamesWithAliases & projection)
settings.projected_output = true;
}

void ActionsDAG::removeColumn(const std::string & column_name)
{
auto & node = getNode(column_name);
index.remove(&node);
}

bool ActionsDAG::tryRestoreColumn(const std::string & column_name)
{
if (index.contains(column_name))

@@ -550,6 +544,11 @@ std::string ActionsDAG::dumpDAG() const
out << "\n";
}

out << "Index:";
for (const auto * node : index)
out << ' ' << map[node];
out << '\n';

return out.str();
}

@@ -698,7 +697,8 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second)
/// Will store merged result in `first`.

/// This map contains nodes which should be removed from `first` index, cause they are used as inputs for `second`.
std::unordered_set<Node *> removed_first_result;
/// The second element is the number of removes (cause one node may be repeated several times in result).
std::unordered_map<Node *, size_t> removed_first_result;
/// Map inputs of `second` to nodes of `first`.
std::unordered_map<Node *, Node *> inputs_map;

@@ -723,7 +723,7 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second)
else
{
inputs_map[node] = it->second.front();
removed_first_result.emplace(it->second.front());
removed_first_result[it->second.front()] += 1;
it->second.pop_front();
}
}

@@ -767,8 +767,12 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second)
auto cur = it;
++it;

if (removed_first_result.count(*cur))
auto jt = removed_first_result.find(*cur);
if (jt != removed_first_result.end() && jt->second > 0)
{
first.index.remove(cur);
--jt->second;
}
}

for (auto * node : second.index)
@@ -133,16 +133,6 @@ public:
insert(node);
}

void remove(Node * node)
{
auto it = map.find(node->result_name);
if (it != map.end())
return;

list.erase(it->second);
map.erase(it);
}

void remove(std::list<Node *>::iterator it)
{
auto map_it = map.find((*it)->result_name);

@@ -219,8 +209,6 @@ public:
/// Add alias actions and remove unused columns from index. Also specify result columns order in index.
void project(const NamesWithAliases & projection);

/// Removes column from index.
void removeColumn(const std::string & column_name);
/// If column is not in index, try to find it in nodes and insert back into index.
bool tryRestoreColumn(const std::string & column_name);

@@ -87,6 +87,7 @@ public:
const Actions & getActions() const { return actions; }
const std::list<Node> & getNodes() const { return actions_dag->getNodes(); }
const ActionsDAG & getActionsDAG() const { return *actions_dag; }
const ColumnNumbers & getResultPositions() const { return result_positions; }

/// Get a list of input columns.
Names getRequiredColumns() const;
@@ -1489,23 +1489,6 @@ void ExpressionAnalysisResult::finalize(const ExpressionActionsChain & chain, si
columns_to_remove.insert(step.required_output[i]);
}

if (!columns_to_remove.empty())
{
auto columns = prewhere_info->prewhere_actions->getResultColumns();

auto remove_actions = std::make_shared<ActionsDAG>();
for (const auto & column : columns)
{
if (columns_to_remove.count(column.name))
{
remove_actions->addInput(column);
remove_actions->removeColumn(column.name);
}
}

prewhere_info->remove_columns_actions = std::move(remove_actions);
}

columns_to_remove_after_prewhere = std::move(columns_to_remove);
}
else if (hasFilter())
@@ -467,8 +467,11 @@ void InterpreterSystemQuery::restartReplicas(Context & system_context)
guard.second = catalog.getDDLGuard(guard.first.database_name, guard.first.table_name);

ThreadPool pool(std::min(size_t(getNumberOfPhysicalCPUCores()), replica_names.size()));
for (auto & table : replica_names)
pool.scheduleOrThrowOnError([&]() { tryRestartReplica(table, system_context, false); });
for (auto & replica : replica_names)
{
LOG_TRACE(log, "Restarting replica on {}", replica.getNameForLogs());
pool.scheduleOrThrowOnError([&]() { tryRestartReplica(replica, system_context, false); });
}
pool.wait();
}
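The change above parallelises the replica restart loop and logs each replica as it is restarted. The statements it serves are the ordinary ones exercised by the tests later in this diff, for example:

```sql
-- Restart a single replicated table, or all replicated tables at once.
SYSTEM RESTART REPLICA test_alter_r1;
SYSTEM RESTART REPLICAS;
```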
@@ -7,6 +7,9 @@
#include <unistd.h>
#include <fcntl.h>

#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

namespace DB
{

@@ -46,7 +49,7 @@ void PollingQueue::addTask(size_t thread_number, void * data, int fd)
{
std::uintptr_t key = reinterpret_cast<uintptr_t>(data);
if (tasks.count(key))
throw Exception("Task was already added to task queue", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Task {} was already added to task queue", key);

tasks[key] = TaskData{thread_number, data, fd};

@@ -58,6 +61,22 @@ void PollingQueue::addTask(size_t thread_number, void * data, int fd)
throwFromErrno("Cannot add socket descriptor to epoll", ErrorCodes::CANNOT_OPEN_FILE);
}

static std::string dumpTasks(const std::unordered_map<std::uintptr_t, PollingQueue::TaskData> & tasks)
{
WriteBufferFromOwnString res;
res << "Tasks = [";

for (const auto & task : tasks)
{
res << "(id " << task.first << " thread " << task.second.thread_num << " ptr ";
writePointerHex(task.second.data, res);
res << " fd " << task.second.fd << ")";
}

res << "]";
return res.str();
}

PollingQueue::TaskData PollingQueue::wait(std::unique_lock<std::mutex> & lock)
{
if (is_finished)

@@ -81,10 +100,14 @@ PollingQueue::TaskData PollingQueue::wait(std::unique_lock<std::mutex> & lock)
if (event.data.ptr == pipe_fd)
return {};

std::uintptr_t key = reinterpret_cast<uintptr_t>(event.data.ptr);
void * ptr = event.data.ptr;
std::uintptr_t key = reinterpret_cast<uintptr_t>(ptr);
auto it = tasks.find(key);
if (it == tasks.end())
throw Exception("Task was not found in task queue", ErrorCodes::LOGICAL_ERROR);
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "Task {} ({}) was not found in task queue: {}",
key, ptr, dumpTasks(tasks));
}

auto res = it->second;
tasks.erase(it);

@@ -98,7 +121,6 @@ PollingQueue::TaskData PollingQueue::wait(std::unique_lock<std::mutex> & lock)
void PollingQueue::finish()
{
is_finished = true;
tasks.clear();

uint64_t buf = 0;
while (-1 == write(pipe_fd[1], &buf, sizeof(buf)))
@@ -100,6 +100,11 @@ void ExpressionStep::describeActions(FormatSettings & settings) const
first = false;
settings.out << action.toString() << '\n';
}

settings.out << prefix << "Positions:";
for (const auto & pos : expression->getResultPositions())
settings.out << ' ' << pos;
settings.out << '\n';
}

JoinStep::JoinStep(const DataStream & input_stream_, JoinPtr join_)
@@ -140,6 +140,12 @@ void MergeTreeDataPartWriterWide::shiftCurrentMark(const Granules & granules_wri
/// If we didn't finished last granule than we will continue to write it from new block
if (!last_granule.is_complete)
{
if (settings.blocks_are_granules_size)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Incomplete granules are not allowed while blocks are granules size. "
"Mark number {} (rows {}), rows written in last mark {}, rows to write in last mark from block {} (from row {}), total marks currently {}",
last_granule.mark_number, index_granularity.getMarkRows(last_granule.mark_number), rows_written_in_last_mark,
last_granule.rows_to_write, last_granule.start_row, index_granularity.getMarksCount());

/// Shift forward except last granule
setCurrentMark(getCurrentMark() + granules_written.size() - 1);
bool still_in_the_same_granule = granules_written.size() == 1;

@@ -161,7 +167,7 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm
{
/// Fill index granularity for this block
/// if it's unknown (in case of insert data or horizontal merge,
/// but not in case of vertical merge)
/// but not in case of vertical part of vertical merge)
if (compute_granularity)
{
size_t index_granularity_for_block = computeIndexGranularity(block);

@@ -451,12 +457,25 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const String & name,
/// Now they must be equal
if (column->size() != index_granularity_rows)
{
if (must_be_last && !settings.can_use_adaptive_granularity)
break;

if (must_be_last)
{
/// The only possible mark after bin.eof() is final mark. When we
/// cannot use adaptive granularity we cannot have last mark.
/// So finish validation.
if (!settings.can_use_adaptive_granularity)
break;

/// If we don't compute granularity then we are not responsible
/// for last mark (for example we mutating some column from part
/// with fixed granularity where last mark is not adjusted)
if (!compute_granularity)
continue;
}

throw Exception(
ErrorCodes::LOGICAL_ERROR, "Incorrect mark rows for mark #{} (compressed offset {}, decompressed offset {}), actually in bin file {}, in mrk file {}",
mark_num, offset_in_compressed_file, offset_in_decompressed_block, column->size(), index_granularity.getMarkRows(mark_num));
ErrorCodes::LOGICAL_ERROR, "Incorrect mark rows for mark #{} (compressed offset {}, decompressed offset {}), actually in bin file {}, in mrk file {}, total marks {}",
mark_num, offset_in_compressed_file, offset_in_decompressed_block, column->size(), index_granularity.getMarkRows(mark_num), index_granularity.getMarksCount());
}
}

@@ -483,7 +502,14 @@ void MergeTreeDataPartWriterWide::finishDataSerialization(IMergeTreeDataPart::Ch
serialize_settings.low_cardinality_use_single_dictionary_for_part = global_settings.low_cardinality_use_single_dictionary_for_part != 0;
WrittenOffsetColumns offset_columns;
if (rows_written_in_last_mark > 0)
{
if (settings.blocks_are_granules_size)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Incomplete granule is not allowed while blocks are granules size even for last granule. "
"Mark number {} (rows {}), rows written for last mark {}, total marks {}",
getCurrentMark(), index_granularity.getMarkRows(getCurrentMark()), rows_written_in_last_mark, index_granularity.getMarksCount());

adjustLastMarkIfNeedAndFlushToDisk(rows_written_in_last_mark);
}

bool write_final_mark = (with_final_mark && data_written);

@@ -514,7 +540,7 @@ void MergeTreeDataPartWriterWide::finishDataSerialization(IMergeTreeDataPart::Ch

#ifndef NDEBUG
/// Heavy weight validation of written data. Checks that we are able to read
/// data according to marks. Otherwise throws LOGICAL_ERROR (equal to about in debug mode)
/// data according to marks. Otherwise throws LOGICAL_ERROR (equal to abort in debug mode)
for (const auto & column : columns_list)
{
if (column.type->isValueRepresentedByNumber() && !column.type->haveSubtypes())
@@ -115,6 +115,7 @@ struct Settings;
/** Obsolete settings. Kept for backward compatibility only. */ \
M(UInt64, min_relative_delay_to_yield_leadership, 120, "Obsolete setting, does nothing.", 0) \
M(UInt64, check_delay_period, 60, "Obsolete setting, does nothing.", 0) \
M(Bool, allow_floating_point_partition_key, false, "Allow floating point as partition key", 0) \
/// Settings that should not change after the creation of a table.
#define APPLY_FOR_IMMUTABLE_MERGE_TREE_SETTINGS(M) \
M(index_granularity)
@@ -718,6 +718,15 @@ static StoragePtr create(const StorageFactory::Arguments & args)
++arg_num;
}

DataTypes data_types = metadata.partition_key.data_types;
if (!args.attach && !storage_settings->allow_floating_point_partition_key)
{
for (size_t i = 0; i < data_types.size(); ++i)
if (isFloat(data_types[i]))
throw Exception(
"Donot support float point as partition key: " + metadata.partition_key.column_names[i], ErrorCodes::BAD_ARGUMENTS);
}

if (arg_num != arg_cnt)
throw Exception("Wrong number of engine arguments.", ErrorCodes::BAD_ARGUMENTS);
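The check added above rejects floating-point partition keys unless the new `allow_floating_point_partition_key` MergeTree setting is enabled. A minimal sketch of how this surfaces in SQL (the table name is illustrative; the same behaviour is exercised by the new stateless test later in this diff):

```sql
-- Rejected by the new check: Float32 used as the partition key.
CREATE TABLE t_float_pk (a Float32, b Int32)
ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a; -- { serverError 36 }

-- Accepted once the per-table setting is enabled explicitly.
CREATE TABLE t_float_pk (a Float32, b Int32)
ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a
SETTINGS allow_floating_point_partition_key = true;
```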
@@ -1,5 +1,6 @@
#include <Storages/MutationCommands.h>
#include <IO/Operators.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Parsers/formatAST.h>
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/ParserAlterQuery.h>

@@ -133,13 +134,13 @@ void MutationCommands::writeText(WriteBuffer & out) const
{
WriteBufferFromOwnString commands_buf;
formatAST(*ast(), commands_buf, /* hilite = */ false, /* one_line = */ true);
out << escape << commands_buf.str();
writeEscapedString(commands_buf.str(), out);
}

void MutationCommands::readText(ReadBuffer & in)
{
String commands_str;
in >> escape >> commands_str;
readEscapedString(commands_str, in);

ParserAlterCommandList p_alter_commands;
auto commands_ast = parseQuery(
@@ -5561,9 +5561,9 @@ void StorageReplicatedMergeTree::getClearBlocksInPartitionOps(
continue;

ReadBufferFromString buf(result.data);
Int64 block_num = 0;
bool parsed = tryReadIntText(block_num, buf) && buf.eof();
if (!parsed || (min_block_num <= block_num && block_num <= max_block_num))
MergeTreePartInfo part_info;
bool parsed = MergeTreePartInfo::tryParsePartName(result.data, &part_info, format_version);
if (!parsed || (min_block_num <= part_info.min_block && part_info.max_block <= max_block_num))
ops.emplace_back(zkutil::makeRemoveRequest(path, -1));
}
}

@@ -6150,7 +6150,7 @@ bool StorageReplicatedMergeTree::dropPart(

Coordination::Requests ops;
getClearBlocksInPartitionOps(ops, *zookeeper, part_info.partition_id, part_info.min_block, part_info.max_block);
size_t clean_block_ops_size = ops.size();
size_t clear_block_ops_size = ops.size();

/// Set fake level to treat this part as virtual in queue.
auto drop_part_info = part->info;

@@ -6178,7 +6178,7 @@ bool StorageReplicatedMergeTree::dropPart(
else
zkutil::KeeperMultiException::check(rc, ops, responses);

String log_znode_path = dynamic_cast<const Coordination::CreateResponse &>(*responses[clean_block_ops_size + 1]).path_created;
String log_znode_path = dynamic_cast<const Coordination::CreateResponse &>(*responses[clear_block_ops_size + 1]).path_created;
entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1);

return true;
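The `dropPart` path above now clears deduplication block entries by parsing full part names rather than bare block numbers. From SQL this code is driven by the statement used in the new 01650 test further down in this diff:

```sql
-- Drop a single data part by name; a subsequent insert of the same data
-- into that partition must no longer be deduplicated against it.
ALTER TABLE partitioned_table DROP PART '3_1_1_0';
```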
@@ -58,7 +58,7 @@ def test_mutate_and_upgrade(start_cluster):
assert node1.query("SELECT COUNT() FROM mt") == "2\n"
assert node2.query("SELECT COUNT() FROM mt") == "2\n"

node1.query("ALTER TABLE mt MODIFY COLUMN id String DEFAULT '0'",
node1.query("ALTER TABLE mt MODIFY COLUMN id Int32 DEFAULT 0",
settings={"replication_alter_partitions_sync": "2"})

node2.query("OPTIMIZE TABLE mt FINAL")

@@ -151,7 +151,7 @@ $CLICKHOUSE_CLIENT --query="SELECT count(), sum(d), uniqExact(_part) FROM dst_r2

$CLICKHOUSE_CLIENT --query="SELECT 'After restart';"
$CLICKHOUSE_CLIENT --query="SYSTEM RESTART REPLICA dst_r1;"
$CLICKHOUSE_CLIENT --query="SYSTEM RESTART REPLICAS;"
$CLICKHOUSE_CLIENT --query="SYSTEM RESTART REPLICA dst_r2;"
$CLICKHOUSE_CLIENT --query="SELECT count(), sum(d) FROM dst_r1;"
$CLICKHOUSE_CLIENT --query="SELECT count(), sum(d) FROM dst_r2;"

@@ -70,4 +70,5 @@ SELECT count(*) FROM bitmap_test WHERE 0 = bitmapHasAny((SELECT groupBitmapState

SELECT bitmapToArray(bitmapAnd(groupBitmapState(uid), bitmapBuild(CAST([4294967296, 4294967297, 4294967298], 'Array(UInt64)')))) FROM bitmap_test GROUP BY city_id;

DROP TABLE bitmap_state_test;
DROP TABLE bitmap_test;
@@ -19,7 +19,7 @@ function rename_thread_1()
replica_01108_3 TO replica_01108_3_tmp,
replica_01108_4 TO replica_01108_4_tmp";
sleep 0.$RANDOM;
done
done
}

function rename_thread_2()

@@ -30,23 +30,27 @@ function rename_thread_2()
replica_01108_3_tmp TO replica_01108_4,
replica_01108_4_tmp TO replica_01108_1";
sleep 0.$RANDOM;
done
done
}

function restart_thread_1()
function restart_replicas_loop()
{
while true; do
$CLICKHOUSE_CLIENT -q "SYSTEM RESTART REPLICAS";
sleep 0.$RANDOM;
for i in $(seq 4); do
$CLICKHOUSE_CLIENT -q "SYSTEM RESTART REPLICA replica_01108_${i}";
$CLICKHOUSE_CLIENT -q "SYSTEM RESTART REPLICA replica_01108_${i}_tmp";
done
sleep 0.$RANDOM;
done
}
function restart_thread_1()
{
restart_replicas_loop
}

function restart_thread_2()
{
while true; do
$CLICKHOUSE_CLIENT -q "SYSTEM RESTART REPLICAS";
sleep 0.$RANDOM;
done
restart_replicas_loop
}

export -f rename_thread_1;
@@ -50,4 +50,4 @@ $CLICKHOUSE_CLIENT -q "SELECT if(quantile(0.5)(query_duration_ms) < $max_time_ms

$CLICKHOUSE_CLIENT -q "SELECT count() * $count_multiplier, i, d, s, n.i, n.f FROM $db.table_merge GROUP BY i, d, s, n.i, n.f ORDER BY i"

$CLICKHOUSE_CLIENT -q "DROP DATABASE $db"
$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS $db"

@@ -17,7 +17,9 @@ ALTER TABLE test_alter_r2 MODIFY COLUMN x DEFAULT '2000-01-01' SETTINGS replicat
DESCRIBE TABLE test_alter_r1;
DESCRIBE TABLE test_alter_r2;

SYSTEM RESTART REPLICAS;
SYSTEM RESTART REPLICA test_alter_r1;
SYSTEM RESTART REPLICA test_alter_r2;

DESCRIBE TABLE test_alter_r1;
DESCRIBE TABLE test_alter_r2;
@@ -15,10 +15,12 @@ for i in $(seq 1 $NUM_REPLICAS); do
"
done

valid_exceptions_to_retry='Quorum for previous write has not been satisfied yet|Another quorum insert has been already started|Unexpected logical error while adding block'

function thread {
for x in {0..99}; do
while true; do
$CLICKHOUSE_CLIENT --insert_quorum 5 --insert_quorum_parallel 0 --query "INSERT INTO r$1 SELECT $x" 2>&1 | grep -qF 'Quorum for previous write has not been satisfied yet' || break
$CLICKHOUSE_CLIENT --insert_quorum 5 --insert_quorum_parallel 0 --query "INSERT INTO r$1 SELECT $x" 2>&1 | grep -qE "$valid_exceptions_to_retry" || break
done
done
}

@@ -32,7 +34,9 @@ wait

for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
SYSTEM SYNC REPLICA r$i;
SELECT count(), min(x), max(x), sum(x) FROM r$i;
DROP TABLE IF EXISTS r$i;
"
SELECT count(), min(x), max(x), sum(x) FROM r$i;"
done

for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS r$i;"
done
@@ -8,13 +8,13 @@ DROP TABLE IF EXISTS binary_op_mono7;
DROP TABLE IF EXISTS binary_op_mono8;

CREATE TABLE binary_op_mono1(i int, j int) ENGINE MergeTree PARTITION BY toDate(i / 1000) ORDER BY j;
CREATE TABLE binary_op_mono2(i int, j int) ENGINE MergeTree PARTITION BY 1000 / i ORDER BY j;
CREATE TABLE binary_op_mono2(i int, j int) ENGINE MergeTree PARTITION BY 1000 / i ORDER BY j settings allow_floating_point_partition_key=true;;
CREATE TABLE binary_op_mono3(i int, j int) ENGINE MergeTree PARTITION BY i + 1000 ORDER BY j;
CREATE TABLE binary_op_mono4(i int, j int) ENGINE MergeTree PARTITION BY 1000 + i ORDER BY j;
CREATE TABLE binary_op_mono5(i int, j int) ENGINE MergeTree PARTITION BY i - 1000 ORDER BY j;
CREATE TABLE binary_op_mono6(i int, j int) ENGINE MergeTree PARTITION BY 1000 - i ORDER BY j;
CREATE TABLE binary_op_mono7(i int, j int) ENGINE MergeTree PARTITION BY i / 1000.0 ORDER BY j;
CREATE TABLE binary_op_mono8(i int, j int) ENGINE MergeTree PARTITION BY 1000.0 / i ORDER BY j;
CREATE TABLE binary_op_mono7(i int, j int) ENGINE MergeTree PARTITION BY i / 1000.0 ORDER BY j settings allow_floating_point_partition_key=true;;
CREATE TABLE binary_op_mono8(i int, j int) ENGINE MergeTree PARTITION BY 1000.0 / i ORDER BY j settings allow_floating_point_partition_key=true;;

INSERT INTO binary_op_mono1 VALUES (toUnixTimestamp('2020-09-01 00:00:00') * 1000, 1), (toUnixTimestamp('2020-09-01 00:00:00') * 1000, 2);
INSERT INTO binary_op_mono2 VALUES (1, 1), (10000, 2);
@@ -84,3 +84,6 @@ function test_with_engine {
test_with_engine TinyLog
test_with_engine StripeLog
test_with_engine Log

$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t1"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t2"

@@ -0,0 +1,7 @@
DROP TABLE IF EXISTS test;
CREATE TABLE test (a Float32, b int) Engine = MergeTree() ORDER BY tuple() PARTITION BY a; -- { serverError 36 }
CREATE TABLE test (a Float32, b int) Engine = MergeTree() ORDER BY tuple() PARTITION BY a settings allow_floating_point_partition_key=true;
DROP TABLE IF EXISTS test;
CREATE TABLE test (a Float32, b int, c String, d Float64) Engine = MergeTree() ORDER BY tuple() PARTITION BY (b, c, d) settings allow_floating_point_partition_key=false; -- { serverError 36 }
CREATE TABLE test (a Float32, b int, c String, d Float64) Engine = MergeTree() ORDER BY tuple() PARTITION BY (b, c, d) settings allow_floating_point_partition_key=true;
DROP TABLE IF EXISTS test;
@@ -0,0 +1,2 @@
x Date
s String

@@ -0,0 +1,5 @@
DROP TABLE IF EXISTS data_01646;
CREATE TABLE data_01646 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01646/data_01646', 'r') ORDER BY s PARTITION BY x;
SYSTEM RESTART REPLICAS;
DESCRIBE TABLE data_01646;
DROP TABLE data_01646;

@@ -0,0 +1,2 @@
foo
foo
tests/queries/0_stateless/01648_mutations_and_escaping.sql (new file, 18 lines)
@@ -0,0 +1,18 @@
DROP TABLE IF EXISTS mutations_and_escaping_1648;

CREATE TABLE mutations_and_escaping_1648 (d Date, e Enum8('foo'=1, 'bar'=2)) Engine = MergeTree(d, (d), 8192);
INSERT INTO mutations_and_escaping_1648 (d, e) VALUES ('2018-01-01', 'foo');
INSERT INTO mutations_and_escaping_1648 (d, e) VALUES ('2018-01-02', 'bar');

-- slow mutation
ALTER TABLE mutations_and_escaping_1648 UPDATE e = CAST('foo', 'Enum8(\'foo\' = 1, \'bar\' = 2)') WHERE d='2018-01-02' and sleepEachRow(1) = 0;

-- check that we able to read mutation text after serialization
DETACH TABLE mutations_and_escaping_1648;
ATTACH TABLE mutations_and_escaping_1648;

ALTER TABLE mutations_and_escaping_1648 UPDATE e = CAST('foo', 'Enum8(\'foo\' = 1, \'bar\' = 2)') WHERE d='2018-01-02' SETTINGS mutations_sync = 1;

SELECT e FROM mutations_and_escaping_1648 ORDER BY d;

DROP TABLE mutations_and_escaping_1648;
@@ -0,0 +1,46 @@
1 1_0_0_0
1 1_1_1_0
2 2_0_0_0
2 2_1_1_0
3 3_0_0_0
3 3_1_1_0
1_ 1_0_0_0
1_ 1_1_1_0
2_ 2_0_0_0
2_ 2_1_1_0
3_ 3_0_0_0
3_ 3_1_1_0
1 1_0_0_0
1 1_1_1_0
2 2_0_0_0
2 2_1_1_0
3 3_0_0_0
3 3_1_1_0
1_ 1_0_0_0
1_ 1_1_1_0
2_ 2_0_0_0
2_ 2_1_1_0
3_ 3_0_0_0
3_ 3_1_1_0
1 1_0_0_0
1 1_1_1_0
2 2_0_0_0
2 2_1_1_0
3 3_0_0_0
1_ 1_0_0_0
1_ 1_1_1_0
2_ 2_0_0_0
2_ 2_1_1_0
3_ 3_0_0_0
1 1_0_0_0
1 1_1_1_0
2 2_0_0_0
2 2_1_1_0
3 3_0_0_0
3 3_2_2_0
1_ 1_0_0_0
1_ 1_1_1_0
2_ 2_0_0_0
2_ 2_1_1_0
3_ 3_0_0_0
3_ 3_2_2_0
@@ -0,0 +1,39 @@
DROP TABLE IF EXISTS partitioned_table;

CREATE TABLE partitioned_table (
key UInt64,
partitioner UInt8,
value String
)
ENGINE ReplicatedMergeTree('/clickhouse/test/01650_drop_part_and_deduplication/partitioned_table', '1')
ORDER BY key
PARTITION BY partitioner;

SYSTEM STOP MERGES partitioned_table;

INSERT INTO partitioned_table VALUES (1, 1, 'A'), (2, 2, 'B'), (3, 3, 'C');
INSERT INTO partitioned_table VALUES (11, 1, 'AA'), (22, 2, 'BB'), (33, 3, 'CC');

SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name;

SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/test/01650_drop_part_and_deduplication/partitioned_table/blocks/' ORDER BY value;

INSERT INTO partitioned_table VALUES (33, 3, 'CC'); -- must be deduplicated

SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name;

SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/test/01650_drop_part_and_deduplication/partitioned_table/blocks/' ORDER BY value;

ALTER TABLE partitioned_table DROP PART '3_1_1_0';

SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name;

SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/test/01650_drop_part_and_deduplication/partitioned_table/blocks/' ORDER BY value;

INSERT INTO partitioned_table VALUES (33, 3, 'CC'); -- mustn't be deduplicated

SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name;

SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/test/01650_drop_part_and_deduplication/partitioned_table/blocks/' ORDER BY value;

DROP TABLE IF EXISTS partitioned_table;
@@ -0,0 +1 @@
\N \N

tests/queries/0_stateless/01650_expressions_merge_bug.sql (new file, 21 lines)
@@ -0,0 +1,21 @@

SELECT
NULL IN
(
SELECT
9223372036854775807,
9223372036854775807
),
NULL
FROM
(
SELECT DISTINCT
NULL,
NULL,
NULL IN
(
SELECT (NULL, '-1')
),
NULL
FROM numbers(1024)
)
@@ -15,7 +15,8 @@
"01526_max_untracked_memory", /// requires TraceCollector, does not available under sanitizers
"01474_executable_dictionary", /// informational stderr from sanitizer at start
"functions_bad_arguments", /// Too long for TSan
"01603_read_with_backoff_bug" /// Too long for TSan
"01603_read_with_backoff_bug", /// Too long for TSan
"01646_system_restart_replicas_smoke" /// RESTART REPLICAS can acquire too much locks, while only 64 is possible from one thread under TSan
],
"address-sanitizer": [
"00877",

@@ -103,14 +104,37 @@
"01482_move_to_prewhere_and_cast" /// bug, shoud be fixed
],
"antlr": [
"00186_very_long_arrays",
"00233_position_function_sql_comparibilty",
"00417_kill_query",
"00534_functions_bad_arguments12",
"00534_functions_bad_arguments2",
"00534_functions_bad_arguments4",
"00534_functions_bad_arguments9",
"00564_temporary_table_management",
"00626_replace_partition_from_table_zookeeper",
"00652_replicated_mutations_zookeeper",
"00687_top_and_offset",
"00746_sql_fuzzy",
"00763_create_query_as_table_engine_bug",
"00765_sql_compatibility_aliases",
"00825_protobuf_format_input",
"00826_cross_to_inner_join",
"00834_not_between",
"00909_kill_not_initialized_query",
"00938_template_input_format",
"00939_limit_by_offset",
"00943_materialize_index",
"00944_clear_index_in_partition",
"00952_input_function",
"00953_constraints_operations",
"00954_client_prepared_statements",
"00956_sensitive_data_masking",
"00969_columns_clause",
"00975_indices_mutation_replicated_zookeeper",
"00975_values_list",
"00976_system_stop_ttl_merges",
"00977_int_div",
"00978_table_function_values_alias",
"00980_merge_alter_settings",
"00980_zookeeper_merge_tree_alter_settings",

@@ -121,16 +145,22 @@
"01001_enums_in_in_section",
"01011_group_uniq_array_memsan",
"01011_test_create_as_skip_indices",
"01014_format_custom_separated",
"01015_attach_part",
"01015_database_bad_tables",
"01017_uniqCombined_memory_usage",
"01019_alter_materialized_view_atomic",
"01019_alter_materialized_view_consistent",
"01019_alter_materialized_view_query",
"01021_tuple_parser",
"01025_array_compact_generic",
"01030_limit_by_with_ties_error",
"01033_quota_dcl",
"01034_with_fill_and_push_down_predicate",
"01035_avg_weighted_long",
"01039_row_policy_dcl",
"01039_test_setting_parse",
"01042_system_reload_dictionary_reloads_completely",
"01045_dictionaries_restrictions",
"01048_exists_query",
"01055_compact_parts_1",

@@ -142,11 +172,15 @@
"01073_show_tables_not_like",
"01074_partial_revokes",
"01075_allowed_client_hosts",
"01085_regexp_input_format",
"01086_regexp_input_format_skip_unmatched",
"01089_alter_settings_old_format",
"01095_tpch_like_smoke",
"01107_atomic_db_detach_attach",
"01109_exchange_tables",
"01109_sc0rp10_string_hash_map_zero_bytes",
"01110_dictionary_layout_without_arguments",
"01114_database_atomic",
"01114_materialize_clear_index_compact_parts",
"01115_join_with_dictionary",
"01117_comma_and_others_join_mix",

@@ -156,9 +190,13 @@
"01144_multiword_data_types",
"01145_with_fill_const",
"01149_zookeeper_mutation_stuck_after_replace_partition",
"01150_ddl_guard_rwr",
"01185_create_or_replace_table",
"01187_set_profile_as_setting",
"01188_attach_table_from_path",
"01190_full_attach_syntax",
"01191_rename_dictionary",
"01192_rename_database_zookeeper",
"01210_drop_view",
"01213_alter_rename_column",
"01232_untuple",

@@ -173,19 +211,23 @@
"01272_offset_without_limit",
"01275_parallel_mv",
"01277_alter_rename_column_constraint_zookeeper",
"01278_min_insert_block_size_rows_for_materialized_views",
"01280_min_map_max_map",
"01280_null_in",
"01280_ttl_where_group_by_negative",
"01280_ttl_where_group_by",
"01280_unicode_whitespaces_lexer",
"01292_create_user",
"01293_create_role",
"01293_pretty_max_value_width",
"01293_show_settings",
"01294_create_settings_profile",
"01294_lazy_database_concurrent_recreate_reattach_and_show_tables",
"01295_create_row_policy",
"01296_create_row_policy_in_current_database",
"01297_create_quota",
"01308_row_policy_and_trivial_count_query",
"01317_no_password_in_command_line",
"01318_map_add_map_subtract",
"01322_any_input_optimize",
"01324_if_transform_strings_to_enum",

@@ -197,7 +239,10 @@
"01378_alter_rename_with_ttl_zookeeper",
"01379_with_fill_several_columns",
"01397_in_bad_arguments",
"01412_mod_float",
"01415_table_function_view",
"01417_freeze_partition_verbose_zookeeper",
"01417_freeze_partition_verbose",
"01419_merge_tree_settings_sanity_check",
"01430_modify_sample_by_zookeeper",
"01447_json_strings",

@@ -210,19 +255,25 @@
"01463_test_alter_live_view_refresh",
"01465_ttl_recompression",
"01470_columns_transformers",
"01470_columns_transformers2",
"01470_explain",
"01470_show_databases_like",
"01470_test_insert_select_asterisk",
"01491_nested_multiline_comments",
"01493_table_function_null",
"01495_subqueries_in_with_statement",
"01495_subqueries_in_with_statement_2",
"01495_subqueries_in_with_statement_3",
"01495_subqueries_in_with_statement",
"01501_clickhouse_client_INSERT_exception",
"01504_compression_multiple_streams",
"01508_explain_header",
"01508_partition_pruning",
"01509_check_parallel_quorum_inserts",
"01509_parallel_quorum_and_merge",
"01515_mv_and_array_join_optimisation_bag",
"01516_create_table_primary_key",
"01517_drop_mv_with_inner_table",
"01523_client_local_queries_file_parameter",
"01523_interval_operator_support_string_literal",
"01525_select_with_offset_fetch_clause",
"01526_client_start_and_exit",

@@ -230,6 +281,7 @@
"01530_drop_database_atomic_sync",
"01532_execute_merges_on_single_replica",
"01532_primary_key_without_order_by_zookeeper",
"01541_max_memory_usage_for_user",
"01551_mergetree_read_in_order_spread",
"01552_dict_fixedstring",
"01554_bloom_filter_index_big_integer_uuid",

@@ -238,14 +290,26 @@
"01562_optimize_monotonous_functions_in_order_by",
"01581_deduplicate_by_columns_local",
"01581_deduplicate_by_columns_replicated",
"01582_distinct_optimization",
"01590_countSubstrings",
"01593_insert_settings",
"01594_too_low_memory_limits",
"01596_setting_limit_offset",
"01600_log_queries_with_extensive_info",
"01600_quota_by_forwarded_ip",
"01601_detach_permanently",
"01602_show_create_view",
"01603_read_with_backoff_bug",
"01604_explain_ast_of_nonselect_query",
"01605_drop_settings_profile_while_assigned",
"01605_skip_idx_compact_parts",
"01606_git_import"
"01606_git_import",
"01606_merge_from_wide_to_compact",
"01614_with_fill_with_limit",
"01632_max_partitions_to_read",
"01638_div_mod_ambiguities",
"01642_if_nullable_regression",
"01643_system_suspend"
],
"parallel":
[