Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 16:42:05 +00:00)

Commit e4433157e7: Merge branch 'master' into insert_quorum_parallel-default
@@ -6,6 +6,7 @@ set (SRCS
     demangle.cpp
     getFQDNOrHostName.cpp
     getMemoryAmount.cpp
+    getPageSize.cpp
     getThreadId.cpp
     JSON.cpp
     LineReader.cpp
@@ -1,5 +1,6 @@
 #include <stdexcept>
 #include "common/getMemoryAmount.h"
+#include "common/getPageSize.h"

 #include <unistd.h>
 #include <sys/types.h>
@@ -18,7 +19,7 @@ uint64_t getMemoryAmountOrZero()
     if (num_pages <= 0)
         return 0;

-    int64_t page_size = sysconf(_SC_PAGESIZE);
+    int64_t page_size = getPageSize();
     if (page_size <= 0)
         return 0;
base/common/getPageSize.cpp (new file, 8 lines)
@@ -0,0 +1,8 @@
+#include "common/getPageSize.h"
+
+#include <unistd.h>
+
+Int64 getPageSize()
+{
+    return sysconf(_SC_PAGESIZE);
+}
base/common/getPageSize.h (new file, 6 lines)
@@ -0,0 +1,6 @@
+#pragma once
+
+#include "common/types.h"
+
+/// Get memory page size
+Int64 getPageSize();
@@ -47,6 +47,7 @@ SRCS(
    errnoToString.cpp
    getFQDNOrHostName.cpp
    getMemoryAmount.cpp
+   getPageSize.cpp
    getResource.cpp
    getThreadId.cpp
    mremap.cpp
contrib/boost (vendored)
@@ -1 +1 @@
-Subproject commit a7ceabe4747ecc3309dd3dcd9de4b29660dfd298
+Subproject commit 0b98b443aa7bb77d65efd7b23b3b8c8a0ab5f1f3
@@ -31,6 +31,7 @@ find . -name '*.so.*' -print -exec mv '{}' /output \;
 if [ "performance" == "$COMBINED_OUTPUT" ]
 then
     cp -r ../tests/performance /output
+    cp -r ../tests/config/top_level_domains /
     cp -r ../docker/test/performance-comparison/config /output ||:
     rm /output/unit_tests_dbms ||:
     rm /output/clickhouse-odbc-bridge ||:
@@ -39,7 +39,7 @@ data_compressed_bytes: 499
 last_exception:
 ```

-**See also**
+**See Also**

 - [Distributed table engine](../../engines/table-engines/special/distributed.md)
docs/en/operations/system-tables/replication_queue.md (new file, 81 lines)
@@ -0,0 +1,81 @@
+# system.replication_queue {#system_tables-replication_queue}
+
+Contains information about tasks from replication queues stored in ZooKeeper for tables in the `ReplicatedMergeTree` family.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database.
+
+- `table` ([String](../../sql-reference/data-types/string.md)) — Name of the table.
+
+- `replica_name` ([String](../../sql-reference/data-types/string.md)) — Replica name in ZooKeeper. Different replicas of the same table have different names.
+
+- `position` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Position of the task in the queue.
+
+- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
+
+- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`.
+
+- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
+
+- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.
+
+- `source_replica` ([String](../../sql-reference/data-types/string.md)) — Name of the source replica.
+
+- `new_part_name` ([String](../../sql-reference/data-types/string.md)) — Name of the new part.
+
+- `parts_to_merge` ([Array](../../sql-reference/data-types/array.md) ([String](../../sql-reference/data-types/string.md))) — Names of parts to merge or update.
+
+- `is_detach` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag indicates whether the `DETACH_PARTS` task is in the queue.
+
+- `is_currently_executing` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag indicates whether a specific task is being performed right now.
+
+- `num_tries` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of failed attempts to complete the task.
+
+- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
+
+- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
+
+- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.
+
+- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.
+
+- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
+
+- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it's a mutation.
+
+**Example**
+
+``` sql
+SELECT * FROM system.replication_queue LIMIT 1 FORMAT Vertical;
+```
+
+``` text
+Row 1:
+──────
+database:               merge
+table:                  visits_v2
+replica_name:           mtgiga001-1t.metrika.yandex.net
+position:               15
+node_name:              queue-0009325559
+type:                   MERGE_PARTS
+create_time:            2020-12-07 14:04:21
+required_quorum:        0
+source_replica:         mtgiga001-1t.metrika.yandex.net
+new_part_name:          20201130_121373_121384_2
+parts_to_merge:         ['20201130_121373_121378_1','20201130_121379_121379_0','20201130_121380_121380_0','20201130_121381_121381_0','20201130_121382_121382_0','20201130_121383_121383_0','20201130_121384_121384_0']
+is_detach:              0
+is_currently_executing: 0
+num_tries:              36
+last_exception:         Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build))
+last_attempt_time:      2020-12-08 17:35:54
+num_postponed:          0
+postpone_reason:
+last_postpone_time:     1970-01-01 03:00:00
+```
+
+**See Also**
+
+- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md#query-language-system-replicated)
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replication_queue) <!--hide-->
@@ -25,6 +25,10 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a
 -If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array.

+## -SimpleState {#agg-functions-combinator-simplestate}
+
+If you apply this combinator, the aggregate function returns the same value but with a different type. This is a `SimpleAggregateFunction(...)` that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines.
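A minimal sketch of what the new combinator looks like in a query (an editorial illustration, not part of the patch; it assumes a supported nested function such as `min`):

``` sql
-- minSimpleState aggregates like min, but the result is typed as
-- SimpleAggregateFunction(min, UInt64), ready to store in an AggregatingMergeTree column.
SELECT minSimpleState(number) FROM numbers(5);
```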
 ## -State {#agg-functions-combinator-state}

 If you apply this combinator, the aggregate function doesn’t return the resulting value (such as the number of unique values for the [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later.
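For comparison, an illustrative use of -State with the long-standing `uniqState`/`uniqMerge` pair (again not part of the patch):

``` sql
-- uniqState returns an opaque AggregateFunction(uniq, UInt64) intermediate state;
-- uniqMerge finishes the aggregation from such states.
SELECT uniqMerge(s) FROM (SELECT uniqState(number) AS s FROM numbers(10));
```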
@@ -1288,12 +1288,30 @@ Returns the index of the first element in the `arr1` array for which `func` retu

 Note that the `arrayFirstIndex` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted.

+## arrayMin(\[func,\] arr1, …) {#array-min}
+
+Returns the minimum of the `func` values. If the function is omitted, it just returns the minimum of the array elements.
+
+Note that the `arrayMin` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
+
+## arrayMax(\[func,\] arr1, …) {#array-max}
+
+Returns the maximum of the `func` values. If the function is omitted, it just returns the maximum of the array elements.
+
+Note that the `arrayMax` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
+
 ## arraySum(\[func,\] arr1, …) {#array-sum}

 Returns the sum of the `func` values. If the function is omitted, it just returns the sum of the array elements.

 Note that the `arraySum` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.

+## arrayAvg(\[func,\] arr1, …) {#array-avg}
+
+Returns the average of the `func` values. If the function is omitted, it just returns the average of the array elements.
+
+Note that the `arrayAvg` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
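A quick illustration of the plain form versus the lambda form (an editorial sketch, not part of the patch):

``` sql
SELECT
    arraySum([1, 2, 3]),              -- 6: sum of the elements themselves
    arraySum(x -> x * x, [1, 2, 3]);  -- 14: sum of the func values (1 + 4 + 9)
```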
 ## arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1}

 Returns an array of partial sums of elements in the source array (a running sum). If the `func` function is specified, then the values of the array elements are converted by this function before summing.
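For example (an editorial sketch):

``` sql
SELECT arrayCumSum([1, 1, 1, 1]);  -- returns [1, 2, 3, 4]
```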
@@ -204,7 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]

 ## Managing ReplicatedMergeTree Tables {#query-language-system-replicated}

-ClickHouse can manage background replication related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication/#table_engines-replication) tables.
+ClickHouse can manage background replication related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) tables.

 ### STOP FETCHES {#query_language-system-stop-fetches}
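For orientation, the statements covered by this section follow the pattern visible in the hunk header above (a sketch; the bracketed parts are placeholders, as elsewhere on this page):

``` sql
SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name]
SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name]
```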
docs/ru/faq/general/index.md (new file, 25 lines)
@@ -0,0 +1,25 @@
+---
+title: General questions about ClickHouse
+toc_hidden_folder: true
+toc_priority: 1
+toc_title: General questions
+---
+
+# General questions about ClickHouse {#obshchie-voprosy}
+
+Questions:
+
+- What is ClickHouse?
+- Why is ClickHouse so fast?
+- Who is using ClickHouse?
+- What does the name “ClickHouse” mean?
+- What does “Не тормозит” mean?
+- What is OLAP?
+- What is a columnar database?
+- [Why not use something like MapReduce?](mapreduce.md)
+
+!!! info "If you haven’t found what you were looking for:"
+    Check the other F.A.Q. categories or search the rest of the documentation, using the table of contents on the left.
+
+{## [Original article](https://clickhouse.tech/docs/ru/faq/general/) ##}
@@ -1,8 +1,12 @@
-# General questions {#obshchie-voprosy}
+---
+title: Why not use something like MapReduce?
+toc_hidden: true
+toc_priority: 110
+---

 ## Why not use something like MapReduce? {#pochemu-by-ne-ispolzovat-sistemy-tipa-mapreduce}

 We will call MapReduce-like systems those distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT.

-Such systems are not suitable for online queries because of their too-high latency. In other words, they cannot be used as the backend for a web interface.
+Such systems are not suitable for real-time data updates.
@@ -10,47 +14,3 @@
 Distributed sorting is the main cause of slowness when running simple map-reduce tasks.

 Most MapReduce implementations allow executing arbitrary code on the cluster. But for OLAP tasks, a declarative query language is a better fit, allowing research to be carried out quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala, Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. However, performance on such tasks is highly suboptimal compared to specialized systems, and the comparatively high latency does not allow these systems to be used as the backend for a web interface.
-
-## What to do if I have a problem with encodings when using Oracle via ODBC? {#oracle-odbc-encodings}
-
-If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value of the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For details, see the [Oracle NLS_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
-
-**Example**
-
-``` sql
-NLS_LANG=RUSSIAN_RUSSIA.UTF8
-```
-
-## How to export data from ClickHouse to a file? {#how-to-export-to-file}
-
-### The INTO OUTFILE clause {#sektsiia-into-outfile}
-
-Add the [INTO OUTFILE](../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.
-
-For example:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file'
-```
-
-By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To choose a [data format](../interfaces/formats.md), use the [FORMAT clause](../sql-reference/statements/select/format.md#format-clause).
-
-For example:
-
-``` sql
-SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
-```
-
-### A table with the File engine {#tablitsa-s-dvizhkom-file}
-
-See [File](../engines/table-engines/special/file.md).
-
-### Command-line redirection {#perenapravlenie-v-komandoi-stroke}
-
-``` sql
-$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
-```
-
-See [clickhouse-client](../interfaces/cli.md).
-
-[Original article](https://clickhouse.tech/docs/en/faq/general/) <!--hide-->
@@ -4,3 +4,14 @@ toc_hidden: true
 toc_priority: 76
 ---

+# F.A.Q. contents {#soderzhanie}
+
+This section of the documentation collects the questions about ClickHouse that are asked most often.
+
+Categories:
+
+- **[General questions](../faq/general/index.md)**
+- **[Use cases](../faq/use-cases/index.md)**
+- **[Operations](../faq/operations/index.md)**
+- **[Integration](../faq/integration/index.md)**
docs/ru/faq/integration/file-export.md (new file, 37 lines)
@@ -0,0 +1,37 @@
+---
+title: How do I export data from ClickHouse to a file?
+toc_hidden: true
+toc_priority: 10
+---
+
+## How do I export data from ClickHouse to a file? {#how-to-export-to-file-rus}
+
+### The INTO OUTFILE clause {#sektsiia-into-outfile-rus}
+
+Add the [INTO OUTFILE](../../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.
+
+For example:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file'
+```
+
+By default, ClickHouse uses the [TabSeparated](../../interfaces/formats.md#tabseparated) format for output data. To choose a [data format](../../interfaces/formats.md), use the [FORMAT](../../sql-reference/statements/select/format.md#format-clause) clause.
+
+For example:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
+```
+
+## A table with the File engine {#using-a-file-engine-table}
+
+See [File](../../engines/table-engines/special/file.md).
+
+## Command-line redirection {#using-command-line-redirection}
+
+``` bash
+$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
+```
+
+See [clickhouse-client](../../interfaces/cli.md).
docs/ru/faq/integration/index.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+---
+title: Questions about integrating ClickHouse and other systems
+toc_hidden_folder: true
+toc_priority: 4
+toc_title: Integration
+---
+
+# Questions about integrating ClickHouse with other systems {#question-about-integrating-clickhouse-and-other-systems-rus}
+
+Questions:
+
+- [How do I export data from ClickHouse to a file?](file-export.md)
+- How do I import JSON into ClickHouse?
+- [What to do if I have a problem with encodings when using Oracle via ODBC?](oracle-odbc.md)
+
+!!! info "If you haven’t found what you were looking for"
+    Check the other F.A.Q. subsections or search the rest of the documentation, using the table of contents on the left.
+
+{## [Original article](https://clickhouse.tech/docs/ru/faq/integration/) ##}
docs/ru/faq/integration/oracle-odbc.md (new file, 15 lines)
@@ -0,0 +1,15 @@
+---
+title: What if I have a problem with encodings when using Oracle via ODBC?
+toc_hidden: true
+toc_priority: 20
+---
+
+## What to do if I have a problem with encodings when using Oracle via ODBC? {#oracle-odbc-encodings-rus}
+
+If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value of the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For details, see the [Oracle NLS_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
+
+**Example**
+
+``` sql
+NLS_LANG=RUSSIAN_RUSSIA.UTF8
+```
docs/ru/faq/operations/index.md (new file, 18 lines)
@@ -0,0 +1,18 @@
+---
+title: Questions about operating ClickHouse servers and clusters
+toc_hidden_folder: true
+toc_priority: 3
+toc_title: Operations
+---
+
+# Questions about operating ClickHouse servers and clusters {#voprosy-ob-operating-clickhouse-servers-and-clusters}
+
+Questions:
+
+- Which ClickHouse version to use in production?
+- Is it possible to delete old records from a ClickHouse table?
+
+!!! info "Don’t see what you were looking for?"
+    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.
+
+{## [Original article](https://clickhouse.tech/docs/en/faq/production/) ##}
docs/ru/faq/use-cases/index.md (new file, 14 lines)
@@ -0,0 +1,14 @@
+---
+title: Questions about ClickHouse use cases
+toc_hidden_folder: true
+toc_priority: 2
+toc_title: Use cases
+---
+
+# Questions about ClickHouse use cases {#voprosy-o-primenenii}
+
+Questions:
+
+- Can I use ClickHouse as a time-series database?
+- Can I use ClickHouse as a key-value storage?
docs/ru/operations/system-tables/replication_queue.md (new file, 81 lines)
@@ -0,0 +1,81 @@
+# system.replication_queue {#system_tables-replication_queue}
+
+Contains information about tasks from replication queues stored in ZooKeeper for tables in the `ReplicatedMergeTree` family.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database.
+
+- `table` ([String](../../sql-reference/data-types/string.md)) — Name of the table.
+
+- `replica_name` ([String](../../sql-reference/data-types/string.md)) — Replica name in ZooKeeper. Different replicas of the same table have different names.
+
+- `position` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Position of the task in the queue.
+
+- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
+
+- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`.
+
+- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
+
+- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.
+
+- `source_replica` ([String](../../sql-reference/data-types/string.md)) — Name of the source replica.
+
+- `new_part_name` ([String](../../sql-reference/data-types/string.md)) — Name of the new part.
+
+- `parts_to_merge` ([Array](../../sql-reference/data-types/array.md) ([String](../../sql-reference/data-types/string.md))) — Names of parts to merge or update.
+
+- `is_detach` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag indicates whether the `DETACH_PARTS` task is in the queue.
+
+- `is_currently_executing` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag indicates whether a specific task is being performed right now.
+
+- `num_tries` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of failed attempts to complete the task.
+
+- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
+
+- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
+
+- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.
+
+- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.
+
+- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
+
+- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it's a mutation.
+
+**Example**
+
+``` sql
+SELECT * FROM system.replication_queue LIMIT 1 FORMAT Vertical;
+```
+
+``` text
+Row 1:
+──────
+database:               merge
+table:                  visits_v2
+replica_name:           mtgiga001-1t.metrika.yandex.net
+position:               15
+node_name:              queue-0009325559
+type:                   MERGE_PARTS
+create_time:            2020-12-07 14:04:21
+required_quorum:        0
+source_replica:         mtgiga001-1t.metrika.yandex.net
+new_part_name:          20201130_121373_121384_2
+parts_to_merge:         ['20201130_121373_121378_1','20201130_121379_121379_0','20201130_121380_121380_0','20201130_121381_121381_0','20201130_121382_121382_0','20201130_121383_121383_0','20201130_121384_121384_0']
+is_detach:              0
+is_currently_executing: 0
+num_tries:              36
+last_exception:         Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build))
+last_attempt_time:      2020-12-08 17:35:54
+num_postponed:          0
+postpone_reason:
+last_postpone_time:     1970-01-01 03:00:00
+```
+
+**See Also**
+
+- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md#query-language-system-replicated)
+
+[Original article](https://clickhouse.tech/docs/ru/operations/system_tables/replication_queue) <!--hide-->
@@ -199,7 +199,7 @@ SOURCE(ODBC(

 ClickHouse receives quoting information from the ODBC driver and quotes settings in queries to the driver accordingly, so the table name must be specified with the same letter case as in the database.

-If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/general.md#oracle-odbc-encodings) section.
+If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) section.

 ### A known vulnerability in how ODBC dictionaries function {#vyiavlennaia-uiazvimost-v-funktsionirovanii-odbc-slovarei}
@@ -9,19 +9,21 @@ toc_title: "\u5176\u4ED6"

 ## ATTACH {#attach}

-This query is exactly the same as `CREATE`, but
+Similar to `CREATE`, but with some differences:

-- Instead of the word `CREATE` it uses the word `ATTACH`.
-- The query does not create data on disk, but assumes the data is already in the appropriate place, and only adds information about the table to the server.
-  After executing the attach query, the server will know that the table exists.
+- It uses the keyword `ATTACH`.
+- The query does not create data on disk. It assumes the data is already stored in the corresponding place, and adds the information about the table to the server.
+  After executing the `ATTACH` query, the server will know that the table has been created.

-If the table was previously detached (`DETACH`), meaning its structure is known, a shorthand can be used without specifying the structure.
+If the table was previously detached (`DETACH`), meaning its structure is known, you can use a short form to create the table, without defining the schema details of its structure.

 ``` sql
 ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
 ```
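A minimal detach/attach round trip (an illustrative sketch; `t` is a made-up table name):

``` sql
DETACH TABLE t;  -- the server forgets the table; the data stays on disk
ATTACH TABLE t;  -- short form: the structure is read from the stored metadata
```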
-This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables that are explicitly created on the server).
+This query is triggered automatically when the server starts.

+The server stores table metadata as files containing `ATTACH` queries and simply runs them at launch. Some tables are exceptions, such as system tables, which are explicitly created on the server.

 ## CHECK TABLE {#check-table}
@@ -31,13 +33,12 @@ ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
 CHECK TABLE [db.]name
 ```

-The `CHECK TABLE` query compares actual file sizes with the expected values stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.
+The `CHECK TABLE` query compares the actual file sizes stored on the server with the expected values. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.

-The query response contains the `result` column with a single row. The value of that row is of
-[Boolean](../../sql-reference/data-types/boolean.md) type:
+The query returns a single row with the column name `result`; the value of that row is of [Boolean](../../sql-reference/data-types/boolean.md) type:

-- 0 - the data in the table is corrupted.
-- 1 - the data retains integrity.
+- 0 - the data in the table is corrupted;
+- 1 - the data retains integrity;

 The `CHECK TABLE` query supports the following table engines:
@@ -56,13 +57,14 @@ CHECK TABLE [db.]name

 If the table is corrupted, you can copy the non-corrupted data to another table (see the combined sketch after these steps). To do this:

-1. Create a new table with the same structure as the damaged table. To do this, execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
+1. Create a new table with the same structure as the damaged table. Execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
 2. Set the [max_threads](../../operations/settings/settings.md#settings-max_threads) value to 1 so the next query is processed in a single thread. To do this, run the query `SET max_threads = 1`.
 3. Execute the query `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. This request copies the non-corrupted data from the damaged table into another table. Only the data before the corrupted part is copied.
 4. Restart `clickhouse-client` to reset the `max_threads` value.
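Put together, the recovery steps look like this (an illustrative sketch; `new_t` and `damaged_t` are made-up names standing in for `<new_table_name>` and `<damaged_table_name>`):

``` sql
CREATE TABLE new_t AS damaged_t;            -- step 1: same structure as the damaged table
SET max_threads = 1;                        -- step 2: process the next query in a single thread
INSERT INTO new_t SELECT * FROM damaged_t;  -- step 3: copies data up to the corrupted part
```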
 ## DESCRIBE TABLE {#misc-describe-table}

+Shows the description of a table, returning each column's schema. Syntax:
 ``` sql
 DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
 ```
@@ -73,24 +75,25 @@ DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
 - `type` — The column's type.
 - `default_type` — The clause used in the [default expression](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED`, or `ALIAS`). If no default expression is specified, the column contains an empty string.
 - `default_expression` — The value specified in the `DEFAULT` clause.
-- `comment_expression` — The comment.
+- `comment_expression` — The comment text.

 Nested data structures are output in “expanded” format. Each column is shown separately, with a dot after the column name.

 ## DETACH {#detach}

-Deletes information about the ‘name’ table from the server. The server stops being aware of the table's existence.
+Removes the target table's information from the server (the object removed is the table); after the query executes, the server treats the table as if it no longer exists.

 ``` sql
 DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
 ```

 This does not delete the table's data or metadata. On the next server start, the server will read the metadata and discover the table again.
-Similarly, a “detached” table can be re-attached using the `ATTACH` query (except for system tables, for which no metadata is stored).
+The table can also be re-attached, without stopping the server, using the `ATTACH` query described above (except for system tables, for which no metadata is stored).

 ## DROP {#drop}

 Deletes an existing entity. If `IF EXISTS` is specified, no error is returned when the entity does not exist.
+It is recommended to add the `IF EXISTS` modifier when using it.

 ## DROP DATABASE {#drop-database}
@@ -135,7 +138,7 @@ DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]

 Deletes a role.

-A deleted role is revoked from all the entities it was granted to.
+At the same time, the privileges held by the role are also revoked.

 Syntax:
@@ -199,6 +202,8 @@ EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT

 ## KILL QUERY {#kill-query-statement}

+
+
 ``` sql
 KILL QUERY [ON CLUSTER cluster]
   WHERE <where expression to SELECT FROM system.processes query>
@@ -219,16 +224,17 @@ KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
 KILL QUERY WHERE user='username' SYNC
 ```

-Read-only users can only stop their own queries.
+Read-only users can only stop queries that they submitted themselves.

-By default, the asynchronous version of the query (`ASYNC`) is used, which does not wait for confirmation that the queries have stopped.
+By default, the asynchronous version of the query (`ASYNC`) is used, so there is no need to wait for confirmation that the queries have stopped.

-The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops.
-The response contains the `kill_status` column, which can take the following values:
+By contrast, killing with the synchronous version (`SYNC`) displays information about each query as it is stopped.

+The returned information contains the `kill_status` column, which can take the following values:

 1. ‘finished’ – The query was terminated successfully.
 2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate.
-3. Other values explain why the query can’t be stopped.
+3. Other values, which explain why the query can’t be stopped.

 The test version of the query (`TEST`) only checks the user’s rights and displays a list of queries to stop.
@@ -115,7 +115,7 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
     std::string name = schema_name.empty() ? backQuoteIfNeed(table_name) : backQuoteIfNeed(schema_name) + "." + backQuoteIfNeed(table_name);
     WriteBufferFromOwnString buf;
     std::string input = "SELECT * FROM " + name + " WHERE 1 = 0";
-    ParserQueryWithOutput parser;
+    ParserQueryWithOutput parser(input.data() + input.size());
     ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", context_settings.max_query_size, context_settings.max_parser_depth);

     IAST::FormatSettings settings(buf, true);
@@ -139,6 +139,28 @@ void setupTmpPath(Poco::Logger * log, const std::string & path)
     }
 }

+int waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
+{
+    const int sleep_max_ms = 1000 * seconds_to_wait;
+    const int sleep_one_ms = 100;
+    int sleep_current_ms = 0;
+    int current_connections = 0;
+    while (sleep_current_ms < sleep_max_ms)
+    {
+        current_connections = 0;
+        for (auto & server : servers)
+        {
+            server.stop();
+            current_connections += server.currentConnections();
+        }
+        if (!current_connections)
+            break;
+        sleep_current_ms += sleep_one_ms;
+        std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
+    }
+    return current_connections;
+}
+
 }

 namespace DB
@@ -366,7 +388,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
     LOG_WARNING(log, "Server was built in debug mode. It will work slowly.");
 #endif

-#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER)
+#if defined(SANITIZER)
     LOG_WARNING(log, "Server was built with sanitizer. It will work slowly.");
 #endif

@@ -794,8 +816,29 @@ int Server::main(const std::vector<std::string> & /*args*/)

         LOG_DEBUG(log, "Shut down storages.");

-        for (auto & server : servers_to_start_before_tables)
-            server.stop();
+        if (!servers_to_start_before_tables.empty())
+        {
+            LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish.");
+            int current_connections = 0;
+            for (auto & server : servers_to_start_before_tables)
+            {
+                server.stop();
+                current_connections += server.currentConnections();
+            }
+
+            if (current_connections)
+                LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections);
+            else
+                LOG_INFO(log, "Closed all listening sockets.");
+
+            if (current_connections > 0)
+                current_connections = waitServersToFinish(servers_to_start_before_tables, config().getInt("shutdown_wait_unfinished", 5));
+
+            if (current_connections)
+                LOG_INFO(log, "Closed connections to servers for tables. But {} remain. Probably some tables of other users cannot finish their connections after context shutdown.", current_connections);
+            else
+                LOG_INFO(log, "Closed connections to servers for tables.");
+        }

         /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
           * At this moment, no one could own shared part of Context.
@@ -1167,24 +1210,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
             global_context->getProcessList().killAllQueries();

-            if (current_connections)
-            {
-                const int sleep_max_ms = 1000 * config().getInt("shutdown_wait_unfinished", 5);
-                const int sleep_one_ms = 100;
-                int sleep_current_ms = 0;
-                while (sleep_current_ms < sleep_max_ms)
-                {
-                    current_connections = 0;
-                    for (auto & server : servers)
-                    {
-                        server.stop();
-                        current_connections += server.currentConnections();
-                    }
-                    if (!current_connections)
-                        break;
-                    sleep_current_ms += sleep_one_ms;
-                    std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
-                }
-            }
+            current_connections = waitServersToFinish(servers, config().getInt("shutdown_wait_unfinished", 5));

             if (current_connections)
                 LOG_INFO(log, "Closed connections. But {} remain."
@@ -156,6 +156,25 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh
 {
     const String & setting_name = change.name;

+    if (setting_name == "profile")
+    {
+        /// TODO Check profile settings in Context::setProfile(...), not here. It will be backward incompatible.
+        const String & profile_name = change.value.safeGet<String>();
+        const auto & profile_settings_changes = manager->getProfileSettings(profile_name);
+        try
+        {
+            /// NOTE We cannot use CLAMP_ON_VIOLATION here, because we cannot modify elements of profile_settings_changes
+            for (auto change_copy : *profile_settings_changes)
+                checkImpl(current_settings, change_copy, THROW_ON_VIOLATION);
+        }
+        catch (Exception & e)
+        {
+            e.addMessage(", while trying to set settings profile {}", profile_name);
+            throw;
+        }
+        return true;
+    }
+
     bool cannot_cast;
     auto cast_value = [&](const Field & x) -> Field
     {
@@ -18,6 +18,7 @@
 #include <Poco/String.h>
 #include "registerAggregateFunctions.h"

+#include <Functions/FunctionFactory.h>

 namespace DB
 {
@@ -135,12 +136,17 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
         return combinator->transformAggregateFunction(nested_function, out_properties, argument_types, parameters);
     }

+    String extra_info;
+    if (FunctionFactory::instance().hasNameOrAlias(name))
+        extra_info = ". There is an ordinary function with the same name, but aggregate function is expected here";
+
     auto hints = this->getHints(name);
     if (!hints.empty())
-        throw Exception(fmt::format("Unknown aggregate function {}. Maybe you meant: {}", name, toString(hints)),
-            ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION);
+        throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION,
+            "Unknown aggregate function {}{}. Maybe you meant: {}", name, extra_info, toString(hints));
     else
-        throw Exception(fmt::format("Unknown aggregate function {}", name), ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION);
+        throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Unknown aggregate function {}{}", name, extra_info);
 }
src/AggregateFunctions/AggregateFunctionSimpleState.cpp (new file, 32 lines)
@@ -0,0 +1,32 @@
+#include <AggregateFunctions/AggregateFunctionCombinatorFactory.h>
+#include <AggregateFunctions/AggregateFunctionSimpleState.h>
+
+namespace DB
+{
+namespace
+{
+class AggregateFunctionCombinatorSimpleState final : public IAggregateFunctionCombinator
+{
+public:
+    String getName() const override { return "SimpleState"; }
+
+    DataTypes transformArguments(const DataTypes & arguments) const override { return arguments; }
+
+    AggregateFunctionPtr transformAggregateFunction(
+        const AggregateFunctionPtr & nested_function,
+        const AggregateFunctionProperties &,
+        const DataTypes & arguments,
+        const Array & params) const override
+    {
+        return std::make_shared<AggregateFunctionSimpleState>(nested_function, arguments, params);
+    }
+};
+
+}
+
+void registerAggregateFunctionCombinatorSimpleState(AggregateFunctionCombinatorFactory & factory)
+{
+    factory.registerCombinator(std::make_shared<AggregateFunctionCombinatorSimpleState>());
+}
+
+}
src/AggregateFunctions/AggregateFunctionSimpleState.h (new file, 77 lines)
@@ -0,0 +1,77 @@
+#pragma once
+
+#include <AggregateFunctions/IAggregateFunction.h>
+#include <DataTypes/DataTypeCustomSimpleAggregateFunction.h>
+#include <DataTypes/DataTypeFactory.h>
+
+namespace DB
+{
+
+/** Not an aggregate function, but an adapter of aggregate functions.
+  * Aggregate functions with the `SimpleState` suffix are almost identical to the corresponding ones,
+  * except the return type becomes DataTypeCustomSimpleAggregateFunction.
+  */
+class AggregateFunctionSimpleState final : public IAggregateFunctionHelper<AggregateFunctionSimpleState>
+{
+private:
+    AggregateFunctionPtr nested_func;
+    DataTypes arguments;
+    Array params;
+
+public:
+    AggregateFunctionSimpleState(AggregateFunctionPtr nested_, const DataTypes & arguments_, const Array & params_)
+        : IAggregateFunctionHelper<AggregateFunctionSimpleState>(arguments_, params_)
+        , nested_func(nested_)
+        , arguments(arguments_)
+        , params(params_)
+    {
+    }
+
+    String getName() const override { return nested_func->getName() + "SimpleState"; }
+
+    DataTypePtr getReturnType() const override
+    {
+        DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(nested_func);
+        // Need to make a clone because it'll be customized.
+        auto storage_type = DataTypeFactory::instance().get(nested_func->getReturnType()->getName());
+        DataTypeCustomNamePtr custom_name
+            = std::make_unique<DataTypeCustomSimpleAggregateFunction>(nested_func, DataTypes{nested_func->getReturnType()}, params);
+        storage_type->setCustomization(std::make_unique<DataTypeCustomDesc>(std::move(custom_name), nullptr));
+        return storage_type;
+    }
+
+    void create(AggregateDataPtr place) const override { nested_func->create(place); }
+
+    void destroy(AggregateDataPtr place) const noexcept override { nested_func->destroy(place); }
+
+    bool hasTrivialDestructor() const override { return nested_func->hasTrivialDestructor(); }
+
+    size_t sizeOfData() const override { return nested_func->sizeOfData(); }
+
+    size_t alignOfData() const override { return nested_func->alignOfData(); }
+
+    void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena * arena) const override
+    {
+        nested_func->add(place, columns, row_num, arena);
+    }
+
+    void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena * arena) const override { nested_func->merge(place, rhs, arena); }
+
+    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override { nested_func->serialize(place, buf); }
+
+    void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena * arena) const override
+    {
+        nested_func->deserialize(place, buf, arena);
+    }
+
+    void insertResultInto(AggregateDataPtr place, IColumn & to, Arena * arena) const override
+    {
+        nested_func->insertResultInto(place, to, arena);
+    }
+
+    bool allocatesMemoryInArena() const override { return nested_func->allocatesMemoryInArena(); }
+
+    AggregateFunctionPtr getNestedFunction() const { return nested_func; }
+};
+
+}
@@ -47,6 +47,7 @@ class AggregateFunctionCombinatorFactory;
 void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);
 void registerAggregateFunctionCombinatorArray(AggregateFunctionCombinatorFactory &);
 void registerAggregateFunctionCombinatorForEach(AggregateFunctionCombinatorFactory &);
+void registerAggregateFunctionCombinatorSimpleState(AggregateFunctionCombinatorFactory &);
 void registerAggregateFunctionCombinatorState(AggregateFunctionCombinatorFactory &);
 void registerAggregateFunctionCombinatorMerge(AggregateFunctionCombinatorFactory &);
 void registerAggregateFunctionCombinatorNull(AggregateFunctionCombinatorFactory &);
@@ -104,6 +105,7 @@ void registerAggregateFunctions()
     registerAggregateFunctionCombinatorIf(factory);
     registerAggregateFunctionCombinatorArray(factory);
     registerAggregateFunctionCombinatorForEach(factory);
+    registerAggregateFunctionCombinatorSimpleState(factory);
     registerAggregateFunctionCombinatorState(factory);
     registerAggregateFunctionCombinatorMerge(factory);
     registerAggregateFunctionCombinatorNull(factory);
@@ -41,6 +41,7 @@ SRCS(
    AggregateFunctionRetention.cpp
    AggregateFunctionSequenceMatch.cpp
    AggregateFunctionSimpleLinearRegression.cpp
+   AggregateFunctionSimpleState.cpp
    AggregateFunctionState.cpp
    AggregateFunctionStatistics.cpp
    AggregateFunctionStatisticsSimple.cpp
@@ -26,6 +26,7 @@
 #define DISABLE_MREMAP 1
 #endif
 #include <common/mremap.h>
+#include <common/getPageSize.h>

 #include <Common/MemoryTracker.h>
 #include <Common/Exception.h>
@@ -59,7 +60,6 @@
   */
 extern const size_t MMAP_THRESHOLD;

-static constexpr size_t MMAP_MIN_ALIGNMENT = 4096;
 static constexpr size_t MALLOC_MIN_ALIGNMENT = 8;

 namespace DB
@@ -194,10 +194,11 @@ private:
     void * allocNoTrack(size_t size, size_t alignment)
    {
         void * buf;
+        size_t mmap_min_alignment = ::getPageSize();

         if (size >= MMAP_THRESHOLD)
         {
-            if (alignment > MMAP_MIN_ALIGNMENT)
+            if (alignment > mmap_min_alignment)
                 throw DB::Exception(fmt::format("Too large alignment {}: more than page size when allocating {}.",
                     ReadableSize(alignment), ReadableSize(size)), DB::ErrorCodes::BAD_ARGUMENTS);

@@ -83,10 +83,11 @@ private:
     /// Last contiguous chunk of memory.
     Chunk * head;
     size_t size_in_bytes;
+    size_t page_size;

-    static size_t roundUpToPageSize(size_t s)
+    static size_t roundUpToPageSize(size_t s, size_t page_size)
     {
-        return (s + 4096 - 1) / 4096 * 4096;
+        return (s + page_size - 1) / page_size * page_size;
     }

     /// If chunks size is less than 'linear_growth_threshold', then use exponential growth, otherwise - linear growth
@@ -113,7 +114,7 @@ private:
         }

         assert(size_after_grow >= min_next_size);
-        return roundUpToPageSize(size_after_grow);
+        return roundUpToPageSize(size_after_grow, page_size);
     }

     /// Add next contiguous chunk of memory with size not less than specified.
@@ -129,7 +130,8 @@ private:
 public:
     Arena(size_t initial_size_ = 4096, size_t growth_factor_ = 2, size_t linear_growth_threshold_ = 128 * 1024 * 1024)
         : growth_factor(growth_factor_), linear_growth_threshold(linear_growth_threshold_),
-        head(new Chunk(initial_size_, nullptr)), size_in_bytes(head->size())
+        head(new Chunk(initial_size_, nullptr)), size_in_bytes(head->size()),
+        page_size(static_cast<size_t>(::getPageSize()))
     {
     }

@@ -13,6 +13,8 @@
 #include <boost/noncopyable.hpp>
 #include <ext/scope_guard.h>

+#include <common/getPageSize.h>
+
 #include <Common/Exception.h>
 #include <Common/randomSeed.h>
 #include <Common/formatReadable.h>
@@ -326,8 +328,6 @@ private:
         return (x + (rounding - 1)) / rounding * rounding;
     }

-    static constexpr size_t page_size = 4096;
-
     /// Sizes and addresses of allocated memory will be aligned to specified boundary.
     static constexpr size_t alignment = 16;

@@ -505,6 +505,7 @@ private:

         /// If nothing was found and total size of allocated chunks plus required size is lower than maximum,
         /// allocate a new chunk.
+        size_t page_size = static_cast<size_t>(::getPageSize());
         size_t required_chunk_size = std::max(min_chunk_size, roundUp(size, page_size));
         if (total_chunks_size + required_chunk_size <= max_total_size)
         {
@@ -106,6 +106,11 @@ public:
         return aliases.count(name) || case_insensitive_aliases.count(name);
     }

+    bool hasNameOrAlias(const String & name) const
+    {
+        return getMap().count(name) || getCaseInsensitiveMap().count(name) || isAlias(name);
+    }
+
     virtual ~IFactoryWithAliases() override {}

 private:
@@ -8,10 +8,11 @@

 #include "MemoryStatisticsOS.h"

-#include <common/logger_useful.h>
+#include <common/getPageSize.h>
 #include <Common/Exception.h>
 #include <IO/ReadBufferFromMemory.h>
 #include <IO/ReadHelpers.h>
+#include <common/logger_useful.h>


 namespace DB
@@ -26,7 +27,6 @@ namespace ErrorCodes
 }

 static constexpr auto filename = "/proc/self/statm";
-static constexpr size_t PAGE_SIZE = 4096;

 MemoryStatisticsOS::MemoryStatisticsOS()
 {
@@ -93,11 +93,12 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
     skipWhitespaceIfAny(in);
     readIntText(data.data_and_stack, in);

-    data.virt *= PAGE_SIZE;
-    data.resident *= PAGE_SIZE;
-    data.shared *= PAGE_SIZE;
-    data.code *= PAGE_SIZE;
-    data.data_and_stack *= PAGE_SIZE;
+    size_t page_size = static_cast<size_t>(::getPageSize());
+    data.virt *= page_size;
+    data.resident *= page_size;
+    data.shared *= page_size;
+    data.code *= page_size;
+    data.data_and_stack *= page_size;

     return data;
 }
@@ -1,5 +1,6 @@
 #pragma once

+#include <common/getPageSize.h>
 #include <Common/Exception.h>
 #include <Common/StringUtils/StringUtils.h>
 #include <Common/UTF8Helpers.h>
@@ -37,7 +38,7 @@ struct StringSearcherBase
 {
 #ifdef __SSE2__
     static constexpr auto n = sizeof(__m128i);
-    const int page_size = getpagesize();
+    const int page_size = ::getPageSize();

     bool pageSafe(const void * const ptr) const
     {
@@ -2,11 +2,14 @@
 #include <Common/ThreadProfileEvents.h>
 #include <Common/QueryProfiler.h>
 #include <Common/ThreadStatus.h>
+#include <common/errnoToString.h>
 #include <Interpreters/OpenTelemetrySpanLog.h>

+#include <Poco/Logger.h>
 #include <common/getThreadId.h>

+#include <signal.h>


 namespace DB
 {
@@ -21,6 +24,11 @@ namespace ErrorCodes
 thread_local ThreadStatus * current_thread = nullptr;
 thread_local ThreadStatus * main_thread = nullptr;

+#if !defined(SANITIZER) && !defined(ARCADIA_BUILD)
+alignas(4096) static thread_local char alt_stack[4096];
+static thread_local bool has_alt_stack = false;
+#endif
+

 ThreadStatus::ThreadStatus()
     : thread_id{getThreadId()}
@@ -34,6 +42,46 @@ ThreadStatus::ThreadStatus()

     /// NOTE: It is important not to do any non-trivial actions (like updating ProfileEvents or logging) before ThreadStatus is created
     /// Otherwise it could lead to SIGSEGV due to current_thread dereferencing
+
+    /// Will set alternative signal stack to provide diagnostics for stack overflow errors.
+    /// If not already installed for current thread.
+    /// Sanitizer makes larger stack usage and also it's incompatible with alternative stack by default (it sets up and relies on its own).
+#if !defined(SANITIZER) && !defined(ARCADIA_BUILD)
+    if (!has_alt_stack)
+    {
+        /// Don't repeat tries even if not installed successfully.
+        has_alt_stack = true;
+
+        /// We have to call 'sigaltstack' before first 'sigaction'. (It does not work other way, for unknown reason).
+        stack_t altstack_description{};
+        altstack_description.ss_sp = alt_stack;
+        altstack_description.ss_flags = 0;
+        altstack_description.ss_size = sizeof(alt_stack);
+
+        if (0 != sigaltstack(&altstack_description, nullptr))
+        {
+            LOG_WARNING(log, "Cannot set alternative signal stack for thread, {}", errnoToString(errno));
+        }
+        else
+        {
+            /// Obtain existing sigaction and modify it by adding a flag.
+            struct sigaction action{};
+            if (0 != sigaction(SIGSEGV, nullptr, &action))
+            {
+                LOG_WARNING(log, "Cannot obtain previous signal action to set alternative signal stack for thread, {}", errnoToString(errno));
+            }
+            else if (!(action.sa_flags & SA_ONSTACK))
+            {
+                action.sa_flags |= SA_ONSTACK;
+
+                if (0 != sigaction(SIGSEGV, &action, nullptr))
+                {
+                    LOG_WARNING(log, "Cannot set action with alternative signal stack for thread, {}", errnoToString(errno));
+                }
+            }
+        }
+    }
+#endif
 }

 ThreadStatus::~ThreadStatus()
@@ -798,6 +798,21 @@ void TestKeeperStorage::clearDeadWatches(int64_t session_id)
             if (watches_for_path.empty())
                 watches.erase(watch);
         }
+
+        auto list_watch = list_watches.find(watch_path);
+        if (list_watch != list_watches.end())
+        {
+            auto & list_watches_for_path = list_watch->second;
+            for (auto w_it = list_watches_for_path.begin(); w_it != list_watches_for_path.end();)
+            {
+                if (w_it->session_id == session_id)
+                    w_it = list_watches_for_path.erase(w_it);
+                else
+                    ++w_it;
+            }
+            if (list_watches_for_path.empty())
+                list_watches.erase(list_watch);
+        }
     }
     sessions_and_watchers.erase(watches_it);
 }
@@ -239,6 +239,8 @@ class IColumn;
      * Almost all limits apply to each stream individually. \
      */ \
     \
+    M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \
+    M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \
     M(UInt64, max_rows_to_read, 0, "Limit on read rows from the most 'deep' sources. That is, only in the deepest subquery. When reading from a remote server, it is only checked on a remote server.", 0) \
     M(UInt64, max_bytes_to_read, 0, "Limit on read bytes (after decompression) from the most 'deep' sources. That is, only in the deepest subquery. When reading from a remote server, it is only checked on a remote server.", 0) \
     M(OverflowMode, read_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
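The two new settings act like a trailing LIMIT/OFFSET applied to the query result; a sketch of the intended behavior (an editorial illustration, assuming the settings are applied via the usual `SETTINGS` clause):

``` sql
-- expected to return rows 2 and 3 of the result: offset skips 2 rows, limit keeps 2
SELECT number FROM numbers(10) SETTINGS offset = 2, limit = 2;
```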
@@ -25,10 +25,19 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }

-static const std::vector<String> supported_functions{"any", "anyLast", "min",
-    "max", "sum", "sumWithOverflow", "groupBitAnd", "groupBitOr", "groupBitXor",
-    "sumMap", "minMap", "maxMap", "groupArrayArray", "groupUniqArrayArray"};
+void DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(const AggregateFunctionPtr & function)
+{
+    static const std::vector<String> supported_functions{"any", "anyLast", "min",
+        "max", "sum", "sumWithOverflow", "groupBitAnd", "groupBitOr", "groupBitXor",
+        "sumMap", "minMap", "maxMap", "groupArrayArray", "groupUniqArrayArray"};
+
+    // check function
+    if (std::find(std::begin(supported_functions), std::end(supported_functions), function->getName()) == std::end(supported_functions))
+    {
+        throw Exception("Unsupported aggregate function " + function->getName() + ", supported functions are " + boost::algorithm::join(supported_functions, ","),
+            ErrorCodes::BAD_ARGUMENTS);
+    }
+}

 String DataTypeCustomSimpleAggregateFunction::getName() const
 {
@@ -114,12 +123,7 @@ static std::pair<DataTypePtr, DataTypeCustomDescPtr> create(const ASTPtr & argum
         AggregateFunctionProperties properties;
         function = AggregateFunctionFactory::instance().get(function_name, argument_types, params_row, properties);

-        // check function
-        if (std::find(std::begin(supported_functions), std::end(supported_functions), function->getName()) == std::end(supported_functions))
-        {
-            throw Exception("Unsupported aggregate function " + function->getName() + ", supported functions are " + boost::algorithm::join(supported_functions, ","),
-                ErrorCodes::BAD_ARGUMENTS);
-        }
+        DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(function);

         DataTypePtr storage_type = DataTypeFactory::instance().get(argument_types[0]->getName());

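For reference, the data type these checks guard is used like this (a sketch of standard `SimpleAggregateFunction` usage; the table and column names are made up):

``` sql
CREATE TABLE agg_example
(
    key UInt64,
    total SimpleAggregateFunction(sum, UInt64)  -- 'sum' must be in the supported_functions list above
)
ENGINE = AggregatingMergeTree
ORDER BY key;
```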
@@ -37,6 +37,7 @@ public:

     const AggregateFunctionPtr getFunction() const { return function; }
     String getName() const override;
+    static void checkSupportedFunctions(const AggregateFunctionPtr & function);
 };

 }
@@ -451,6 +451,7 @@ public:
     static bool isSpecialCompressionAllowed(const SubstreamPath & path);
 private:
     friend class DataTypeFactory;
+    friend class AggregateFunctionSimpleState;
     /// Customize this DataType
     void setCustomization(DataTypeCustomDescPtr custom_desc_) const;

@@ -217,6 +217,9 @@ void DatabaseAtomic::renameTable(const Context & context, const String & table_n
     if (is_dictionary && !inside_database)
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot move dictionary to other database");

+    if (!exchange)
+        other_db.checkMetadataFilenameAvailabilityUnlocked(to_table_name, inside_database ? db_lock : other_db_lock);
+
     StoragePtr table = getTableUnlocked(table_name, db_lock);
     table->checkTableCanBeRenamed();
     assert_can_move_mat_view(table);
@@ -42,6 +42,14 @@ void DatabaseLazy::loadStoredObjects(
     iterateMetadataFiles(context, [this](const String & file_name)
     {
         const std::string table_name = file_name.substr(0, file_name.size() - 4);
+
+        auto detached_permanently_flag = Poco::File(getMetadataPath() + "/" + file_name + detached_suffix);
+        if (detached_permanently_flag.exists())
+        {
+            LOG_DEBUG(log, "Skipping permanently detached table {}.", backQuote(table_name));
+            return;
+        }
+
         attachTable(table_name, nullptr, {});
     });
 }
@ -164,20 +164,38 @@ void DatabaseOnDisk::createTable(
/// But there is protection from it - see using DDLGuard in InterpreterCreateQuery.

if (isDictionaryExist(table_name))
throw Exception("Dictionary " + backQuote(getDatabaseName()) + "." + backQuote(table_name) + " already exists.",
ErrorCodes::DICTIONARY_ALREADY_EXISTS);
throw Exception(ErrorCodes::DICTIONARY_ALREADY_EXISTS, "Dictionary {}.{} already exists", backQuote(getDatabaseName()), backQuote(table_name));

if (isTableExist(table_name, global_context))
throw Exception("Table " + backQuote(getDatabaseName()) + "." + backQuote(table_name) + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS);
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists", backQuote(getDatabaseName()), backQuote(table_name));

String table_metadata_path = getObjectMetadataPath(table_name);

if (create.attach_short_syntax)
{
/// Metadata already exists, table was detached
attachTable(table_name, table, getTableDataPath(create));
removeDetachedPermanentlyFlag(table_name, table_metadata_path);
return;
}

String table_metadata_path = getObjectMetadataPath(table_name);
if (!create.attach)
checkMetadataFilenameAvailability(table_name);

if (create.attach && Poco::File(table_metadata_path).exists())
{
ASTPtr ast_detached = parseQueryFromMetadata(log, context, table_metadata_path);
auto & create_detached = ast_detached->as<ASTCreateQuery &>();

// either both should be Nil, or the values should be equal
if (create.uuid != create_detached.uuid)
throw Exception(
ErrorCodes::TABLE_ALREADY_EXISTS,
"Table {}.{} already exists (detached permanently). To attach it back "
"you need to use short ATTACH syntax or a full statement with the same UUID",
backQuote(getDatabaseName()), backQuote(table_name));
}

String table_metadata_tmp_path = table_metadata_path + create_suffix;
String statement;

@ -194,6 +212,26 @@ void DatabaseOnDisk::createTable(
}

commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path);

removeDetachedPermanentlyFlag(table_name, table_metadata_path);
}

/// If the table was detached permanently we will have a flag file with the
/// .sql.detached extension; it is not needed anymore since we attached the table back
void DatabaseOnDisk::removeDetachedPermanentlyFlag(const String & table_name, const String & table_metadata_path) const
{
try
{
auto detached_permanently_flag = Poco::File(table_metadata_path + detached_suffix);

if (detached_permanently_flag.exists())
detached_permanently_flag.remove();
}
catch (Exception & e)
{
e.addMessage("while trying to remove permanently detached flag. Table {}.{} may still be marked as permanently detached, and will not be reattached during server restart.", backQuote(getDatabaseName()), backQuote(table_name));
throw;
}
}

void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const StoragePtr & table,
@ -215,6 +253,22 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora
}
}

void DatabaseOnDisk::detachTablePermanently(const String & table_name)
{
auto table = detachTable(table_name);

Poco::File detached_permanently_flag(getObjectMetadataPath(table_name) + detached_suffix);
try
{
detached_permanently_flag.createFile();
}
catch (Exception & e)
{
e.addMessage("while trying to set permanently detached flag. Table {}.{} may be reattached during server restart.", backQuote(getDatabaseName()), backQuote(table_name));
throw;
}
}

void DatabaseOnDisk::dropTable(const Context & context, const String & table_name, bool /*no_delay*/)
{
String table_metadata_path = getObjectMetadataPath(table_name);
@ -253,6 +307,27 @@ void DatabaseOnDisk::dropTable(const Context & context, const String & table_nam
Poco::File(table_metadata_path_drop).remove();
}

void DatabaseOnDisk::checkMetadataFilenameAvailability(const String & to_table_name) const
{
std::unique_lock lock(mutex);
checkMetadataFilenameAvailabilityUnlocked(to_table_name, lock);
}

void DatabaseOnDisk::checkMetadataFilenameAvailabilityUnlocked(const String & to_table_name, std::unique_lock<std::mutex> &) const
{
String table_metadata_path = getObjectMetadataPath(to_table_name);

if (Poco::File(table_metadata_path).exists())
{
auto detached_permanently_flag = Poco::File(table_metadata_path + detached_suffix);

if (detached_permanently_flag.exists())
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists (detached permanently)", backQuote(database_name), backQuote(to_table_name));
else
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists (detached)", backQuote(database_name), backQuote(to_table_name));
}
}

void DatabaseOnDisk::renameTable(
const Context & context,
const String & table_name,
@ -299,6 +374,9 @@ void DatabaseOnDisk::renameTable(
if (from_atomic_to_ordinary)
create.uuid = UUIDHelpers::Nil;

if (auto * target_db = dynamic_cast<DatabaseOnDisk *>(&to_database))
target_db->checkMetadataFilenameAvailability(to_table_name);

/// Notify the table that it is renamed. It will move data to new path (if it stores data on disk) and update StorageID
table->rename(to_database.getTableDataPath(create), StorageID(create));
}
@ -328,6 +406,8 @@ void DatabaseOnDisk::renameTable(
}
}

/// It returns create table statement (even if table is detached)
ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, const Context &, bool throw_on_error) const
{
ASTPtr ast;
@ -430,8 +510,11 @@ void DatabaseOnDisk::iterateMetadataFiles(const Context & context, const Iterati
if (endsWith(dir_it.name(), ".sql.bak"))
continue;

static const char * tmp_drop_ext = ".sql.tmp_drop";
if (endsWith(dir_it.name(), tmp_drop_ext))
/// Permanently detached table flag
if (endsWith(dir_it.name(), ".sql.detached"))
continue;

if (endsWith(dir_it.name(), ".sql.tmp_drop"))
{
/// There are files that we tried to delete previously
metadata_files.emplace(dir_it.name(), false);

@ -39,6 +39,8 @@ public:
const StoragePtr & table,
const ASTPtr & query) override;

void detachTablePermanently(const String & table_name) override;

void dropTable(
const Context & context,
const String & table_name,
@ -67,9 +69,14 @@ public:

static ASTPtr parseQueryFromMetadata(Poco::Logger * log, const Context & context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false);

/// will throw when the table we want to attach already exists (in active / detached / detached permanently form)
void checkMetadataFilenameAvailability(const String & to_table_name) const;
void checkMetadataFilenameAvailabilityUnlocked(const String & to_table_name, std::unique_lock<std::mutex> &) const;

protected:
static constexpr const char * create_suffix = ".tmp";
static constexpr const char * drop_suffix = ".tmp_drop";
static constexpr const char * detached_suffix = ".detached";

using IteratingFunction = std::function<void(const String &)>;

@ -87,6 +94,9 @@ protected:

const String metadata_path;
const String data_path;

private:
void removeDetachedPermanentlyFlag(const String & table_name, const String & table_metadata_path) const;
};

}

@ -135,6 +135,19 @@ void DatabaseOrdinary::loadStoredObjects(Context & context, bool has_force_resto
{
auto * create_query = ast->as<ASTCreateQuery>();
create_query->database = database_name;

auto detached_permanently_flag = Poco::File(full_path.string() + detached_suffix);
if (detached_permanently_flag.exists())
{
/// FIXME: even if we don't load the table we can still mark the uuid of it as taken.
/// if (create_query->uuid != UUIDHelpers::Nil)
/// DatabaseCatalog::instance().addUUIDMapping(create_query->uuid);

const std::string table_name = file_name.substr(0, file_name.size() - 4);
LOG_DEBUG(log, "Skipping permanently detached table {}.", backQuote(table_name));
return;
}

std::lock_guard lock{file_names_mutex};
file_names[file_name] = ast;
total_dictionaries += create_query->is_dictionary;

@ -72,7 +72,7 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n

auto it = tables.find(table_name);
if (it == tables.end())
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist.",
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
backQuote(database_name), backQuote(table_name));
res = it->second;
tables.erase(it);
@ -157,7 +157,7 @@ StoragePtr DatabaseWithOwnTablesBase::getTableUnlocked(const String & table_name
auto it = tables.find(table_name);
if (it != tables.end())
return it->second;
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist.",
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
backQuote(database_name), backQuote(table_name));
}

@ -221,6 +221,8 @@ public:
}

/// Add a table to the database, but do not add it to the metadata. The database may not support this method.
///
/// Note: ATTACH TABLE statement actually uses createTable method.
virtual void attachTable(const String & /*name*/, const StoragePtr & /*table*/, [[maybe_unused]] const String & relative_table_path = {})
{
throw Exception("There is no ATTACH TABLE query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
@ -245,6 +247,13 @@ public:
throw Exception("There is no DETACH DICTIONARY query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Forget about the table without deleting its data, but rename the metadata file to prevent reloading it
/// on next restart. The database may not support this method.
virtual void detachTablePermanently(const String & /*name*/)
{
throw Exception("There is no DETACH TABLE PERMANENTLY query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Rename the table and possibly move the table to another database.
virtual void renameTable(
const Context & /*context*/,

@ -395,7 +395,7 @@ void DatabaseConnectionMySQL::loadStoredObjects(Context &, bool, bool /*force_at
}
}

void DatabaseConnectionMySQL::dropTable(const Context &, const String & table_name, bool /*no_delay*/)
void DatabaseConnectionMySQL::detachTablePermanently(const String & table_name)
{
std::lock_guard<std::mutex> lock{mutex};

@ -429,6 +429,11 @@ void DatabaseConnectionMySQL::dropTable(const Context &, const String & table_na
table_iter->second.second->is_dropped = true;
}

void DatabaseConnectionMySQL::dropTable(const Context &, const String & table_name, bool /*no_delay*/)
{
detachTablePermanently(table_name);
}

DatabaseConnectionMySQL::~DatabaseConnectionMySQL()
{
try

@ -72,6 +72,8 @@ public:

StoragePtr detachTable(const String & table_name) override;

void detachTablePermanently(const String & table_name) override;

void dropTable(const Context &, const String & table_name, bool no_delay) override;

void attachTable(const String & table_name, const StoragePtr & storage, const String & relative_table_path) override;

@ -158,7 +158,9 @@ public:
__msan_unpoison(dst_pos, outlen);

source += srclen + 1;
dst_pos += outlen + 1;
dst_pos += outlen;
*dst_pos = '\0';
dst_pos += 1;

dst_offsets[row] = dst_pos - dst;
src_offset_prev = src_offsets[row];

@ -8,6 +8,8 @@

#include <IO/WriteHelpers.h>

#include <AggregateFunctions/AggregateFunctionFactory.h>

namespace DB
{

@ -46,12 +48,15 @@ FunctionOverloadResolverImplPtr FunctionFactory::getImpl(
auto res = tryGetImpl(name, context);
if (!res)
{
String extra_info;
if (AggregateFunctionFactory::instance().hasNameOrAlias(name))
extra_info = ". There is an aggregate function with the same name, but ordinary function is expected here";

auto hints = this->getHints(name);
if (!hints.empty())
throw Exception("Unknown function " + name + ". Maybe you meant: " + toString(hints),
ErrorCodes::UNKNOWN_FUNCTION);
throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown function {}{}. Maybe you meant: {}", name, extra_info, toString(hints));
else
throw Exception("Unknown function " + name, ErrorCodes::UNKNOWN_FUNCTION);
throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown function {}{}", name, extra_info);
}
return res;
}

@ -28,8 +28,8 @@ enum class AggregateOperation
* During array aggregation we derive result type from operation.
* For array min or array max we use array element as result type.
* For array average we use Float64.
* For array sum for decimal numbers we use Decimal128, for floating point numbers Float64, for numeric unsigned Int64,
* and for numeric signed UInt64.
* For array sum: for big integers we use the same type representation, for decimal numbers Decimal128,
* for floating point numbers Float64, for signed numerics Int64, and for unsigned numerics UInt64.
*/

template <typename ArrayElement, AggregateOperation operation>
@ -56,13 +56,14 @@ struct ArrayAggregateResultImpl<ArrayElement, AggregateOperation::average>
template <typename ArrayElement>
struct ArrayAggregateResultImpl<ArrayElement, AggregateOperation::sum>
{
using Result = std::conditional_t<
IsDecimalNumber<ArrayElement>,
Decimal128,
std::conditional_t<
std::is_floating_point_v<ArrayElement>,
Float64,
std::conditional_t<std::is_signed_v<ArrayElement>, Int64, UInt64>>>;
using Result =
std::conditional_t<std::is_same_v<ArrayElement, Int128>, Int128,
std::conditional_t<std::is_same_v<ArrayElement, Int256>, Int256,
std::conditional_t<std::is_same_v<ArrayElement, UInt256>, UInt256,
std::conditional_t<IsDecimalNumber<ArrayElement>, Decimal128,
std::conditional_t<std::is_floating_point_v<ArrayElement>, Float64,
std::conditional_t<std::is_signed_v<ArrayElement>, Int64,
UInt64>>>>>>;
};

template <typename ArrayElement, AggregateOperation operation>
@ -126,12 +127,12 @@ struct ArrayAggregateImpl
using ColVecType = std::conditional_t<IsDecimalNumber<Element>, ColumnDecimal<Element>, ColumnVector<Element>>;
using ColVecResult = std::conditional_t<IsDecimalNumber<Result>, ColumnDecimal<Result>, ColumnVector<Result>>;

/// For average on decimal array we return Float64 as result,
/// but to keep decimal precision we convert to Float64 as last step of average computation
static constexpr bool use_decimal_for_average_aggregation
= aggregate_operation == AggregateOperation::average && IsDecimalNumber<Element>;
/// For the average of an array we return Float64 as result, but we want to keep precision,
/// so we convert to Float64 only as the last step; the intermediate sum uses the result type of the sum operation
static constexpr bool is_average_operation = aggregate_operation == AggregateOperation::average;
using SummAggregationType = ArrayAggregateResult<Element, AggregateOperation::sum>;

using AggregationType = std::conditional_t<use_decimal_for_average_aggregation, Decimal128, Result>;
using AggregationType = std::conditional_t<is_average_operation, SummAggregationType, Result>;

const ColVecType * column = checkAndGetColumn<ColVecType>(&*mapped);
@ -246,12 +247,15 @@ struct ArrayAggregateImpl

if constexpr (aggregate_operation == AggregateOperation::average)
{
s = s / count;
}

if constexpr (use_decimal_for_average_aggregation)
{
res[i] = DecimalUtils::convertTo<Result>(s, data.getScale());
if constexpr (IsDecimalNumber<Element>)
{
s = s / count;
res[i] = DecimalUtils::convertTo<Result>(s, data.getScale());
}
else
{
res[i] = static_cast<Result>(s) / count;
}
}
else
{
@ -272,10 +276,13 @@ struct ArrayAggregateImpl
executeType<UInt16>(mapped, offsets, res) ||
executeType<UInt32>(mapped, offsets, res) ||
executeType<UInt64>(mapped, offsets, res) ||
executeType<UInt256>(mapped, offsets, res) ||
executeType<Int8>(mapped, offsets, res) ||
executeType<Int16>(mapped, offsets, res) ||
executeType<Int32>(mapped, offsets, res) ||
executeType<Int64>(mapped, offsets, res) ||
executeType<Int128>(mapped, offsets, res) ||
executeType<Int256>(mapped, offsets, res) ||
executeType<Float32>(mapped, offsets, res) ||
executeType<Float64>(mapped, offsets, res) ||
executeType<Decimal32>(mapped, offsets, res) ||

@ -62,11 +62,11 @@ namespace DB
typename ColVecTo::Container & vec_to = col_to->getData();

ColumnUInt8::MutablePtr col_null_map_to;
ColumnUInt8::Container * vec_null_map_to [[maybe_unused]] = nullptr;
UInt8 * vec_null_map_to [[maybe_unused]] = nullptr;
if constexpr (nullOnErrors)
{
col_null_map_to = ColumnUInt8::create(input_rows_count);
vec_null_map_to = &col_null_map_to->getData();
vec_null_map_to = col_null_map_to->getData().data();
}

size_t current_offset = 0;
@ -83,12 +83,15 @@ namespace DB
{
const GregorianDate<> date(read_buffer);
vec_to[i] = date.toModifiedJulianDay<typename ToDataType::FieldType>();
(*vec_null_map_to)[i] = false;
vec_null_map_to[i] = false;
}
catch (const Exception & e)
{
if (e.code() == ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED || e.code() == ErrorCodes::CANNOT_PARSE_DATE)
(*vec_null_map_to)[i] = true;
{
vec_to[i] = static_cast<Int32>(0);
vec_null_map_to[i] = true;
}
else
throw;
}

@ -61,9 +61,10 @@ public:
return std::make_shared<DataTypeUInt8>();
}

[[clang::optnone]] void executeImpl(Block & columns, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) const override
[[clang::optnone]]
ColumnPtr executeImpl(const ColumnsWithTypeAndName & block, const DataTypePtr & result_type, size_t input_rows_count) const override
{
if (const ColumnConst * column = checkAndGetColumnConst<ColumnString>(columns[arguments[0]].column.get()))
if (const ColumnConst * column = checkAndGetColumnConst<ColumnString>(block[0].column.get()))
{
String mode = column->getValue<String>();

@ -135,6 +136,10 @@ public:
{
(void)context.getCurrentQueryId();
}
else if (mode == "stack overflow")
{
executeImpl(block, result_type, input_rows_count);
}
else if (mode == "mmap many")
{
std::vector<void *> maps;
@ -160,7 +165,7 @@ public:
else
throw Exception("The only argument for function " + getName() + " must be constant String", ErrorCodes::ILLEGAL_COLUMN);

columns[result].column = columns[result].type->createColumnConst(input_rows_count, 0ULL);
return result_type->createColumnConst(input_rows_count, 0ULL);
}
};

@ -6,6 +6,7 @@
#include <Common/ProfileEvents.h>
#include <Common/formatReadable.h>
#include <Common/Exception.h>
#include <common/getPageSize.h>
#include <IO/WriteHelpers.h>
#include <IO/MMapReadBufferFromFileDescriptor.h>

@ -38,7 +39,9 @@ void MMapReadBufferFromFileDescriptor::init(int fd_, size_t offset, size_t lengt
ErrorCodes::CANNOT_ALLOCATE_MEMORY);

BufferBase::set(static_cast<char *>(buf), length, 0);
ReadBuffer::padded = (length % 4096) > 0 && (length % 4096) <= (4096 - 15); /// TODO determine page size

size_t page_size = static_cast<size_t>(::getPageSize());
ReadBuffer::padded = (length % page_size) > 0 && (length % page_size) <= (page_size - 15);
}
}

@ -3,6 +3,7 @@
#include <iostream>

#include <common/types.h>
#include <common/getPageSize.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFile.h>
@ -16,6 +17,7 @@ int main(int, char **)
{
static const size_t N = 100000;
static const size_t BUF_SIZE = 1048576;
size_t page_size = static_cast<size_t>(::getPageSize());

ReadBufferFromFile rand_in("/dev/urandom");
unsigned rand = 0;
@ -33,7 +35,7 @@ int main(int, char **)
}

{
ReadBufferFromFile rb("test1", BUF_SIZE, O_RDONLY | O_DIRECT, nullptr, 4096);
ReadBufferFromFile rb("test1", BUF_SIZE, O_RDONLY | O_DIRECT, nullptr, page_size);
String res;
for (size_t i = 0; i < N; ++i)
readStringBinary(res, rb);
@ -44,14 +46,14 @@ int main(int, char **)
/// Write to file with O_DIRECT, read as usual.

{
WriteBufferFromFile wb("test2", BUF_SIZE, O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0666, nullptr, 4096);
WriteBufferFromFile wb("test2", BUF_SIZE, O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0666, nullptr, page_size);

for (size_t i = 0; i < N; ++i)
writeStringBinary(test, wb);

if (wb.offset() % 4096 != 0)
if (wb.offset() % page_size != 0)
{
size_t pad = 4096 - wb.offset() % 4096;
size_t pad = page_size - wb.offset() % page_size;
memset(wb.position(), 0, pad);
wb.position() += pad;
}

@ -445,6 +445,8 @@ struct ContextShared

/// Stop trace collector if any
trace_collector.reset();
/// Stop zookeeper connection
zookeeper.reset();
/// Stop test_keeper storage
test_keeper_storage.reset();
}

@ -211,7 +211,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
if (!table_id)
{
if (exception)
exception->emplace("Cannot find table: StorageID is empty", ErrorCodes::UNKNOWN_TABLE);
exception->emplace(ErrorCodes::UNKNOWN_TABLE, "Cannot find table: StorageID is empty");
return {};
}

@ -223,7 +223,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
{
assert(!db_and_table.first && !db_and_table.second);
if (exception)
exception->emplace("Table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
exception->emplace(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs());
return {};
}

@ -244,7 +244,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
/// If table_id has no UUID, then the name of database was specified by user and table_id was not resolved through context.
/// Do not allow access to TEMPORARY_DATABASE because it contains all temporary tables of all contexts and users.
if (exception)
exception->emplace("Direct access to `" + String(TEMPORARY_DATABASE) + "` database is not allowed.", ErrorCodes::DATABASE_ACCESS_DENIED);
exception->emplace(ErrorCodes::DATABASE_ACCESS_DENIED, "Direct access to `{}` database is not allowed", String(TEMPORARY_DATABASE));
return {};
}

@ -255,8 +255,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
if (databases.end() == it)
{
if (exception)
exception->emplace("Database " + backQuoteIfNeed(table_id.getDatabaseName()) + " doesn't exist",
ErrorCodes::UNKNOWN_DATABASE);
exception->emplace(ErrorCodes::UNKNOWN_DATABASE, "Database {} doesn't exist", backQuoteIfNeed(table_id.getDatabaseName()));
return {};
}
database = it->second;
@ -264,7 +263,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(

auto table = database->tryGetTable(table_id.table_name, context);
if (!table && exception)
exception->emplace("Table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
exception->emplace(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs());
if (!table)
database = nullptr;

@ -34,6 +34,8 @@ protected:
Block result_header;
SelectQueryOptions options;
size_t max_streams = 1;
bool settings_limit_offset_needed = false;
bool settings_limit_offset_done = false;
};
}

@ -751,7 +751,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
auto database = DatabaseCatalog::instance().getDatabase(database_name);
bool if_not_exists = create.if_not_exists;

// Table SQL definition is available even if the table is detached
// Table SQL definition is available even if the table is detached (even permanently)
auto query = database->getCreateTableQuery(create.table, context);
create = query->as<ASTCreateQuery &>(); // Copy the saved create query, but use ATTACH instead of CREATE
if (create.is_dictionary)

@ -30,6 +30,7 @@ namespace ErrorCodes
extern const int SYNTAX_ERROR;
extern const int UNKNOWN_TABLE;
extern const int UNKNOWN_DICTIONARY;
extern const int NOT_IMPLEMENTED;
}

@ -55,6 +56,8 @@ BlockIO InterpreterDropQuery::execute()
{
if (!drop.is_dictionary)
return executeToTable(drop);
else if (drop.permanently && drop.kind == ASTDropQuery::Kind::Detach)
throw Exception("DETACH PERMANENTLY is not implemented for dictionaries", ErrorCodes::NOT_IMPLEMENTED);
else
return executeToDictionary(drop.database, drop.table, drop.kind, drop.if_exists, drop.temporary, drop.no_ddl_lock);
}
@ -128,8 +131,18 @@ BlockIO InterpreterDropQuery::executeToTableImpl(const ASTDropQuery & query, Dat
TableExclusiveLockHolder table_lock;
if (database->getUUID() == UUIDHelpers::Nil)
table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout);
/// Drop table from memory, don't touch data and metadata
database->detachTable(table_id.table_name);

if (query.permanently)
{
/// Drop table from memory, don't touch data; the metadata file is renamed and will be skipped during server restart
database->detachTablePermanently(table_id.table_name);
}
else
{
/// Drop table from memory, don't touch data and metadata
database->detachTable(table_id.table_name);
}

}
else if (query.kind == ASTDropQuery::Kind::Truncate)
{
@ -286,6 +299,9 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
bool drop = query.kind == ASTDropQuery::Kind::Drop;
context.checkAccess(AccessType::DROP_DATABASE, database_name);

if (query.kind == ASTDropQuery::Kind::Detach && query.permanently)
throw Exception("DETACH PERMANENTLY is not implemented for databases", ErrorCodes::NOT_IMPLEMENTED);

#if USE_MYSQL
if (database->getEngineName() == "MaterializeMySQL")
stopDatabaseSynchronization(database);

@ -9,10 +9,14 @@
#include <Processors/QueryPlan/IQueryPlanStep.h>
#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPlan/UnionStep.h>
#include <Processors/QueryPlan/LimitStep.h>
#include <Processors/QueryPlan/OffsetStep.h>
#include <Common/typeid_cast.h>

#include <Interpreters/InDepthNodeVisitor.h>

#include <algorithm>

namespace DB
{

@ -130,10 +134,14 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
{
ASTSelectWithUnionQuery * ast = query_ptr->as<ASTSelectWithUnionQuery>();

const Settings & settings = context->getSettingsRef();
if (options.subquery_depth == 0 && (settings.limit > 0 || settings.offset > 0))
settings_limit_offset_needed = true;

/// Normalize AST Tree
if (!ast->is_normalized)
{
CustomizeASTSelectWithUnionQueryNormalizeVisitor::Data union_default_mode{context->getSettingsRef().union_default_mode};
CustomizeASTSelectWithUnionQueryNormalizeVisitor::Data union_default_mode{settings.union_default_mode};
CustomizeASTSelectWithUnionQueryNormalizeVisitor(union_default_mode).visit(query_ptr);

/// After normalization, if it only has one ASTSelectWithUnionQuery child,
@ -186,6 +194,52 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
}
}

if (num_children == 1 && settings_limit_offset_needed)
{
const ASTPtr first_select_ast = ast->list_of_selects->children.at(0);
ASTSelectQuery * select_query = first_select_ast->as<ASTSelectQuery>();

if (!select_query->withFill() && !select_query->limit_with_ties)
{
UInt64 limit_length = 0;
UInt64 limit_offset = 0;

const ASTPtr limit_offset_ast = select_query->limitOffset();
if (limit_offset_ast)
{
limit_offset = limit_offset_ast->as<ASTLiteral &>().value.safeGet<UInt64>();
UInt64 new_limit_offset = settings.offset + limit_offset;
limit_offset_ast->as<ASTLiteral &>().value = Field(new_limit_offset);
}
else if (settings.offset)
{
ASTPtr new_limit_offset_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.offset)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_OFFSET, std::move(new_limit_offset_ast));
}

const ASTPtr limit_length_ast = select_query->limitLength();
if (limit_length_ast)
{
limit_length = limit_length_ast->as<ASTLiteral &>().value.safeGet<UInt64>();

UInt64 new_limit_length = 0;
if (settings.offset == 0)
new_limit_length = std::min(limit_length, UInt64(settings.limit));
else if (settings.offset < limit_length)
new_limit_length = settings.limit ? std::min(UInt64(settings.limit), limit_length - settings.offset) : (limit_length - settings.offset);

limit_length_ast->as<ASTLiteral &>().value = Field(new_limit_length);
}
else if (settings.limit)
{
ASTPtr new_limit_length_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.limit)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(new_limit_length_ast));
}

settings_limit_offset_done = true;
}
}

for (size_t query_num = 0; query_num < num_children; ++query_num)
{
const Names & current_required_result_column_names
@ -293,39 +347,57 @@ void InterpreterSelectWithUnionQuery::buildQueryPlan(QueryPlan & query_plan)
{
// auto num_distinct_union = optimizeUnionList();
size_t num_plans = nested_interpreters.size();
const Settings & settings = context->getSettingsRef();

/// Skip union for single interpreter.
if (num_plans == 1)
{
nested_interpreters.front()->buildQueryPlan(query_plan);
return;
}
else
{
std::vector<std::unique_ptr<QueryPlan>> plans(num_plans);
DataStreams data_streams(num_plans);

for (size_t i = 0; i < num_plans; ++i)
{
plans[i] = std::make_unique<QueryPlan>();
nested_interpreters[i]->buildQueryPlan(*plans[i]);
data_streams[i] = plans[i]->getCurrentDataStream();
}

auto max_threads = context->getSettingsRef().max_threads;
auto union_step = std::make_unique<UnionStep>(std::move(data_streams), result_header, max_threads);

query_plan.unitePlans(std::move(union_step), std::move(plans));

const auto & query = query_ptr->as<ASTSelectWithUnionQuery &>();
if (query.union_mode == ASTSelectWithUnionQuery::Mode::DISTINCT)
{
/// Add distinct transform
SizeLimits limits(settings.max_rows_in_distinct, settings.max_bytes_in_distinct, settings.distinct_overflow_mode);

auto distinct_step
= std::make_unique<DistinctStep>(query_plan.getCurrentDataStream(), limits, 0, result_header.getNames(), false);

query_plan.addStep(std::move(distinct_step));
}
}

std::vector<std::unique_ptr<QueryPlan>> plans(num_plans);
DataStreams data_streams(num_plans);

for (size_t i = 0; i < num_plans; ++i)
if (settings_limit_offset_needed && !settings_limit_offset_done)
{
plans[i] = std::make_unique<QueryPlan>();
nested_interpreters[i]->buildQueryPlan(*plans[i]);
data_streams[i] = plans[i]->getCurrentDataStream();
}

auto max_threads = context->getSettingsRef().max_threads;
auto union_step = std::make_unique<UnionStep>(std::move(data_streams), result_header, max_threads);

query_plan.unitePlans(std::move(union_step), std::move(plans));

const auto & query = query_ptr->as<ASTSelectWithUnionQuery &>();
if (query.union_mode == ASTSelectWithUnionQuery::Mode::DISTINCT)
{
/// Add distinct transform
const Settings & settings = context->getSettingsRef();
SizeLimits limits(settings.max_rows_in_distinct, settings.max_bytes_in_distinct, settings.distinct_overflow_mode);

auto distinct_step = std::make_unique<DistinctStep>(query_plan.getCurrentDataStream(), limits, 0, result_header.getNames(), false);

query_plan.addStep(std::move(distinct_step));
if (settings.limit > 0)
{
auto limit = std::make_unique<LimitStep>(query_plan.getCurrentDataStream(), settings.limit, settings.offset);
limit->setStepDescription("LIMIT OFFSET for SETTINGS");
query_plan.addStep(std::move(limit));
}
else
{
auto offset = std::make_unique<OffsetStep>(query_plan.getCurrentDataStream(), settings.offset);
offset->setStepDescription("OFFSET for SETTINGS");
query_plan.addStep(std::move(offset));
}
}

}

@ -79,6 +79,26 @@ String InterpreterShowTablesQuery::getRewrittenQuery()
return rewritten_query.str();
}

/// SHOW SETTINGS
if (query.m_settings)
{
WriteBufferFromOwnString rewritten_query;
rewritten_query << "SELECT name, type, value FROM system.settings";

if (query.changed)
rewritten_query << " WHERE changed = 1";

if (!query.like.empty())
{
rewritten_query
<< (query.changed ? " AND name " : " WHERE name ")
<< (query.case_insensitive_like ? "ILIKE " : "LIKE ")
<< DB::quote << query.like;
}

return rewritten_query.str();
}

if (query.temporary && !query.from.empty())
throw Exception("The `FROM` and `TEMPORARY` cannot be used together in `SHOW TABLES`", ErrorCodes::SYNTAX_ERROR);

@ -108,6 +108,7 @@ void loadMetadata(Context & context, const String & default_database_name)

if (!it->isDirectory())
{
/// TODO: DETACH DATABASE PERMANENTLY ?
if (endsWith(it.name(), ".sql"))
{
String db_name = it.name().substr(0, it.name().size() - 4);

@ -5,6 +5,7 @@
#include <Common/TaskStatsInfoGetter.h>
#include <Poco/File.h>
#include <Common/Stopwatch.h>
#include <common/getPageSize.h>
#include <common/getThreadId.h>
#include <IO/WriteBufferFromString.h>
#include <linux/taskstats.h>
@ -61,8 +62,9 @@ static void do_io(size_t id)
std::string path_dst = "test_out_" + std::to_string(id);

{
size_t page_size = static_cast<size_t>(::getPageSize());
ReadBufferFromFile rb("/dev/urandom");
WriteBufferFromFile wb(path_dst, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0666, nullptr, 4096);
WriteBufferFromFile wb(path_dst, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0666, nullptr, page_size);
copyData(rb, wb, copy_size);
wb.close();
}

@ -67,6 +67,9 @@ void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState

formatOnCluster(settings);

if (permanently)
settings.ostr << " PERMANENTLY";

if (no_delay)
settings.ostr << (settings.hilite ? hilite_keyword : "") << " NO DELAY" << (settings.hilite ? hilite_none : "");
}

@ -33,6 +33,9 @@ public:

bool no_delay{false};

// We detach the object permanently, so it will not be reattached during server restart.
bool permanently{false};

/** Get the text that identifies this element. */
String getID(char) const override;
ASTPtr clone() const override;

@ -55,6 +55,12 @@ void ASTShowTablesQuery::formatQueryImpl(const FormatSettings & settings, Format
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW CLUSTER" << (settings.hilite ? hilite_none : "");
settings.ostr << " " << backQuoteIfNeed(cluster_str);
}
else if (m_settings)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW " << (changed ? "CHANGED " : "") << "SETTINGS" <<
(settings.hilite ? hilite_none : "");
formatLike(settings);
}
else
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW " << (temporary ? "TEMPORARY " : "") <<

@ -18,6 +18,8 @@ public:
bool clusters{false};
bool cluster{false};
bool dictionaries{false};
bool m_settings{false};
bool changed{false};
bool temporary{false};

String cluster_str;

@ -11,7 +11,7 @@ namespace DB
namespace
{

bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, bool optional_table_keyword = false)
bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, const ASTDropQuery::Kind kind)
{
ParserKeyword s_temporary("TEMPORARY");
ParserKeyword s_table("TABLE");
@ -21,6 +21,7 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, bool
ParserToken s_dot(TokenType::Dot);
ParserKeyword s_if_exists("IF EXISTS");
ParserIdentifier name_p;
ParserKeyword s_permanently("PERMANENTLY");
ParserKeyword s_no_delay("NO DELAY");
ParserKeyword s_sync("SYNC");

@ -32,6 +33,7 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, bool
bool is_dictionary = false;
bool is_view = false;
bool no_delay = false;
bool permanently = false;

if (s_database.ignore(pos, expected))
{
@ -40,15 +42,6 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, bool

if (!name_p.parse(pos, database, expected))
return false;

if (ParserKeyword{"ON"}.ignore(pos, expected))
{
if (!ASTQueryWithOnCluster::parse(pos, cluster_str, expected))
return false;
}

if (s_no_delay.ignore(pos, expected) || s_sync.ignore(pos, expected))
no_delay = true;
}
else
{
@ -59,7 +52,8 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, bool
else if (s_temporary.ignore(pos, expected))
temporary = true;

if (!is_view && !is_dictionary && (!s_table.ignore(pos, expected) && !optional_table_keyword))
/// for TRUNCATE queries the TABLE keyword is assumed by default and can be skipped
if (!is_view && !is_dictionary && (!s_table.ignore(pos, expected) && kind != ASTDropQuery::Kind::Truncate))
{
return false;
}
@ -76,26 +70,32 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, bool
if (!name_p.parse(pos, table, expected))
return false;
}

if (ParserKeyword{"ON"}.ignore(pos, expected))
{
if (!ASTQueryWithOnCluster::parse(pos, cluster_str, expected))
return false;
}

if (s_no_delay.ignore(pos, expected) || s_sync.ignore(pos, expected))
no_delay = true;
}

/// common for tables / dictionaries / databases
if (ParserKeyword{"ON"}.ignore(pos, expected))
{
if (!ASTQueryWithOnCluster::parse(pos, cluster_str, expected))
return false;
}

if (kind == ASTDropQuery::Kind::Detach && s_permanently.ignore(pos, expected))
permanently = true;

/// actually, for TRUNCATE, NO DELAY / SYNC means nothing
if (s_no_delay.ignore(pos, expected) || s_sync.ignore(pos, expected))
no_delay = true;

auto query = std::make_shared<ASTDropQuery>();
node = query;

query->kind = ASTDropQuery::Kind::Drop;
query->kind = kind;
query->if_exists = if_exists;
query->temporary = temporary;
query->is_dictionary = is_dictionary;
query->is_view = is_view;
query->no_delay = no_delay;
query->permanently = permanently;

tryGetIdentifierNameInto(database, query->database);
tryGetIdentifierNameInto(table, query->table);
@ -105,28 +105,6 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, bool
return true;
}

bool parseDetachQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
if (parseDropQuery(pos, node, expected))
{
auto * drop_query = node->as<ASTDropQuery>();
drop_query->kind = ASTDropQuery::Kind::Detach;
return true;
}
return false;
}

bool parseTruncateQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
if (parseDropQuery(pos, node, expected, true))
{
auto * drop_query = node->as<ASTDropQuery>();
drop_query->kind = ASTDropQuery::Kind::Truncate;
return true;
}
return false;
}

}

bool ParserDropQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
@ -136,11 +114,11 @@ bool ParserDropQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
ParserKeyword s_truncate("TRUNCATE");

if (s_drop.ignore(pos, expected))
return parseDropQuery(pos, node, expected);
return parseDropQuery(pos, node, expected, ASTDropQuery::Kind::Drop);
else if (s_detach.ignore(pos, expected))
return parseDetachQuery(pos, node, expected);
return parseDropQuery(pos, node, expected, ASTDropQuery::Kind::Detach);
else if (s_truncate.ignore(pos, expected))
return parseTruncateQuery(pos, node, expected);
return parseDropQuery(pos, node, expected, ASTDropQuery::Kind::Truncate);
else
return false;
}

@ -8,7 +8,7 @@ namespace DB
{

/** Query like this:
* DROP|DETACH|TRUNCATE TABLE [IF EXISTS] [db.]name
* DROP|DETACH|TRUNCATE TABLE [IF EXISTS] [db.]name [PERMANENTLY]
*
* Or:
* DROP DATABASE [IF EXISTS] db

@ -5,6 +5,7 @@
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/ParserSelectWithUnionQuery.h>
#include <Parsers/ParserSetQuery.h>
#include <Parsers/ParserQuery.h>

namespace DB
{
@ -51,7 +52,13 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserCreateTableQuery create_p;
ParserSelectWithUnionQuery select_p;
ASTPtr query;
if (select_p.parse(pos, query, expected) ||
if (kind == ASTExplainQuery::ExplainKind::ParsedAST)
{
ParserQuery p(end);
if (p.parse(pos, query, expected))
explain_query->setExplainedQuery(std::move(query));
}
else if (select_p.parse(pos, query, expected) ||
create_p.parse(pos, query, expected))
explain_query->setExplainedQuery(std::move(query));
else

@ -9,8 +9,12 @@ namespace DB
class ParserExplainQuery : public IParserBase
{
protected:
const char * end;

const char * getName() const override { return "EXPLAIN"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
public:
ParserExplainQuery(const char* end_) : end(end_) {}
};

}

@ -26,7 +26,7 @@ namespace DB

bool ParserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
ParserQueryWithOutput query_with_output_p;
ParserQueryWithOutput query_with_output_p(end);
ParserInsertQuery insert_p(end);
ParserUseQuery use_p;
ParserSetQuery set_p;

@ -48,7 +48,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ParserShowCreateAccessEntityQuery show_create_access_entity_p;
ParserShowGrantsQuery show_grants_p;
ParserShowPrivilegesQuery show_privileges_p;
ParserExplainQuery explain_p;
ParserExplainQuery explain_p(end);

ASTPtr query;

@ -11,8 +11,11 @@ namespace DB
class ParserQueryWithOutput : public IParserBase
{
protected:
const char * end;
const char * getName() const override { return "Query with output"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
public:
ParserQueryWithOutput(const char * end_) : end(end_) {}
};

}

@ -24,6 +24,8 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ParserKeyword s_clusters("CLUSTERS");
ParserKeyword s_cluster("CLUSTER");
ParserKeyword s_dictionaries("DICTIONARIES");
ParserKeyword s_settings("SETTINGS");
ParserKeyword s_changed("CHANGED");
ParserKeyword s_from("FROM");
ParserKeyword s_in("IN");
ParserKeyword s_not("NOT");
@ -99,6 +101,29 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec

query->cluster_str = std::move(cluster_str);
}
else if (bool changed = s_changed.ignore(pos); changed || s_settings.ignore(pos))
{
query->m_settings = true;

if (changed)
{
query->changed = true;
if (!s_settings.ignore(pos, expected))
return false;
}

/// Not expected due to "SHOW SETTINGS PROFILES"
if (bool insensitive = s_ilike.ignore(pos); insensitive || s_like.ignore(pos))
{
if (insensitive)
query->case_insensitive_like = true;

if (!like_p.parse(pos, like, expected))
return false;
}
else
return false;
}
else
{
if (s_temporary.ignore(pos))

@ -22,7 +22,7 @@ try
" INTO OUTFILE 'test.out'"
" FORMAT TabSeparated";

ParserQueryWithOutput parser;
ParserQueryWithOutput parser(input.data() + input.size());
ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);

std::cout << "Success." << std::endl;

@ -11,7 +11,7 @@ try
{
std::string input = std::string(reinterpret_cast<const char*>(data), size);

DB::ParserQueryWithOutput parser;
DB::ParserQueryWithOutput parser(input.data() + input.size());
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);

DB::formatAST(*ast, std::cerr);

@ -419,28 +419,40 @@ Coordination::OpNum TestKeeperTCPHandler::receiveRequest()
Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(opnum);
request->xid = xid;
request->readImpl(*in);
int response_fd = poll_wrapper->getResponseFD();
auto promise = std::make_shared<std::promise<Coordination::ZooKeeperResponsePtr>>();
zkutil::ResponseCallback callback = [response_fd, promise] (const Coordination::ZooKeeperResponsePtr & response)
if (opnum != Coordination::OpNum::Close)
{
promise->set_value(response);
[[maybe_unused]] int result = write(response_fd, &RESPONSE_BYTE, sizeof(RESPONSE_BYTE));
};

if (request->has_watch)
{
auto watch_promise = std::make_shared<std::promise<Coordination::ZooKeeperResponsePtr>>();
zkutil::ResponseCallback watch_callback = [response_fd, watch_promise] (const Coordination::ZooKeeperResponsePtr & response)
int response_fd = poll_wrapper->getResponseFD();
zkutil::ResponseCallback callback = [response_fd, promise] (const Coordination::ZooKeeperResponsePtr & response)
{
watch_promise->set_value(response);
[[maybe_unused]] int result = write(response_fd, &WATCH_RESPONSE_BYTE, sizeof(WATCH_RESPONSE_BYTE));
promise->set_value(response);
[[maybe_unused]] int result = write(response_fd, &RESPONSE_BYTE, sizeof(RESPONSE_BYTE));
};
test_keeper_storage->putRequest(request, session_id, callback, watch_callback);
responses.push(promise->get_future());
watch_responses.emplace_back(watch_promise->get_future());

if (request->has_watch)
{
auto watch_promise = std::make_shared<std::promise<Coordination::ZooKeeperResponsePtr>>();
zkutil::ResponseCallback watch_callback = [response_fd, watch_promise] (const Coordination::ZooKeeperResponsePtr & response)
{
watch_promise->set_value(response);
[[maybe_unused]] int result = write(response_fd, &WATCH_RESPONSE_BYTE, sizeof(WATCH_RESPONSE_BYTE));
};
test_keeper_storage->putRequest(request, session_id, callback, watch_callback);
responses.push(promise->get_future());
watch_responses.emplace_back(watch_promise->get_future());
}
else
{
test_keeper_storage->putRequest(request, session_id, callback);
responses.push(promise->get_future());
}
}
else
{
zkutil::ResponseCallback callback = [promise] (const Coordination::ZooKeeperResponsePtr & response)
{
promise->set_value(response);
};
test_keeper_storage->putRequest(request, session_id, callback);
responses.push(promise->get_future());
}

@ -793,6 +793,7 @@ void AlterCommands::apply(StorageInMemoryMetadata & metadata, const Context & co
throw DB::Exception("Alter commands is not prepared. Cannot apply. It's a bug", ErrorCodes::LOGICAL_ERROR);

auto metadata_copy = metadata;

for (const AlterCommand & command : *this)
if (!command.ignore)
command.apply(metadata_copy, context);
@ -823,6 +824,7 @@ void AlterCommands::apply(StorageInMemoryMetadata & metadata, const Context & co

/// Changes in columns may lead to changes in TTL expressions.
auto column_ttl_asts = metadata_copy.columns.getColumnTTLs();
metadata_copy.column_ttls_by_name.clear();
for (const auto & [name, ast] : column_ttl_asts)
{
auto new_ttl_entry = TTLDescription::getTTLFromAST(ast, metadata_copy.columns, context, metadata_copy.primary_key);
@ -830,7 +832,7 @@ void AlterCommands::apply(StorageInMemoryMetadata & metadata, const Context & co
}

if (metadata_copy.table_ttl.definition_ast != nullptr)
metadata.table_ttl = TTLTableDescription::getTTLForTableFromAST(
metadata_copy.table_ttl = TTLTableDescription::getTTLForTableFromAST(
metadata_copy.table_ttl.definition_ast, metadata_copy.columns, context, metadata_copy.primary_key);

metadata = std::move(metadata_copy);

@ -13,10 +13,16 @@ Block MergeTreeBlockOutputStream::getHeader() const
}


void MergeTreeBlockOutputStream::writePrefix()
{
/// Only check "too many parts" before write,
/// because interrupting a long-running INSERT query in the middle is not convenient for users.
storage.delayInsertOrThrowIfNeeded();
}


void MergeTreeBlockOutputStream::write(const Block & block)
{
storage.delayInsertOrThrowIfNeeded();

auto part_blocks = storage.writer.splitBlockIntoParts(block, max_parts_per_block, metadata_snapshot);
for (auto & current_block : part_blocks)
{

@ -24,6 +24,7 @@ public:

Block getHeader() const override;
void write(const Block & block) override;
void writePrefix() override;

private:
StorageMergeTree & storage;

@ -2380,8 +2380,6 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until) const
|
||||
}
|
||||
|
||||
const size_t parts_count_in_partition = getMaxPartsCountForPartition();
|
||||
if (parts_count_in_partition < settings->parts_to_delay_insert)
|
||||
return;
|
||||
|
||||
if (parts_count_in_partition >= settings->parts_to_throw_insert)
|
||||
{
|
||||
@ -2389,6 +2387,9 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until) const
|
||||
throw Exception("Too many parts (" + toString(parts_count_in_partition) + "). Merges are processing significantly slower than inserts.", ErrorCodes::TOO_MANY_PARTS);
|
||||
}
|
||||
|
||||
if (parts_count_in_partition < settings->parts_to_delay_insert)
|
||||
return;
|
||||
|
||||
const size_t max_k = settings->parts_to_throw_insert - settings->parts_to_delay_insert; /// always > 0
|
||||
const size_t k = 1 + parts_count_in_partition - settings->parts_to_delay_insert; /// from 1 to max_k
|
||||
const double delay_milliseconds = ::pow(settings->max_delay_to_insert * 1000, static_cast<double>(k) / max_k);
|
||||
@@ -2406,24 +2407,6 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until) const
    std::this_thread::sleep_for(std::chrono::milliseconds(static_cast<size_t>(delay_milliseconds)));
}

-void MergeTreeData::throwInsertIfNeeded() const
-{
-    const auto settings = getSettings();
-    const size_t parts_count_in_total = getPartsCount();
-    if (parts_count_in_total >= settings->max_parts_in_total)
-    {
-        ProfileEvents::increment(ProfileEvents::RejectedInserts);
-        throw Exception("Too many parts (" + toString(parts_count_in_total) + ") in all partitions in total. This indicates wrong choice of partition key. The threshold can be modified with 'max_parts_in_total' setting in <merge_tree> element in config.xml or with per-table setting.", ErrorCodes::TOO_MANY_PARTS);
-    }
-
-    const size_t parts_count_in_partition = getMaxPartsCountForPartition();
-
-    if (parts_count_in_partition >= settings->parts_to_throw_insert)
-    {
-        ProfileEvents::increment(ProfileEvents::RejectedInserts);
-        throw Exception("Too many parts (" + toString(parts_count_in_partition) + "). Merges are processing significantly slower than inserts.", ErrorCodes::TOO_MANY_PARTS);
-    }
-}
-
MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(
    const MergeTreePartInfo & part_info, MergeTreeData::DataPartState state, DataPartsLock & /*lock*/) const
@@ -2451,6 +2434,7 @@ MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(
    return nullptr;
}


void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy)
{
    auto lock = lockParts();
src/Storages/MergeTree/MergeTreeData.h
@@ -423,7 +423,6 @@ public:
    /// If the table contains too many active parts, sleep for a while to give them time to merge.
    /// If until is non-null, wake up from the sleep earlier if the event happened.
    void delayInsertOrThrowIfNeeded(Poco::Event * until = nullptr) const;
-    void throwInsertIfNeeded() const;

    /// Renames temporary part to a permanent part and adds it to the parts set.
    /// It is assumed that the part does not intersect with existing parts.
src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
@@ -235,6 +235,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
    /// Previous part only in boundaries of partition frame
    const MergeTreeData::DataPartPtr * prev_part = nullptr;

+    size_t parts_selected_precondition = 0;
    for (const MergeTreeData::DataPartPtr & part : data_parts)
    {
        const String & partition_id = part->info.partition_id;
@@ -282,6 +283,8 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
        part_info.compression_codec_desc = part->default_codec->getFullCodecDesc();
        part_info.shall_participate_in_merges = has_volumes_with_disabled_merges ? part->shallParticipateInMerges(storage_policy) : true;

+        ++parts_selected_precondition;
+
        parts_ranges.back().emplace_back(part_info);

        /// Check for consistency of data parts. If assertion is failed, it requires immediate investigation.
@@ -294,6 +297,13 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
        prev_part = &part;
    }

+    if (parts_selected_precondition == 0)
+    {
+        if (out_disable_reason)
+            *out_disable_reason = "No parts satisfy preconditions for merge";
+        return SelectPartsDecision::CANNOT_SELECT;
+    }
+
    IMergeSelector::PartsRange parts_to_merge;

    if (metadata_snapshot->hasAnyTTL() && merge_with_ttl_allowed && !ttl_merges_blocker.isCancelled())
src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
@@ -123,8 +123,6 @@ void ReplicatedMergeTreeBlockOutputStream::write(const Block & block)
{
    last_block_is_duplicate = false;

-    storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event);
-
    auto zookeeper = storage.getZooKeeper();
    assertSessionIsNotExpired(zookeeper);

@@ -524,7 +522,9 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(

void ReplicatedMergeTreeBlockOutputStream::writePrefix()
{
-    storage.throwInsertIfNeeded();
+    /// Only check "too many parts" before write,
+    /// because interrupting long-running INSERT query in the middle is not convenient for users.
+    storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event);
}
src/Storages/SetSettings.h
@@ -10,7 +10,8 @@ class ASTStorage;


#define SET_RELATED_SETTINGS(M) \
-    M(Bool, persistent, true, "Disable setting to avoid the overhead of writing to disk for StorageSet", 0)
+    M(Bool, persistent, true, "Disable setting to avoid the overhead of writing to disk for StorageSet", 0) \
+    M(String, disk, "default", "Name of the disk used to persist set data", 0)

#define LIST_OF_SET_SETTINGS(M) \
    SET_RELATED_SETTINGS(M) \
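The M(...) rows form an X-macro list: each setting is declared once and expanded elsewhere into members, defaults and parsing; the change above also adds the trailing backslash to the persistent row so the list continues into the new disk row. A toy reduction of the idiom (ToySetSettings and DECLARE_MEMBER are illustrative; the real expansion goes through ClickHouse's settings-traits machinery):

    #include <iostream>
    #include <string>

    // Each M(type, name, default, description) row expands into one member.
    #define SET_RELATED_SETTINGS(M) \
        M(bool, persistent, true, "Disable to avoid the overhead of writing to disk") \
        M(std::string, disk, "default", "Name of the disk used to persist set data")

    #define DECLARE_MEMBER(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME = DEFAULT;

    struct ToySetSettings
    {
        SET_RELATED_SETTINGS(DECLARE_MEMBER)
    };

    int main()
    {
        ToySetSettings settings;
        std::cout << settings.disk << '\n';   // prints "default"
    }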
src/Storages/StorageDictionary.cpp
@@ -121,9 +121,9 @@ StorageDictionary::StorageDictionary(
void StorageDictionary::checkTableCanBeDropped() const
{
    if (location == Location::SameDatabaseAndNameAsDictionary)
-        throw Exception("Cannot detach dictionary " + backQuote(dictionary_name) + " as table, use DETACH DICTIONARY query", ErrorCodes::CANNOT_DETACH_DICTIONARY_AS_TABLE);
+        throw Exception("Cannot drop/detach dictionary " + backQuote(dictionary_name) + " as table, use DROP DICTIONARY or DETACH DICTIONARY query instead", ErrorCodes::CANNOT_DETACH_DICTIONARY_AS_TABLE);
    if (location == Location::DictionaryDatabase)
-        throw Exception("Cannot detach table " + getStorageID().getFullTableName() + " from a database with DICTIONARY engine", ErrorCodes::CANNOT_DETACH_DICTIONARY_AS_TABLE);
+        throw Exception("Cannot drop/detach table " + getStorageID().getFullTableName() + " from a database with DICTIONARY engine", ErrorCodes::CANNOT_DETACH_DICTIONARY_AS_TABLE);
}

void StorageDictionary::checkTableCanBeDetached() const
src/Storages/StorageJoin.cpp
@@ -8,6 +8,7 @@
#include <Core/ColumnNumbers.h>
#include <DataStreams/IBlockInputStream.h>
#include <DataTypes/NestedUtils.h>
+#include <Disks/IDisk.h>
#include <Interpreters/joinDispatch.h>
#include <Interpreters/TableJoin.h>
#include <Interpreters/castColumn.h>
@@ -35,6 +36,7 @@ namespace ErrorCodes
}

StorageJoin::StorageJoin(
+    DiskPtr disk_,
    const String & relative_path_,
    const StorageID & table_id_,
    const Names & key_names_,
@@ -45,9 +47,8 @@ StorageJoin::StorageJoin(
    const ColumnsDescription & columns_,
    const ConstraintsDescription & constraints_,
    bool overwrite_,
-    const Context & context_,
    bool persistent_)
-    : StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_, persistent_}
+    : StorageSetOrJoinBase{disk_, relative_path_, table_id_, columns_, constraints_, persistent_}
    , key_names(key_names_)
    , use_nulls(use_nulls_)
    , limits(limits_)
|
||||
void StorageJoin::truncate(
|
||||
const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder&)
|
||||
{
|
||||
Poco::File(path).remove(true);
|
||||
Poco::File(path).createDirectories();
|
||||
Poco::File(path + "tmp/").createDirectories();
|
||||
disk->removeRecursive(path);
|
||||
disk->createDirectories(path);
|
||||
disk->createDirectories(path + "tmp/");
|
||||
|
||||
increment = 0;
|
||||
join = std::make_shared<HashJoin>(table_join, metadata_snapshot->getSampleBlock().sortColumns(), overwrite);
|
||||
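These three calls are the crux of the change: StorageJoin now manipulates its directory through the IDisk interface rather than Poco::File, so the data directory can live on any configured disk, local by default or a remote implementation. A toy reduction of that seam (IToyDisk is illustrative and far smaller than the real IDisk):

    #include <string>

    // The storage only talks to this interface; swapping the implementation
    // swaps where the bytes actually live.
    struct IToyDisk
    {
        virtual ~IToyDisk() = default;
        virtual void removeRecursive(const std::string & path) = 0;
        virtual void createDirectories(const std::string & path) = 0;
    };

    // Same shape as StorageJoin::truncate above: wipe and recreate the layout.
    void truncateData(IToyDisk & disk, const std::string & path)
    {
        disk.removeRecursive(path);
        disk.createDirectories(path);
        disk.createDirectories(path + "tmp/");
    }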
@@ -124,6 +125,7 @@ void registerStorageJoin(StorageFactory & factory)
        auto join_any_take_last_row = settings.join_any_take_last_row;
        auto old_any_join = settings.any_join_distinct_right_table_keys;
        bool persistent = true;
+        String disk_name = "default";

        if (args.storage_def && args.storage_def->settings)
        {
@@ -141,6 +143,8 @@ void registerStorageJoin(StorageFactory & factory)
                    join_any_take_last_row = setting.value;
                else if (setting.name == "any_join_distinct_right_table_keys")
                    old_any_join = setting.value;
+                else if (setting.name == "disk")
+                    disk_name = setting.value.get<String>();
                else if (setting.name == "persistent")
                {
                    auto join_settings = std::make_unique<JoinSettings>();
@@ -148,12 +152,12 @@ void registerStorageJoin(StorageFactory & factory)
                    persistent = join_settings->persistent;
                }
                else
-                    throw Exception(
-                        "Unknown setting " + setting.name + " for storage " + args.engine_name,
-                        ErrorCodes::BAD_ARGUMENTS);
+                    throw Exception("Unknown setting " + setting.name + " for storage " + args.engine_name, ErrorCodes::BAD_ARGUMENTS);
            }
        }

+        DiskPtr disk = args.context.getDisk(disk_name);
+
        if (engine_args.size() < 3)
            throw Exception(
                "Storage Join requires at least 3 parameters: Join(ANY|ALL|SEMI|ANTI, LEFT|INNER|RIGHT, keys...).",
@@ -219,6 +223,7 @@ void registerStorageJoin(StorageFactory & factory)
        }

        return StorageJoin::create(
+            disk,
            args.relative_data_path,
            args.table_id,
            key_names,
@@ -229,7 +234,6 @@ void registerStorageJoin(StorageFactory & factory)
            args.columns,
            args.constraints,
            join_any_take_last_row,
-            args.context,
            persistent);
    };
src/Storages/StorageJoin.h
@@ -67,6 +67,7 @@ private:

protected:
    StorageJoin(
+        DiskPtr disk_,
        const String & relative_path_,
        const StorageID & table_id_,
        const Names & key_names_,
@@ -76,7 +77,6 @@ protected:
        const ColumnsDescription & columns_,
        const ConstraintsDescription & constraints_,
        bool overwrite,
-        const Context & context_,
        bool persistent_);
};
src/Storages/StorageSet.cpp
@@ -6,12 +6,11 @@
#include <Compression/CompressedWriteBuffer.h>
#include <DataStreams/NativeBlockOutputStream.h>
#include <DataStreams/NativeBlockInputStream.h>
+#include <Disks/IDisk.h>
#include <Common/formatReadable.h>
#include <Common/escapeForFileName.h>
#include <Common/StringUtils/StringUtils.h>
#include <Interpreters/Set.h>
#include <Interpreters/Context.h>
-#include <Poco/DirectoryIterator.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTLiteral.h>
@@ -49,7 +48,7 @@ private:
    String backup_path;
    String backup_tmp_path;
    String backup_file_name;
-    WriteBufferFromFile backup_buf;
+    std::unique_ptr<WriteBufferFromFileBase> backup_buf;
    CompressedWriteBuffer compressed_backup_buf;
    NativeBlockOutputStream backup_stream;
    bool persistent;
@@ -68,8 +67,8 @@ SetOrJoinBlockOutputStream::SetOrJoinBlockOutputStream(
    , backup_path(backup_path_)
    , backup_tmp_path(backup_tmp_path_)
    , backup_file_name(backup_file_name_)
-    , backup_buf(backup_tmp_path + backup_file_name)
-    , compressed_backup_buf(backup_buf)
+    , backup_buf(table_.disk->writeFile(backup_tmp_path + backup_file_name))
+    , compressed_backup_buf(*backup_buf)
    , backup_stream(compressed_backup_buf, 0, metadata_snapshot->getSampleBlock())
    , persistent(persistent_)
{
@@ -92,9 +91,10 @@ void SetOrJoinBlockOutputStream::writeSuffix()
    {
        backup_stream.flush();
        compressed_backup_buf.next();
-        backup_buf.next();
+        backup_buf->next();
+        backup_buf->finalize();

-        Poco::File(backup_tmp_path + backup_file_name).renameTo(backup_path + backup_file_name);
+        table.disk->replaceFile(backup_tmp_path + backup_file_name, backup_path + backup_file_name);
    }
}
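The finalize step keeps the crash-safety idiom intact: the backup is written fully under tmp/, every buffer layer is flushed, and only then is the file moved over its final name, so readers never observe a half-written backup. A minimal sketch of the same write-then-rename idiom over plain POSIX files (writeFileAtomically is a hypothetical helper, not the IDisk API):

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Write-then-rename: the destination either keeps its old content or gets
    // the complete new content; rename(2) replaces it atomically on POSIX.
    void writeFileAtomically(const std::string & dir, const std::string & name, const std::string & contents)
    {
        const std::string tmp_path = dir + "/tmp/" + name;
        const std::string final_path = dir + "/" + name;

        std::FILE * f = std::fopen(tmp_path.c_str(), "wb");
        if (!f)
            throw std::runtime_error("cannot open " + tmp_path);
        std::fwrite(contents.data(), 1, contents.size(), f);
        std::fflush(f);    // a production version would also fsync the file and directory
        std::fclose(f);

        if (std::rename(tmp_path.c_str(), final_path.c_str()) != 0)
            throw std::runtime_error("cannot rename " + tmp_path + " to " + final_path);
    }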
||||
@ -107,13 +107,14 @@ BlockOutputStreamPtr StorageSetOrJoinBase::write(const ASTPtr & /*query*/, const
|
||||
|
||||
|
||||
StorageSetOrJoinBase::StorageSetOrJoinBase(
|
||||
DiskPtr disk_,
|
||||
const String & relative_path_,
|
||||
const StorageID & table_id_,
|
||||
const ColumnsDescription & columns_,
|
||||
const ConstraintsDescription & constraints_,
|
||||
const Context & context_,
|
||||
bool persistent_)
|
||||
: IStorage(table_id_),
|
||||
disk(disk_),
|
||||
persistent(persistent_)
|
||||
{
|
||||
StorageInMemoryMetadata storage_metadata;
|
||||
@@ -125,19 +126,18 @@ StorageSetOrJoinBase::StorageSetOrJoinBase(
    if (relative_path_.empty())
        throw Exception("Join and Set storages require data path", ErrorCodes::INCORRECT_FILE_NAME);

-    base_path = context_.getPath();
-    path = base_path + relative_path_;
+    path = relative_path_;
}


StorageSet::StorageSet(
+    DiskPtr disk_,
    const String & relative_path_,
    const StorageID & table_id_,
    const ColumnsDescription & columns_,
    const ConstraintsDescription & constraints_,
-    const Context & context_,
    bool persistent_)
-    : StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_, persistent_},
+    : StorageSetOrJoinBase{disk_, relative_path_, table_id_, columns_, constraints_, persistent_},
    set(std::make_shared<Set>(SizeLimits(), false, true))
{
@@ -158,9 +158,9 @@ std::optional<UInt64> StorageSet::totalBytes(const Settings &) const { return se

void StorageSet::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder &)
{
-    Poco::File(path).remove(true);
-    Poco::File(path).createDirectories();
-    Poco::File(path + "tmp/").createDirectories();
+    disk->removeRecursive(path);
+    disk->createDirectories(path);
+    disk->createDirectories(path + "tmp/");

    Block header = metadata_snapshot->getSampleBlock();
    header = header.sortColumns();
@@ -173,24 +173,23 @@ void StorageSet::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_sn

void StorageSetOrJoinBase::restore()
{
-    Poco::File tmp_dir(path + "tmp/");
-    if (!tmp_dir.exists())
+    if (!disk->exists(path + "tmp/"))
    {
-        tmp_dir.createDirectories();
+        disk->createDirectories(path + "tmp/");
        return;
    }

    static const char * file_suffix = ".bin";
    static const auto file_suffix_size = strlen(".bin");

-    Poco::DirectoryIterator dir_end;
-    for (Poco::DirectoryIterator dir_it(path); dir_end != dir_it; ++dir_it)
+    for (auto dir_it{disk->iterateDirectory(path)}; dir_it->isValid(); dir_it->next())
    {
-        const auto & name = dir_it.name();
+        const auto & name = dir_it->name();
+        const auto & file_path = dir_it->path();

-        if (dir_it->isFile()
+        if (disk->isFile(file_path)
            && endsWith(name, file_suffix)
-            && dir_it->getSize() > 0)
+            && disk->getFileSize(file_path) > 0)
        {
            /// Calculate the maximum number of available files with a backup to add the following files with large numbers.
            UInt64 file_num = parse<UInt64>(name.substr(0, name.size() - file_suffix_size));
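restore() relies on a simple naming scheme: backups are named <N>.bin and the scan keeps the largest N so later backups continue the sequence. A self-contained sketch of that convention (backupFileNumber is a hypothetical helper mirroring the substr/parse line above):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <string>

    // "42.bin" -> 42, mirroring parse<UInt64>(name.substr(0, name.size() - file_suffix_size)).
    uint64_t backupFileNumber(const std::string & name)
    {
        static const char * file_suffix = ".bin";
        static const size_t file_suffix_size = std::strlen(file_suffix);
        assert(name.size() > file_suffix_size);
        return std::stoull(name.substr(0, name.size() - file_suffix_size));
    }

    int main()
    {
        uint64_t max_num = 0;
        for (const std::string & name : {std::string("1.bin"), std::string("7.bin"), std::string("42.bin")})
            max_num = std::max(max_num, backupFileNumber(name));
        assert(max_num == 42);
    }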
@@ -205,8 +204,8 @@ void StorageSetOrJoinBase::restore()

void StorageSetOrJoinBase::restoreFromFile(const String & file_path)
{
-    ReadBufferFromFile backup_buf(file_path);
-    CompressedReadBuffer compressed_backup_buf(backup_buf);
+    auto backup_buf = disk->readFile(file_path);
+    CompressedReadBuffer compressed_backup_buf(*backup_buf);
    NativeBlockInputStream backup_stream(compressed_backup_buf, 0);

    backup_stream.readPrefix();
@@ -226,10 +225,9 @@ void StorageSetOrJoinBase::restoreFromFile(const String & file_path)
void StorageSetOrJoinBase::rename(const String & new_path_to_table_data, const StorageID & new_table_id)
{
    /// Rename directory with data.
-    String new_path = base_path + new_path_to_table_data;
-    Poco::File(path).renameTo(new_path);
+    disk->replaceFile(path, new_path_to_table_data);

-    path = new_path;
+    path = new_path_to_table_data;
    renameInMemory(new_table_id);
}
@@ -251,7 +249,8 @@ void registerStorageSet(StorageFactory & factory)
        set_settings->loadFromQuery(*args.storage_def);
    }

-    return StorageSet::create(args.relative_data_path, args.table_id, args.columns, args.constraints, args.context, set_settings->persistent);
+    DiskPtr disk = args.context.getDisk(set_settings->disk);
+    return StorageSet::create(disk, args.relative_data_path, args.table_id, args.columns, args.constraints, set_settings->persistent);
    }, StorageFactory::StorageFeatures{ .supports_settings = true, });
}
src/Storages/StorageSet.h
@@ -2,6 +2,7 @@

#include <ext/shared_ptr_helper.h>

#include <Interpreters/Context.h>
#include <Storages/IStorage.h>
#include <Storages/SetSettings.h>
|
||||
|
||||
protected:
|
||||
StorageSetOrJoinBase(
|
||||
DiskPtr disk_,
|
||||
const String & relative_path_,
|
||||
const StorageID & table_id_,
|
||||
const ColumnsDescription & columns_,
|
||||
const ConstraintsDescription & constraints_,
|
||||
const Context & context_,
|
||||
bool persistent_);
|
||||
|
||||
String base_path;
|
||||
DiskPtr disk;
|
||||
String path;
|
||||
bool persistent;
|
||||
|
||||
@@ -85,11 +86,11 @@ private:

protected:
    StorageSet(
+        DiskPtr disk_,
        const String & relative_path_,
        const StorageID & table_id_,
        const ColumnsDescription & columns_,
        const ConstraintsDescription & constraints_,
-        const Context & context_,
        bool persistent_);
};
src/Storages/StorageStripeLog.cpp
@@ -40,8 +40,6 @@
namespace DB
{

-#define INDEX_BUFFER_SIZE 4096
-
namespace ErrorCodes
{
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
@@ -319,7 +317,7 @@ Pipe StorageStripeLog::read(
        return Pipe(std::make_shared<NullSource>(metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID())));
    }

-    CompressedReadBufferFromFile index_in(disk->readFile(index_file, INDEX_BUFFER_SIZE));
+    CompressedReadBufferFromFile index_in(disk->readFile(index_file, 4096));
    std::shared_ptr<const IndexForNativeFormat> index{std::make_shared<IndexForNativeFormat>(index_in, column_names_set)};

    size_t size = index->blocks.size();
tests/CMakeLists.txt
@@ -15,16 +15,6 @@ install (
    COMPONENT clickhouse
    PATTERN "CMakeLists.txt" EXCLUDE
    PATTERN ".gitignore" EXCLUDE
-    PATTERN "top_level_domains" EXCLUDE
)

-# Dereference symlink
-get_filename_component(TOP_LEVEL_DOMAINS_ABS_DIR config/top_level_domains REALPATH)
-install (
-    DIRECTORY "${TOP_LEVEL_DOMAINS_ABS_DIR}"
-    DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse-test/config
-    USE_SOURCE_PERMISSIONS
-    COMPONENT clickhouse
-)
-
install (FILES server-test.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-server COMPONENT clickhouse)
tests/config/top_level_domains
@@ -1 +0,0 @@
-../../docker/test/performance-comparison/config/top_level_domains
Some files were not shown because too many files have changed in this diff.