Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-21 23:21:59 +00:00

Commit 52570b1c80: Merge branch 'master' into add-system-of-table_views

.github/ISSUE_TEMPLATE/85_bug-report.md (vendored, 2 changed lines)
@@ -9,7 +9,7 @@ assignees: ''

> You have to provide the following information whenever possible.

**Describe the bug**
**Describe what's wrong**

> A clear and concise description of what works not as it is supposed to.

@@ -45,6 +45,7 @@ include (cmake/arch.cmake)

include (cmake/target.cmake)
include (cmake/tools.cmake)
include (cmake/analysis.cmake)
include (cmake/git_status.cmake)

# Ignore export() since we don't use it,
# but it gets broken with a global targets via link_libraries()
cmake/git_status.cmake (new file, 17 lines)

@@ -0,0 +1,17 @@

# Print the status of the git repository (if git is available).
# This is useful for troubleshooting build failure reports
find_package(Git)

if (Git_FOUND)
    execute_process(
        COMMAND ${GIT_EXECUTABLE} rev-parse HEAD
        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
        OUTPUT_VARIABLE GIT_COMMIT_ID
        OUTPUT_STRIP_TRAILING_WHITESPACE)
    message(STATUS "HEAD's commit hash ${GIT_COMMIT_ID}")
    execute_process(
        COMMAND ${GIT_EXECUTABLE} status
        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
else()
    message(STATUS "The git program could not be found.")
endif()

contrib/nanodbc (vendored, 2 changed lines)

@@ -1 +1 @@

Subproject commit 9fc459675515d491401727ec67fca38db721f28c
Subproject commit df52a1232dfa182f9af60974d001b91823afe9bc

contrib/replxx (vendored, 2 changed lines)

@@ -1 +1 @@

Subproject commit c81be6c68b146f15f2096b7ef80e3f21fe27004c
Subproject commit f97765df14f4a6236d69b8f14b53ef2051ebd95a

@@ -79,8 +79,9 @@ RUN python3 -m pip install \

    pytest-timeout \
    pytest-xdist \
    pytest-repeat \
    pytz \
    redis \
    tzlocal \
    tzlocal==2.1 \
    urllib3 \
    requests-kerberos \
    pyhdfs

@@ -37,7 +37,7 @@ RUN apt-get update \

ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal python-dateutil numpy
RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal==2.1 pytz python-dateutil numpy

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 20.10.6
@@ -9,9 +9,9 @@ A date. Stored in two bytes as the number of days since 1970-01-01 (unsigned). A

The date value is stored without the time zone.

## Examples {#examples}
**Example**

**1.** Creating a table with a `DateTime`-type column and inserting data into it:
Creating a table with a `Date`-type column and inserting data into it:

``` sql
CREATE TABLE dt

@@ -23,10 +23,7 @@ ENGINE = TinyLog;
```

``` sql
INSERT INTO dt Values (1546300800, 1), ('2019-01-01', 2);
```

``` sql
INSERT INTO dt VALUES (1546300800, 1), ('2019-01-01', 2);
SELECT * FROM dt;
```

@@ -37,11 +34,8 @@ SELECT * FROM dt;
└────────────┴──────────┘
```

## See Also {#see-also}
**See Also**

- [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
- [`DateTime` data type](../../sql-reference/data-types/datetime.md)

[Original article](https://clickhouse.tech/docs/en/data_types/date/) <!--hide-->
docs/en/sql-reference/data-types/date32.md (new file, 40 lines)

@@ -0,0 +1,40 @@

---
toc_priority: 48
toc_title: Date32
---

# Date32 {#data_type-datetime32}

A date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1925-01-01. Allows storing values up to 2283-11-11.

**Examples**

Creating a table with a `Date32`-type column and inserting data into it:

``` sql
CREATE TABLE new
(
    `timestamp` Date32,
    `event_id` UInt8
)
ENGINE = TinyLog;
```

``` sql
INSERT INTO new VALUES (4102444800, 1), ('2100-01-01', 2);
SELECT * FROM new;
```

``` text
┌──timestamp─┬─event_id─┐
│ 2100-01-01 │        1 │
│ 2100-01-01 │        2 │
└────────────┴──────────┘
```

**See Also**

- [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32)
- [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)
@@ -17,7 +17,7 @@ DateTime64(precision, [timezone])

Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store a time zone that is the same for the entire column, which affects how `DateTime64` values are displayed in text format and how values specified as strings are parsed (‘2020-01-01 05:00:01.000’). The time zone is not stored in the rows of the table (or in the resultset), but is stored in the column metadata. See details in [DateTime](../../sql-reference/data-types/datetime.md).

Supported range from January 1, 1925 till December 31, 2283.
Supported range from January 1, 1925 till November 11, 2283.

## Examples {#examples}
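For context, a minimal sketch of how the `precision` and `timezone` parameters described above behave in practice (the table name and values are illustrative, not part of this commit):

``` sql
CREATE TABLE dt64_example
(
    `timestamp` DateTime64(3, 'UTC'),
    `event_id` UInt8
)
ENGINE = TinyLog;

-- 1546300800123 is a number of 'ticks' at precision 3 (milliseconds since epoch);
-- the string form is parsed using the column's time zone.
INSERT INTO dt64_example VALUES (1546300800123, 1), ('2019-01-01 00:00:00.123', 2);
SELECT * FROM dt64_example;
```

Both rows come out as `2019-01-01 00:00:00.123` in the column's `UTC` time zone.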
@@ -152,6 +152,104 @@ Alias: `DATE`.

## toDateTimeOrNull {#todatetimeornull}

## toDate32 {#todate32}

Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the supported range, the border value supported by `Date32` is returned. If the argument has [Date](../../sql-reference/data-types/date.md) type, the borders of `Date` are taken into account.

**Syntax**

``` sql
toDate32(expr)
```

**Arguments**

- `expr` — The value. [String](../../sql-reference/data-types/string.md), [UInt32](../../sql-reference/data-types/int-uint.md) or [Date](../../sql-reference/data-types/date.md).

**Returned value**

- A calendar date.

Type: [Date32](../../sql-reference/data-types/date32.md).

**Example**

1. The value is within the range:

``` sql
SELECT toDate32('1955-01-01') AS value, toTypeName(value);
```

``` text
┌──────value─┬─toTypeName(toDate32('1955-01-01'))─┐
│ 1955-01-01 │ Date32                             │
└────────────┴────────────────────────────────────┘
```

2. The value is outside the range:

``` sql
SELECT toDate32('1924-01-01') AS value, toTypeName(value);
```

``` text
┌──────value─┬─toTypeName(toDate32('1924-01-01'))─┐
│ 1925-01-01 │ Date32                             │
└────────────┴────────────────────────────────────┘
```

3. With `Date`-type argument:

``` sql
SELECT toDate32(toDate('1924-01-01')) AS value, toTypeName(value);
```

``` text
┌──────value─┬─toTypeName(toDate32(toDate('1924-01-01')))─┐
│ 1970-01-01 │ Date32                                     │
└────────────┴────────────────────────────────────────────┘
```

## toDate32OrZero {#todate32-or-zero}

The same as [toDate32](#todate32) but returns the min value of [Date32](../../sql-reference/data-types/date32.md) if an invalid argument is received.

**Example**

Query:

``` sql
SELECT toDate32OrZero('1924-01-01'), toDate32OrZero('');
```

Result:

``` text
┌─toDate32OrZero('1924-01-01')─┬─toDate32OrZero('')─┐
│                   1925-01-01 │         1925-01-01 │
└──────────────────────────────┴────────────────────┘
```

## toDate32OrNull {#todate32-or-null}

The same as [toDate32](#todate32) but returns `NULL` if an invalid argument is received.

**Example**

Query:

``` sql
SELECT toDate32OrNull('1955-01-01'), toDate32OrNull('');
```

Result:

``` text
┌─toDate32OrNull('1955-01-01')─┬─toDate32OrNull('')─┐
│                   1955-01-01 │               ᴺᵁᴸᴸ │
└──────────────────────────────┴────────────────────┘
```

## toDecimal(32\|64\|128\|256) {#todecimal3264128256}

Converts `value` to the [Decimal](../../sql-reference/data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places.
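A quick illustrative query for the scale parameter described above (the literal is arbitrary; `toTypeName` shows the resulting type):

``` sql
SELECT toDecimal32('42.7393', 2) AS d, toTypeName(d) AS type;
-- type is Decimal(9, 2); the value keeps two decimal places.
```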
@@ -14,7 +14,7 @@ You can use table functions in:

The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.

- [CREATE TABLE AS \<table_function()\>](../../sql-reference/statements/create/table.md) query.
- [CREATE TABLE AS table_function()](../../sql-reference/statements/create/table.md) query.

It's one of the methods of creating a table.
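For context, a minimal sketch of the `CREATE TABLE AS table_function()` form mentioned above (the table name is illustrative; `numbers` is a standard table function):

``` sql
CREATE TABLE numbers_10 AS numbers(10);
SELECT count() FROM numbers_10;
```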
@@ -36,7 +36,7 @@ ClickHouse - полноценная колоночная СУБД. Данные

`IDataType` и `IColumn` слабо связаны друг с другом. Различные типы данных могут быть представлены в памяти с помощью одной реализации `IColumn`. Например, и `DataTypeUInt32`, и `DataTypeDateTime` в памяти представлены как `ColumnUInt32` или `ColumnConstUInt32`. В добавок к этому, один тип данных может быть представлен различными реализациями `IColumn`. Например, `DataTypeUInt8` может быть представлен как `ColumnUInt8` и `ColumnConstUInt8`.

`IDataType` хранит только метаданные. Например, `DataTypeUInt8` не хранить ничего (кроме скрытого указателя `vptr`), а `DataTypeFixedString` хранит только `N` (фиксированный размер строки).
`IDataType` хранит только метаданные. Например, `DataTypeUInt8` не хранит ничего (кроме скрытого указателя `vptr`), а `DataTypeFixedString` хранит только `N` (фиксированный размер строки).

В `IDataType` есть вспомогательные методы для данных различного формата. Среди них методы сериализации значений, допускающих использование кавычек, сериализации значения в JSON или XML. Среди них нет прямого соответствия форматам данных. Например, различные форматы `Pretty` и `TabSeparated` могут использовать один вспомогательный метод `serializeTextEscaped` интерфейса `IDataType`.

@@ -62,7 +62,7 @@ ClickHouse - полноценная колоночная СУБД. Данные

> Потоки блоков используют «втягивающий» (pull) подход к управлению потоком выполнения: когда вы вытягиваете блок из первого потока, он, следовательно, вытягивает необходимые блоки из вложенных потоков, так и работает весь конвейер выполнения. Ни «pull» ни «push» не имеют явного преимущества, потому что поток управления неявный, и это ограничивает в реализации различных функций, таких как одновременное выполнение нескольких запросов (слияние нескольких конвейеров вместе). Это ограничение можно преодолеть с помощью сопрограмм (coroutines) или просто запуском дополнительных потоков, которые ждут друг друга. У нас может быть больше возможностей, если мы сделаем поток управления явным: если мы локализуем логику для передачи данных из одной расчетной единицы в другую вне этих расчетных единиц. Читайте эту [статью](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) для углубленного изучения.

Следует отметить, что конвейер выполнения запроса создает временные данные на каждом шаге. Мы стараемся сохранить размер блока достаточно маленьким, чтобы временные данные помещались в кэш процессора. При таком допущении запись и чтение временных данных практически бесплатны по сравнению с другими расчетами. Мы могли бы рассмотреть альтернативу, которая заключается в том, чтобы объединить многие операции в конвеере вместе. Это может сделать конвейер как можно короче и удалить большую часть временных данных, что может быть преимуществом, но у такого подхода также есть недостатки. Например, разделенный конвейер позволяет легко реализовать кэширование промежуточных данных, использование промежуточных данных из аналогичных запросов, выполняемых одновременно, и объединение конвейеров для аналогичных запросов.
Следует отметить, что конвейер выполнения запроса создает временные данные на каждом шаге. Мы стараемся сохранить размер блока достаточно маленьким, чтобы временные данные помещались в кэш процессора. При таком допущении запись и чтение временных данных практически бесплатны по сравнению с другими расчетами. Мы могли бы рассмотреть альтернативу, которая заключается в том, чтобы объединить многие операции в конвейере вместе. Это может сделать конвейер как можно короче и удалить большую часть временных данных, что может быть преимуществом, но у такого подхода также есть недостатки. Например, разделенный конвейер позволяет легко реализовать кэширование промежуточных данных, использование промежуточных данных из аналогичных запросов, выполняемых одновременно, и объединение конвейеров для аналогичных запросов.

## Форматы {#formats}

@@ -119,7 +119,7 @@ ClickHouse - полноценная колоночная СУБД. Данные

Существуют обычные функции и агрегатные функции. Агрегатные функции смотрите в следующем разделе.

Обычный функции не изменяют число строк и работают так, как если бы обрабатывали каждую строку независимо. В действительности же, функции вызываются не к отдельным строкам, а блокам данных для реализации векторизованного выполнения запросов.
Обычные функции не изменяют число строк и работают так, как если бы обрабатывали каждую строку независимо. В действительности же, функции вызываются не к отдельным строкам, а блокам данных для реализации векторизованного выполнения запросов.

Некоторые функции, такие как [blockSize](../sql-reference/functions/other-functions.md#function-blocksize), [rowNumberInBlock](../sql-reference/functions/other-functions.md#function-rownumberinblock), и [runningAccumulate](../sql-reference/functions/other-functions.md#runningaccumulate), эксплуатируют блочную обработку и нарушают независимость строк.

@@ -162,7 +162,7 @@ ClickHouse имеет сильную типизацию, поэтому нет

Сервера в кластере в основном независимы. Вы можете создать `Распределенную` (`Distributed`) таблицу на одном или всех серверах в кластере. Такая таблица сама по себе не хранит данные - она только предоставляет возможность "просмотра" всех локальных таблиц на нескольких узлах кластера. При выполнении `SELECT` распределенная таблица переписывает запрос, выбирает удаленные узлы в соответствии с настройками балансировки нагрузки и отправляет им запрос. Распределенная таблица просит удаленные сервера обработать запрос до той стадии, когда промежуточные результаты с разных серверов могут быть объединены. Затем он получает промежуточные результаты и объединяет их. Распределенная таблица пытается возложить как можно больше работы на удаленные серверы и сократить объем промежуточных данных, передаваемых по сети.

Ситуация усложняется, при использовании подзапросов в случае `IN` или `JOIN`, когда каждый из них использует таблицу `Distributed`. Есть разные стратегии для выполнения таких запросов.
Ситуация усложняется при использовании подзапросов в случае `IN` или `JOIN`, когда каждый из них использует таблицу `Distributed`. Есть разные стратегии для выполнения таких запросов.

Глобального плана выполнения распределенных запросов не существует. Каждый узел имеет собственный локальный план для своей части работы. У нас есть простое однонаправленное выполнение распределенных запросов: мы отправляем запросы на удаленные узлы и затем объединяем результаты. Но это невозможно для сложных запросов `GROUP BY` высокой кардинальности или запросов с большим числом временных данных в `JOIN`: в таких случаях нам необходимо перераспределить («reshuffle») данные между серверами, что требует дополнительной координации. ClickHouse не поддерживает выполнение запросов такого рода, и нам нужно работать над этим.

@@ -9,9 +9,9 @@ toc_title: Date

Дата хранится без учёта часового пояса.

## Примеры {#examples}
**Пример**

**1.** Создание таблицы и добавление в неё данных:
Создание таблицы и добавление в неё данных:

``` sql
CREATE TABLE dt

@@ -24,9 +24,6 @@ ENGINE = TinyLog;

``` sql
INSERT INTO dt Values (1546300800, 1), ('2019-01-01', 2);
```

``` sql
SELECT * FROM dt;
```

@@ -37,7 +34,7 @@ SELECT * FROM dt;
└────────────┴──────────┘
```

## Смотрите также {#see-also}
**См. также**

- [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md)
- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime)
docs/ru/sql-reference/data-types/date32.md (new file, 40 lines)

@@ -0,0 +1,40 @@

---
toc_priority: 48
toc_title: Date32
---

# Date32 {#data_type-datetime32}

Дата. Поддерживается такой же диапазон дат, как для типа [Datetime64](../../sql-reference/data-types/datetime64.md). Значение хранится в четырех байтах и соответствует числу дней с 1925-01-01 по 2283-11-11.

**Пример**

Создание таблицы со столбцом типа `Date32` и добавление в нее данных:

``` sql
CREATE TABLE new
(
    `timestamp` Date32,
    `event_id` UInt8
)
ENGINE = TinyLog;
```

``` sql
INSERT INTO new VALUES (4102444800, 1), ('2100-01-01', 2);
SELECT * FROM new;
```

``` text
┌──timestamp─┬─event_id─┐
│ 2100-01-01 │        1 │
│ 2100-01-01 │        2 │
└────────────┴──────────┘
```

**См. также**

- [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32)
- [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)
@@ -17,7 +17,7 @@ DateTime64(precision, [timezone])

Данные хранятся в виде количества ‘тиков’, прошедших с момента начала эпохи (1970-01-01 00:00:00 UTC), в Int64. Размер тика определяется параметром precision. Дополнительно, тип `DateTime64` позволяет хранить часовой пояс, единый для всей колонки, который влияет на то, как будут отображаться значения типа `DateTime64` в текстовом виде и как будут парситься значения заданные в виде строк (‘2020-01-01 05:00:01.000’). Часовой пояс не хранится в строках таблицы (выборки), а хранится в метаданных колонки. Подробнее см. [DateTime](datetime.md).

Поддерживаются значения от 1 января 1925 г. и до 31 декабря 2283 г.
Поддерживаются значения от 1 января 1925 г. и до 11 ноября 2283 г.

## Примеры {#examples}
@@ -152,6 +152,104 @@ Cиноним: `DATE`.

## toDateTimeOrNull {#todatetimeornull}

## toDate32 {#todate32}

Конвертирует аргумент в значение типа [Date32](../../sql-reference/data-types/date32.md). Если значение выходит за границы диапазона, возвращается пограничное значение `Date32`. Если аргумент имеет тип [Date](../../sql-reference/data-types/date.md), учитываются границы типа `Date`.

**Синтаксис**

``` sql
toDate32(value)
```

**Аргументы**

- `value` — Значение даты. [String](../../sql-reference/data-types/string.md), [UInt32](../../sql-reference/data-types/int-uint.md) или [Date](../../sql-reference/data-types/date.md).

**Возвращаемое значение**

- Календарная дата.

Тип: [Date32](../../sql-reference/data-types/date32.md).

**Пример**

1. Значение находится в границах диапазона:

``` sql
SELECT toDate32('1955-01-01') AS value, toTypeName(value);
```

``` text
┌──────value─┬─toTypeName(toDate32('1955-01-01'))─┐
│ 1955-01-01 │ Date32                             │
└────────────┴────────────────────────────────────┘
```

2. Значение выходит за границы диапазона:

``` sql
SELECT toDate32('1924-01-01') AS value, toTypeName(value);
```

``` text
┌──────value─┬─toTypeName(toDate32('1924-01-01'))─┐
│ 1925-01-01 │ Date32                             │
└────────────┴────────────────────────────────────┘
```

3. С аргументом типа `Date`:

``` sql
SELECT toDate32(toDate('1924-01-01')) AS value, toTypeName(value);
```

``` text
┌──────value─┬─toTypeName(toDate32(toDate('1924-01-01')))─┐
│ 1970-01-01 │ Date32                                     │
└────────────┴────────────────────────────────────────────┘
```

## toDate32OrZero {#todate32-or-zero}

То же самое, что и [toDate32](#todate32), но возвращает минимальное значение типа [Date32](../../sql-reference/data-types/date32.md), если получен недопустимый аргумент.

**Пример**

Запрос:

``` sql
SELECT toDate32OrZero('1924-01-01'), toDate32OrZero('');
```

Результат:

``` text
┌─toDate32OrZero('1924-01-01')─┬─toDate32OrZero('')─┐
│                   1925-01-01 │         1925-01-01 │
└──────────────────────────────┴────────────────────┘
```

## toDate32OrNull {#todate32-or-null}

То же самое, что и [toDate32](#todate32), но возвращает `NULL`, если получен недопустимый аргумент.

**Пример**

Запрос:

``` sql
SELECT toDate32OrNull('1955-01-01'), toDate32OrNull('');
```

Результат:

``` text
┌─toDate32OrNull('1955-01-01')─┬─toDate32OrNull('')─┐
│                   1955-01-01 │               ᴺᵁᴸᴸ │
└──────────────────────────────┴────────────────────┘
```

## toDecimal(32\|64\|128\|256) {#todecimal3264128}

Преобразует `value` к типу данных [Decimal](../../sql-reference/functions/type-conversion-functions.md) с точностью `S`. `value` может быть числом или строкой. Параметр `S` (scale) задаёт число десятичных знаков.
@@ -12,6 +12,7 @@

#include <Interpreters/executeQuery.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Interpreters/UserDefinedObjectsLoader.h>
#include <Interpreters/Session.h>
#include <Common/Exception.h>
#include <Common/Macros.h>

@@ -287,6 +288,12 @@ try

    /// Lock path directory before read
    status.emplace(path + "status", StatusFile::write_full_info);

    fs::create_directories(fs::path(path) / "user_defined/");
    LOG_DEBUG(log, "Loading user defined objects from {}", path);
    Poco::File(path + "user_defined/").createDirectories();
    UserDefinedObjectsLoader::instance().loadObjects(global_context);
    LOG_DEBUG(log, "Loaded user defined objects.");

    LOG_DEBUG(log, "Loading metadata from {}", path);
    fs::create_directories(fs::path(path) / "data/");
    fs::create_directories(fs::path(path) / "metadata/");

@@ -53,6 +53,7 @@

#include <Interpreters/DNSCacheUpdater.h>
#include <Interpreters/ExternalLoaderXMLConfigRepository.h>
#include <Interpreters/InterserverCredentials.h>
#include <Interpreters/UserDefinedObjectsLoader.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Access/AccessControlManager.h>
#include <Storages/StorageReplicatedMergeTree.h>

@@ -774,6 +775,7 @@ if (ThreadFuzzer::instance().isEffective())

    {
        fs::create_directories(path / "data/");
        fs::create_directories(path / "metadata/");
        fs::create_directories(path / "user_defined/");

        /// Directory with metadata of tables, which was marked as dropped by Atomic database
        fs::create_directories(path / "metadata_dropped/");

@@ -1083,6 +1085,9 @@ if (ThreadFuzzer::instance().isEffective())

        /// Wait server pool to avoid use-after-free of destroyed context in the handlers
        server_pool.joinAll();

        // Uses a raw pointer to global context for getting ZooKeeper.
        main_config_reloader.reset();

        /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
          * At this moment, no one could own shared part of Context.
          */

@@ -1095,6 +1100,18 @@ if (ThreadFuzzer::instance().isEffective())

    /// system logs may copy global context.
    global_context->setCurrentDatabaseNameInGlobalContext(default_database);

    LOG_INFO(log, "Loading user defined objects from {}", path_str);
    try
    {
        UserDefinedObjectsLoader::instance().loadObjects(global_context);
    }
    catch (...)
    {
        tryLogCurrentException(log, "Caught exception while loading user defined objects");
        throw;
    }
    LOG_DEBUG(log, "Loaded user defined objects");

    LOG_INFO(log, "Loading metadata from {}", path_str);

    try

@@ -1514,7 +1531,6 @@ if (ThreadFuzzer::instance().isEffective())

    LOG_INFO(log, "Closed connections.");

    dns_cache_updater.reset();
    main_config_reloader.reset();

    if (current_connections)
    {
@@ -87,6 +87,7 @@ enum class AccessType

    M(CREATE_DICTIONARY, "", DICTIONARY, CREATE) /* allows to execute {CREATE|ATTACH} DICTIONARY */\
    M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables;
                                                     implicitly enabled by the grant CREATE_TABLE on any table */ \
    M(CREATE_FUNCTION, "", DATABASE, CREATE) /* allows to execute CREATE FUNCTION */ \
    M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \
    \
    M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\

@@ -94,6 +95,7 @@ enum class AccessType

    M(DROP_VIEW, "", VIEW, DROP) /* allows to execute {DROP|DETACH} TABLE for views;
                                    implicitly enabled by the grant DROP_TABLE */\
    M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\
    M(DROP_FUNCTION, "", DATABASE, DROP) /* allows to execute DROP FUNCTION */\
    M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\
    \
    M(TRUNCATE, "TRUNCATE TABLE", TABLE, ALL) \
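A minimal sketch of the feature these two new access types guard (the user, function name and grant target below are illustrative, not from the commit):

``` sql
GRANT CREATE FUNCTION, DROP FUNCTION ON *.* TO alice;

CREATE FUNCTION linear_equation AS (x, k, b) -> k * x + b;
SELECT linear_equation(2, 3, 4);   -- 10
DROP FUNCTION linear_equation;
```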
@@ -45,7 +45,7 @@ TEST(AccessRights, Union)

    lhs.grant(AccessType::INSERT);
    rhs.grant(AccessType::ALL, "db1");
    lhs.makeUnion(rhs);
    ASSERT_EQ(lhs.toString(), "GRANT INSERT ON *.*, GRANT SHOW, SELECT, ALTER, CREATE DATABASE, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, DROP, TRUNCATE, OPTIMIZE, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*");
    ASSERT_EQ(lhs.toString(), "GRANT INSERT ON *.*, GRANT SHOW, SELECT, ALTER, CREATE DATABASE, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, CREATE FUNCTION, DROP, TRUNCATE, OPTIMIZE, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*");
}

@@ -578,6 +578,12 @@

    M(607, BACKUP_ELEMENT_DUPLICATE) \
    M(608, CANNOT_RESTORE_TABLE) \
    \
    M(598, FUNCTION_ALREADY_EXISTS) \
    M(599, CANNOT_DROP_SYSTEM_FUNCTION) \
    M(600, CANNOT_CREATE_RECURSIVE_FUNCTION) \
    M(601, OBJECT_ALREADY_STORED_ON_DISK) \
    M(602, OBJECT_WAS_NOT_STORED_ON_DISK) \
    \
    M(998, POSTGRESQL_CONNECTION_FAILURE) \
    M(999, KEEPER_EXCEPTION) \
    M(1000, POCO_EXCEPTION) \
@@ -28,12 +28,6 @@ namespace ErrorCodes

    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}

template <typename T, typename SFINAE = void>
struct NearestFieldTypeImpl;

template <typename T>
using NearestFieldType = typename NearestFieldTypeImpl<T>::Type;

class Field;
using FieldVector = std::vector<Field, AllocatorWithMemoryTracking<Field>>;

@@ -168,6 +162,12 @@ template <> constexpr inline bool is_decimal_field<DecimalField<Decimal64>> = tr

template <> constexpr inline bool is_decimal_field<DecimalField<Decimal128>> = true;
template <> constexpr inline bool is_decimal_field<DecimalField<Decimal256>> = true;

template <typename T, typename SFINAE = void>
struct NearestFieldTypeImpl;

template <typename T>
using NearestFieldType = typename NearestFieldTypeImpl<T>::Type;

/// char may be signed or unsigned, and behave identically to signed char or unsigned char,
/// but they are always three different types.
/// signedness of char is different in Linux on x86 and Linux on ARM.

@@ -230,6 +230,16 @@ struct NearestFieldTypeImpl<T, std::enable_if_t<std::is_enum_v<T>>>

    using Type = NearestFieldType<std::underlying_type_t<T>>;
};

template <typename T>
decltype(auto) castToNearestFieldType(T && x)
{
    using U = NearestFieldType<std::decay_t<T>>;
    if constexpr (std::is_same_v<std::decay_t<T>, U>)
        return std::forward<T>(x);
    else
        return U(x);
}

/** 32 is enough. Round number is used for alignment and for better arithmetic inside std::vector.
  * NOTE: Actually, sizeof(std::string) is 32 when using libc++, so Field is 40 bytes.
  */

@@ -322,9 +332,10 @@ public:

    /// Templates to avoid ambiguity.
    template <typename T, typename Z = void *>
    using enable_if_not_field_or_stringlike_t = std::enable_if_t<
        !std::is_same_v<std::decay_t<T>, Field>
        && !std::is_same_v<NearestFieldType<std::decay_t<T>>, String>, Z>;
    using enable_if_not_field_or_bool_or_stringlike_t = std::enable_if_t<
        !std::is_same_v<std::decay_t<T>, Field> &&
        !std::is_same_v<std::decay_t<T>, bool> &&
        !std::is_same_v<NearestFieldType<std::decay_t<T>>, String>, Z>;

    Field() //-V730
        : which(Types::Null)

@@ -345,7 +356,9 @@ public:

    }

    template <typename T>
    Field(T && rhs, enable_if_not_field_or_stringlike_t<T> = nullptr);
    Field(T && rhs, enable_if_not_field_or_bool_or_stringlike_t<T> = nullptr);

    Field(bool rhs) : Field(castToNearestFieldType(rhs)) {}

    /// Create a string inplace.
    Field(const std::string_view & str) { create(str.data(), str.size()); }

@@ -395,9 +408,11 @@ public:

    /// 1. float <--> int needs explicit cast
    /// 2. customized types needs explicit cast
    template <typename T>
    enable_if_not_field_or_stringlike_t<T, Field> &
    enable_if_not_field_or_bool_or_stringlike_t<T, Field> &
    operator=(T && rhs);

    Field & operator= (bool rhs) { return *this = castToNearestFieldType(rhs); }

    Field & operator= (const std::string_view & str);
    Field & operator= (const String & str) { return *this = std::string_view{str}; }
    Field & operator= (String && str);

@@ -876,24 +891,14 @@ template <> inline constexpr const char * TypeName<AggregateFunctionStateData> =

template <typename T>
decltype(auto) castToNearestFieldType(T && x)
{
    using U = NearestFieldType<std::decay_t<T>>;
    if constexpr (std::is_same_v<std::decay_t<T>, U>)
        return std::forward<T>(x);
    else
        return U(x);
}

template <typename T>
Field::Field(T && rhs, enable_if_not_field_or_stringlike_t<T>) //-V730
Field::Field(T && rhs, enable_if_not_field_or_bool_or_stringlike_t<T>) //-V730
{
    auto && val = castToNearestFieldType(std::forward<T>(rhs));
    createConcrete(std::forward<decltype(val)>(val));
}

template <typename T>
Field::enable_if_not_field_or_stringlike_t<T, Field> &
Field::enable_if_not_field_or_bool_or_stringlike_t<T, Field> &
Field::operator=(T && rhs)
{
    auto && val = castToNearestFieldType(std::forward<T>(rhs));

@@ -908,7 +913,6 @@ Field::operator=(T && rhs)

    return *this;
}

inline Field & Field::operator=(const std::string_view & str)
{
    if (which != Types::String)
@@ -120,7 +120,7 @@ class IColumn;

M(UInt64, parallel_replicas_count, 0, "", 0) \
M(UInt64, parallel_replica_offset, 0, "", 0) \
\
M(Bool, skip_unavailable_shards, false, "If 1, ClickHouse silently skips unavailable shards and nodes unresolvable through DNS. Shard is marked as unavailable when none of the replicas can be reached.", 0) \
M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards and nodes unresolvable through DNS. Shard is marked as unavailable when none of the replicas can be reached.", 0) \
\
M(UInt64, parallel_distributed_insert_select, 0, "Process distributed INSERT SELECT query in the same cluster on local tables on every shard, if 1 SELECT is executed on each shard, if 2 SELECT and INSERT is executed on each shard", 0) \
M(UInt64, distributed_group_by_no_merge, 0, "If 1, Do not merge aggregation states from different servers for distributed queries (shards will process query up to the Complete stage, initiator just proxies the data from the shards). If 2 the initiator will apply ORDER BY and LIMIT stages (it is not in case when shard process query up to the Complete stage)", 0) \

@@ -157,8 +157,8 @@ class IColumn;

M(UInt64, min_bytes_to_use_mmap_io, 0, "The minimum number of bytes for reading the data with mmap option during SELECT queries execution. 0 - disabled.", 0) \
M(Bool, checksum_on_read, true, "Validate checksums on reading. It is enabled by default and should be always enabled in production. Please do not expect any benefits in disabling this setting. It may only be used for experiments and benchmarks. The setting only applicable for tables of MergeTree family. Checksums are always validated for other table engines and when receiving data over network.", 0) \
\
M(Bool, force_index_by_date, 0, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \
M(Bool, force_primary_key, 0, "Throw an exception if there is primary key in a table, and it is not used.", 0) \
M(Bool, force_index_by_date, false, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \
M(Bool, force_primary_key, false, "Throw an exception if there is primary key in a table, and it is not used.", 0) \
M(String, force_data_skipping_indices, "", "Comma separated list of strings or literals with the name of the data skipping indices that should be used during query execution, otherwise an exception will be thrown.", 0) \
\
M(Float, max_streams_to_max_threads_ratio, 1, "Allows you to use more sources than the number of threads - to more evenly distribute work across threads. It is assumed that this is a temporary solution, since it will be possible in the future to make the number of sources equal to the number of threads, but for each source to dynamically select available work for itself.", 0) \

@@ -171,8 +171,8 @@ class IColumn;

M(UInt64, priority, 0, "Priority of the query. 1 - the highest, higher value - lower priority; 0 - do not use priorities.", 0) \
M(Int64, os_thread_priority, 0, "If non zero - set corresponding 'nice' value for query processing threads. Can be used to adjust query priority for OS scheduler.", 0) \
\
M(Bool, log_queries, 1, "Log requests and write the log to the system table.", 0) \
M(Bool, log_formatted_queries, 0, "Log formatted queries and write the log to the system table.", 0) \
M(Bool, log_queries, true, "Log requests and write the log to the system table.", 0) \
M(Bool, log_formatted_queries, false, "Log formatted queries and write the log to the system table.", 0) \
M(LogQueriesType, log_queries_min_type, QueryLogElementType::QUERY_START, "Minimal type in query_log to log, possible values (from low to high): QUERY_START, QUERY_FINISH, EXCEPTION_BEFORE_START, EXCEPTION_WHILE_PROCESSING.", 0) \
M(Milliseconds, log_queries_min_query_duration_ms, 0, "Minimal time for the query to run, to get to the query_log/query_thread_log/query_views_log.", 0) \
M(UInt64, log_queries_cut_to_length, 100000, "If query length is greater than specified threshold (in bytes), then cut query when writing to query log. Also limit length of printed query in ordinary text log.", 0) \

@@ -198,10 +198,10 @@ class IColumn;

\
M(Float, memory_tracker_fault_probability, 0., "For testing of `exception safety` - throw an exception every time you allocate memory with the specified probability.", 0) \
\
M(Bool, enable_http_compression, 0, "Compress the result if the client over HTTP said that it understands data compressed by gzip or deflate.", 0) \
M(Bool, enable_http_compression, false, "Compress the result if the client over HTTP said that it understands data compressed by gzip or deflate.", 0) \
M(Int64, http_zlib_compression_level, 3, "Compression level - used if the client on HTTP said that it understands data compressed by gzip or deflate.", 0) \
\
M(Bool, http_native_compression_disable_checksumming_on_decompress, 0, "If you uncompress the POST data from the client compressed by the native format, do not check the checksum.", 0) \
M(Bool, http_native_compression_disable_checksumming_on_decompress, false, "If you uncompress the POST data from the client compressed by the native format, do not check the checksum.", 0) \
\
M(String, count_distinct_implementation, "uniqExact", "What aggregate function to use for implementation of count(DISTINCT ...)", 0) \
\

@@ -215,9 +215,9 @@ class IColumn;

\
M(UInt64, http_headers_progress_interval_ms, 100, "Do not send HTTP headers X-ClickHouse-Progress more frequently than at each specified interval.", 0) \
\
M(Bool, fsync_metadata, 1, "Do fsync after changing metadata for tables and databases (.sql files). Could be disabled in case of poor latency on server with high load of DDL queries and high load of disk subsystem.", 0) \
M(Bool, fsync_metadata, true, "Do fsync after changing metadata for tables and databases (.sql files). Could be disabled in case of poor latency on server with high load of DDL queries and high load of disk subsystem.", 0) \
\
M(Bool, join_use_nulls, 0, "Use NULLs for non-joined rows of outer JOINs for types that can be inside Nullable. If false, use default value of corresponding columns data type.", IMPORTANT) \
M(Bool, join_use_nulls, false, "Use NULLs for non-joined rows of outer JOINs for types that can be inside Nullable. If false, use default value of corresponding columns data type.", IMPORTANT) \
\
M(JoinStrictness, join_default_strictness, JoinStrictness::ALL, "Set default strictness in JOIN query. Possible values: empty string, 'ANY', 'ALL'. If empty, query without strictness will throw exception.", 0) \
M(Bool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. ANY RIGHT JOIN needs one-to-many keys mapping to be consistent with LEFT one.", IMPORTANT) \

@@ -225,7 +225,7 @@ class IColumn;

M(UInt64, preferred_block_size_bytes, 1000000, "", 0) \
\
M(UInt64, max_replica_delay_for_distributed_queries, 300, "If set, distributed queries of Replicated tables will choose servers with replication delay in seconds less than the specified value (not inclusive). Zero means do not take delay into account.", 0) \
M(Bool, fallback_to_stale_replicas_for_distributed_queries, 1, "Suppose max_replica_delay_for_distributed_queries is set and all replicas for the queried table are stale. If this setting is enabled, the query will be performed anyway, otherwise the error will be reported.", 0) \
M(Bool, fallback_to_stale_replicas_for_distributed_queries, true, "Suppose max_replica_delay_for_distributed_queries is set and all replicas for the queried table are stale. If this setting is enabled, the query will be performed anyway, otherwise the error will be reported.", 0) \
M(UInt64, preferred_max_column_in_block_size_bytes, 0, "Limit on max column size in block while reading. Helps to decrease cache misses count. Should be close to L2 cache size.", 0) \
\
M(Bool, insert_distributed_sync, false, "If setting is enabled, insert query into distributed waits until data will be sent to all nodes in cluster.", 0) \

@@ -241,7 +241,7 @@ class IColumn;

/** Settings for testing connection collector */ \
M(Milliseconds, sleep_in_receive_cancel_ms, 0, "Time to sleep in receiving cancel in TCPHandler", 0) \
\
M(Bool, insert_allow_materialized_columns, 0, "If setting is enabled, Allow materialized columns in INSERT.", 0) \
M(Bool, insert_allow_materialized_columns, false, "If setting is enabled, Allow materialized columns in INSERT.", 0) \
M(Seconds, http_connection_timeout, DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT, "HTTP connection timeout.", 0) \
M(Seconds, http_send_timeout, DEFAULT_HTTP_READ_BUFFER_TIMEOUT, "HTTP send timeout", 0) \
M(Seconds, http_receive_timeout, DEFAULT_HTTP_READ_BUFFER_TIMEOUT, "HTTP receive timeout", 0) \

@@ -362,18 +362,18 @@ class IColumn;

M(Bool, log_query_views, true, "Log query dependent views into system.query_views_log table. This setting have effect only when 'log_queries' is true.", 0) \
M(String, log_comment, "", "Log comment into system.query_log table and server log. It can be set to arbitrary string no longer than max_query_size.", 0) \
M(LogsLevel, send_logs_level, LogsLevel::fatal, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \
M(Bool, enable_optimize_predicate_expression, 1, "If it is set to true, optimize predicates to subqueries.", 0) \
M(Bool, enable_optimize_predicate_expression_to_final_subquery, 1, "Allow push predicate to final subquery.", 0) \
M(Bool, allow_push_predicate_when_subquery_contains_with, 1, "Allows push predicate when subquery contains WITH clause", 0) \
M(Bool, enable_optimize_predicate_expression, true, "If it is set to true, optimize predicates to subqueries.", 0) \
M(Bool, enable_optimize_predicate_expression_to_final_subquery, true, "Allow push predicate to final subquery.", 0) \
M(Bool, allow_push_predicate_when_subquery_contains_with, true, "Allows push predicate when subquery contains WITH clause", 0) \
\
M(UInt64, low_cardinality_max_dictionary_size, 8192, "Maximum size (in rows) of shared global dictionary for LowCardinality type.", 0) \
M(Bool, low_cardinality_use_single_dictionary_for_part, false, "LowCardinality type serialization setting. If is true, than will use additional keys when global dictionary overflows. Otherwise, will create several shared dictionaries.", 0) \
M(Bool, decimal_check_overflow, true, "Check overflow of decimal arithmetic/comparison operations", 0) \
\
M(Bool, prefer_localhost_replica, 1, "1 - always send query to local replica, if it exists. 0 - choose replica to send query between local and remote ones according to load_balancing", 0) \
M(Bool, prefer_localhost_replica, true, "If it's true then queries will be always sent to local replica (if it exists). If it's false then replica to send a query will be chosen between local and remote ones according to load_balancing", 0) \
M(UInt64, max_fetch_partition_retries_count, 5, "Amount of retries while fetching partition from another host.", 0) \
M(UInt64, http_max_multipart_form_data_size, 1024 * 1024 * 1024, "Limit on size of multipart/form-data content. This setting cannot be parsed from URL parameters and should be set in user profile. Note that content is parsed and external tables are created in memory before start of query execution. And this is the only limit that has effect on that stage (limits on max memory usage and max execution time have no effect while reading HTTP form data).", 0) \
M(Bool, calculate_text_stack_trace, 1, "Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when huge amount of wrong queries are executed. In normal cases you should not disable this option.", 0) \
M(Bool, calculate_text_stack_trace, true, "Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when huge amount of wrong queries are executed. In normal cases you should not disable this option.", 0) \
M(Bool, allow_ddl, true, "If it is set to true, then a user is allowed to executed DDL queries.", 0) \
M(Bool, parallel_view_processing, false, "Enables pushing to attached views concurrently instead of sequentially.", 0) \
M(Bool, enable_unaligned_array_join, false, "Allow ARRAY JOIN with multiple arrays that have different sizes. When this settings is enabled, arrays will be resized to the longest one.", 0) \

@@ -520,8 +520,8 @@ class IColumn;

#define FORMAT_FACTORY_SETTINGS(M) \
M(Char, format_csv_delimiter, ',', "The character to be considered as a delimiter in CSV data. If setting with a string, a string has to have a length of 1.", 0) \
M(Bool, format_csv_allow_single_quotes, 1, "If it is set to true, allow strings in single quotes.", 0) \
M(Bool, format_csv_allow_double_quotes, 1, "If it is set to true, allow strings in double quotes.", 0) \
M(Bool, format_csv_allow_single_quotes, true, "If it is set to true, allow strings in single quotes.", 0) \
M(Bool, format_csv_allow_double_quotes, true, "If it is set to true, allow strings in double quotes.", 0) \
M(Bool, output_format_csv_crlf_end_of_line, false, "If it is set true, end of line in CSV format will be \\r\\n instead of \\n.", 0) \
M(Bool, input_format_csv_unquoted_null_literal_as_null, false, "Consider unquoted NULL literal as \\N", 0) \
M(Bool, input_format_csv_enum_as_number, false, "Treat inserted enum values in CSV formats as enum indices \\N", 0) \
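For reference, the current value and type of any of the Bool settings touched above can be inspected at runtime; the two names below are just examples from the hunk:

``` sql
SELECT name, value, type
FROM system.settings
WHERE name IN ('join_use_nulls', 'force_index_by_date');
```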
@@ -3,6 +3,40 @@

using namespace DB;

GTEST_TEST(Field, FromBool)
{
    {
        Field f{false};
        ASSERT_EQ(f.getType(), Field::Types::UInt64);
        ASSERT_EQ(f.get<UInt64>(), 0);
        ASSERT_EQ(f.get<bool>(), false);
    }

    {
        Field f{true};
        ASSERT_EQ(f.getType(), Field::Types::UInt64);
        ASSERT_EQ(f.get<UInt64>(), 1);
        ASSERT_EQ(f.get<bool>(), true);
    }

    {
        Field f;
        f = false;
        ASSERT_EQ(f.getType(), Field::Types::UInt64);
        ASSERT_EQ(f.get<UInt64>(), 0);
        ASSERT_EQ(f.get<bool>(), false);
    }

    {
        Field f;
        f = true;
        ASSERT_EQ(f.getType(), Field::Types::UInt64);
        ASSERT_EQ(f.get<UInt64>(), 1);
        ASSERT_EQ(f.get<bool>(), true);
    }
}

GTEST_TEST(Field, Move)
{
    Field f;

@@ -70,7 +70,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(

    // Do not deduplicate insertions into MV if the main insertion is Ok
    if (disable_deduplication_for_children)
        insert_context->setSetting("insert_deduplicate", Field{false});
        insert_context->setSetting("insert_deduplicate", false);

    // Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
    if (insert_settings.min_insert_block_size_rows_for_materialized_views)

@@ -297,7 +297,7 @@ void registerDictionarySourceExecutablePool(DictionarySourceFactory & factory)

    /** Currently parallel parsing input format cannot read exactly max_block_size rows from input,
      * so it will be blocked on ReadBufferFromFileDescriptor because this file descriptor represent pipe that does not have eof.
      */
    context->setSetting("input_format_parallel_parsing", Field{false});
    context->setSetting("input_format_parallel_parsing", false);

    String settings_config_prefix = config_prefix + ".executable_pool";

@@ -26,8 +26,8 @@ const String & getFunctionCanonicalNameIfAny(const String & name)

    return FunctionFactory::instance().getCanonicalNameIfAny(name);
}

void FunctionFactory::registerFunction(const
    std::string & name,
void FunctionFactory::registerFunction(
    const std::string & name,
    Value creator,
    CaseSensitiveness case_sensitiveness)
{

@@ -119,8 +119,8 @@ FunctionOverloadResolverPtr FunctionFactory::tryGetImpl(

}

FunctionOverloadResolverPtr FunctionFactory::tryGet(
        const std::string & name,
        ContextPtr context) const
    const std::string & name,
    ContextPtr context) const
{
    auto impl = tryGetImpl(name, context);
    return impl ? std::move(impl) : nullptr;
@@ -1,7 +1,6 @@

#include <Poco/Net/NetException.h>

#include <IO/ReadBufferFromPocoSocket.h>
#include <IO/TimeoutSetter.h>
#include <Common/Exception.h>
#include <Common/NetException.h>
#include <Common/Stopwatch.h>

@@ -43,7 +43,6 @@ ReadBufferFromS3::ReadBufferFromS3(

bool ReadBufferFromS3::nextImpl()
{
    Stopwatch watch;
    bool next_result = false;

    if (impl)

@@ -62,19 +61,27 @@ bool ReadBufferFromS3::nextImpl()

    auto sleep_time_with_backoff_milliseconds = std::chrono::milliseconds(100);
    for (size_t attempt = 0; (attempt < max_single_read_retries) && !next_result; ++attempt)
    {
        Stopwatch watch;
        try
        {
            /// Try to read a next portion of data.
            next_result = impl->next();
            watch.stop();
            ProfileEvents::increment(ProfileEvents::S3ReadMicroseconds, watch.elapsedMicroseconds());
            break;
        }
        catch (const Exception & e)
        {
            watch.stop();
            ProfileEvents::increment(ProfileEvents::S3ReadMicroseconds, watch.elapsedMicroseconds());
            ProfileEvents::increment(ProfileEvents::S3ReadRequestsErrors, 1);

            LOG_INFO(log, "Caught exception while reading S3 object. Bucket: {}, Key: {}, Offset: {}, Attempt: {}, Message: {}",
                bucket, key, getPosition(), attempt, e.message());

            if (attempt + 1 == max_single_read_retries)
                throw;

            /// Pause before next attempt.
            std::this_thread::sleep_for(sleep_time_with_backoff_milliseconds);
            sleep_time_with_backoff_milliseconds *= 2;

@@ -86,9 +93,6 @@ bool ReadBufferFromS3::nextImpl()

        }
    }

    watch.stop();
    ProfileEvents::increment(ProfileEvents::S3ReadMicroseconds, watch.elapsedMicroseconds());

    if (!next_result)
        return false;

@@ -16,7 +16,7 @@ class InDepthNodeVisitor

public:
    using Data = typename Matcher::Data;

    InDepthNodeVisitor(Data & data_, WriteBuffer * ostr_ = nullptr)
    explicit InDepthNodeVisitor(Data & data_, WriteBuffer * ostr_ = nullptr)
        : data(data_),
        visit_depth(0),
        ostr(ostr_)
src/Interpreters/InterpreterCreateFunctionQuery.cpp (new file, 117 lines)

@@ -0,0 +1,117 @@

#include <Access/ContextAccess.h>
#include <Parsers/ASTCreateFunctionQuery.h>
#include <Parsers/ASTIdentifier.h>
#include <Interpreters/Context.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/InterpreterCreateFunctionQuery.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/UserDefinedObjectsLoader.h>
#include <Interpreters/UserDefinedFunctionFactory.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int UNKNOWN_IDENTIFIER;
    extern const int CANNOT_CREATE_RECURSIVE_FUNCTION;
    extern const int UNSUPPORTED_METHOD;
}

BlockIO InterpreterCreateFunctionQuery::execute()
{
    auto current_context = getContext();
    current_context->checkAccess(AccessType::CREATE_FUNCTION);

    FunctionNameNormalizer().visit(query_ptr.get());
    auto * create_function_query = query_ptr->as<ASTCreateFunctionQuery>();

    if (!create_function_query)
        throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Expected CREATE FUNCTION query");

    auto & function_name = create_function_query->function_name;
    validateFunction(create_function_query->function_core, function_name);

    UserDefinedFunctionFactory::instance().registerFunction(function_name, query_ptr);

    if (!is_internal)
    {
        try
        {
            UserDefinedObjectsLoader::instance().storeObject(current_context, UserDefinedObjectType::Function, function_name, *query_ptr);
        }
        catch (Exception & exception)
        {
            UserDefinedFunctionFactory::instance().unregisterFunction(function_name);
            exception.addMessage(fmt::format("while storing user defined function {} on disk", backQuote(function_name)));
            throw;
        }
    }

    return {};
}

void InterpreterCreateFunctionQuery::validateFunction(ASTPtr function, const String & name)
{
    const auto * args_tuple = function->as<ASTFunction>()->arguments->children.at(0)->as<ASTFunction>();
    std::unordered_set<String> arguments;
    for (const auto & argument : args_tuple->arguments->children)
    {
        const auto & argument_name = argument->as<ASTIdentifier>()->name();
        auto [_, inserted] = arguments.insert(argument_name);
        if (!inserted)
            throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Identifier {} already used as function parameter", argument_name);
    }

    ASTPtr function_body = function->as<ASTFunction>()->children.at(0)->children.at(1);
    std::unordered_set<String> identifiers_in_body = getIdentifiers(function_body);

    for (const auto & identifier : identifiers_in_body)
    {
        if (!arguments.contains(identifier))
            throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Identifier {} does not exist in arguments", backQuote(identifier));
    }

    validateFunctionRecursiveness(function_body, name);
}

std::unordered_set<String> InterpreterCreateFunctionQuery::getIdentifiers(ASTPtr node)
{
    std::unordered_set<String> identifiers;

    std::stack<ASTPtr> ast_nodes_to_process;
    ast_nodes_to_process.push(node);

    while (!ast_nodes_to_process.empty())
    {
        auto ast_node_to_process = ast_nodes_to_process.top();
        ast_nodes_to_process.pop();

        for (const auto & child : ast_node_to_process->children)
        {
            auto identifier_name_opt = tryGetIdentifierName(child);
            if (identifier_name_opt)
                identifiers.insert(identifier_name_opt.value());

            ast_nodes_to_process.push(child);
        }
    }

    return identifiers;
}

void InterpreterCreateFunctionQuery::validateFunctionRecursiveness(ASTPtr node, const String & function_to_create)
{
    for (const auto & child : node->children)
    {
        auto function_name_opt = tryGetFunctionName(child);
        if (function_name_opt && function_name_opt.value() == function_to_create)
            throw Exception(ErrorCodes::CANNOT_CREATE_RECURSIVE_FUNCTION, "You cannot create recursive function");

        validateFunctionRecursiveness(child, function_to_create);
    }
}

}
32
src/Interpreters/InterpreterCreateFunctionQuery.h
Normal file
32
src/Interpreters/InterpreterCreateFunctionQuery.h
Normal file
@ -0,0 +1,32 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/IInterpreter.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class Context;
|
||||
|
||||
class InterpreterCreateFunctionQuery : public IInterpreter, WithContext
|
||||
{
|
||||
public:
|
||||
InterpreterCreateFunctionQuery(const ASTPtr & query_ptr_, ContextPtr context_, bool is_internal_)
|
||||
: WithContext(context_)
|
||||
, query_ptr(query_ptr_)
|
||||
, is_internal(is_internal_) {}
|
||||
|
||||
BlockIO execute() override;
|
||||
|
||||
void setInternal(bool internal_);
|
||||
|
||||
private:
|
||||
static void validateFunction(ASTPtr function, const String & name);
|
||||
static std::unordered_set<String> getIdentifiers(ASTPtr node);
|
||||
static void validateFunctionRecursiveness(ASTPtr node, const String & function_to_create);
|
||||
|
||||
ASTPtr query_ptr;
|
||||
bool is_internal;
|
||||
};
|
||||
|
||||
}
|
27
src/Interpreters/InterpreterDropFunctionQuery.cpp
Normal file
27
src/Interpreters/InterpreterDropFunctionQuery.cpp
Normal file
@ -0,0 +1,27 @@
|
||||
#include <Access/ContextAccess.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/FunctionNameNormalizer.h>
|
||||
#include <Interpreters/InterpreterDropFunctionQuery.h>
|
||||
#include <Interpreters/UserDefinedObjectsLoader.h>
|
||||
#include <Interpreters/UserDefinedFunctionFactory.h>
|
||||
#include <Parsers/ASTDropFunctionQuery.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
BlockIO InterpreterDropFunctionQuery::execute()
|
||||
{
|
||||
auto current_context = getContext();
|
||||
current_context->checkAccess(AccessType::DROP_FUNCTION);
|
||||
|
||||
FunctionNameNormalizer().visit(query_ptr.get());
|
||||
auto & drop_function_query = query_ptr->as<ASTDropFunctionQuery &>();
|
||||
|
||||
UserDefinedFunctionFactory::instance().unregisterFunction(drop_function_query.function_name);
|
||||
UserDefinedObjectsLoader::instance().removeObject(current_context, UserDefinedObjectType::Function, drop_function_query.function_name);
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
}
|
21
src/Interpreters/InterpreterDropFunctionQuery.h
Normal file
21
src/Interpreters/InterpreterDropFunctionQuery.h
Normal file
@ -0,0 +1,21 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/IInterpreter.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class Context;
|
||||
|
||||
class InterpreterDropFunctionQuery : public IInterpreter, WithMutableContext
|
||||
{
|
||||
public:
|
||||
InterpreterDropFunctionQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_) : WithMutableContext(context_), query_ptr(query_ptr_) {}
|
||||
|
||||
BlockIO execute() override;
|
||||
|
||||
private:
|
||||
ASTPtr query_ptr;
|
||||
};
|
||||
|
||||
}
|
@ -7,7 +7,9 @@
|
||||
#include <Parsers/ASTCreateRowPolicyQuery.h>
|
||||
#include <Parsers/ASTCreateSettingsProfileQuery.h>
|
||||
#include <Parsers/ASTCreateUserQuery.h>
|
||||
#include <Parsers/ASTCreateFunctionQuery.h>
|
||||
#include <Parsers/ASTDropAccessEntityQuery.h>
|
||||
#include <Parsers/ASTDropFunctionQuery.h>
|
||||
#include <Parsers/ASTDropQuery.h>
|
||||
#include <Parsers/ASTExplainQuery.h>
|
||||
#include <Parsers/ASTGrantQuery.h>
|
||||
@ -36,6 +38,7 @@
|
||||
#include <Interpreters/InterpreterAlterQuery.h>
|
||||
#include <Interpreters/InterpreterBackupQuery.h>
|
||||
#include <Interpreters/InterpreterCheckQuery.h>
|
||||
#include <Interpreters/InterpreterCreateFunctionQuery.h>
|
||||
#include <Interpreters/InterpreterCreateQuery.h>
|
||||
#include <Interpreters/InterpreterCreateQuotaQuery.h>
|
||||
#include <Interpreters/InterpreterCreateRoleQuery.h>
|
||||
@ -44,6 +47,7 @@
|
||||
#include <Interpreters/InterpreterCreateUserQuery.h>
|
||||
#include <Interpreters/InterpreterDescribeQuery.h>
|
||||
#include <Interpreters/InterpreterDropAccessEntityQuery.h>
|
||||
#include <Interpreters/InterpreterDropFunctionQuery.h>
|
||||
#include <Interpreters/InterpreterDropQuery.h>
|
||||
#include <Interpreters/InterpreterExistsQuery.h>
|
||||
#include <Interpreters/InterpreterExplainQuery.h>
|
||||
@ -272,6 +276,14 @@ std::unique_ptr<IInterpreter> InterpreterFactory::get(ASTPtr & query, ContextMut
|
||||
{
|
||||
return std::make_unique<InterpreterExternalDDLQuery>(query, context);
|
||||
}
|
||||
else if (query->as<ASTCreateFunctionQuery>())
|
||||
{
|
||||
return std::make_unique<InterpreterCreateFunctionQuery>(query, context, false /*is_internal*/);
|
||||
}
|
||||
else if (query->as<ASTDropFunctionQuery>())
|
||||
{
|
||||
return std::make_unique<InterpreterDropFunctionQuery>(query, context);
|
||||
}
|
||||
else if (query->as<ASTBackupQuery>())
|
||||
{
|
||||
return std::make_unique<InterpreterBackupQuery>(query, context);
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <Interpreters/CollectJoinOnKeysVisitor.h>
|
||||
#include <Interpreters/RequiredSourceColumnsVisitor.h>
|
||||
#include <Interpreters/GetAggregatesVisitor.h>
|
||||
#include <Interpreters/UserDefinedFunctionsVisitor.h>
|
||||
#include <Interpreters/TableJoin.h>
|
||||
#include <Interpreters/ExpressionActions.h> /// getSmallestColumn()
|
||||
#include <Interpreters/getTableExpressions.h>
|
||||
@ -1045,6 +1046,9 @@ TreeRewriterResultPtr TreeRewriter::analyze(
|
||||
void TreeRewriter::normalize(
|
||||
ASTPtr & query, Aliases & aliases, const NameSet & source_columns_set, bool ignore_alias, const Settings & settings, bool allow_self_aliases)
|
||||
{
|
||||
UserDefinedFunctionsVisitor::Data data_user_defined_functions_visitor;
|
||||
UserDefinedFunctionsVisitor(data_user_defined_functions_visitor).visit(query);
|
||||
|
||||
CustomizeCountDistinctVisitor::Data data_count_distinct{settings.count_distinct_implementation};
|
||||
CustomizeCountDistinctVisitor(data_count_distinct).visit(query);
|
||||
|
||||
|
83
src/Interpreters/UserDefinedFunctionFactory.cpp
Normal file
83
src/Interpreters/UserDefinedFunctionFactory.cpp
Normal file
@ -0,0 +1,83 @@
|
||||
#include "UserDefinedFunctionFactory.h"
|
||||
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int FUNCTION_ALREADY_EXISTS;
|
||||
extern const int UNKNOWN_FUNCTION;
|
||||
extern const int CANNOT_DROP_SYSTEM_FUNCTION;
|
||||
}
|
||||
|
||||
UserDefinedFunctionFactory & UserDefinedFunctionFactory::instance()
|
||||
{
|
||||
static UserDefinedFunctionFactory result;
|
||||
return result;
|
||||
}
|
||||
|
||||
void UserDefinedFunctionFactory::registerFunction(const String & function_name, ASTPtr create_function_query)
|
||||
{
|
||||
if (FunctionFactory::instance().hasNameOrAlias(function_name))
|
||||
throw Exception(ErrorCodes::FUNCTION_ALREADY_EXISTS, "The function '{}' already exists", function_name);
|
||||
|
||||
if (AggregateFunctionFactory::instance().hasNameOrAlias(function_name))
|
||||
throw Exception(ErrorCodes::FUNCTION_ALREADY_EXISTS, "The aggregate function '{}' already exists", function_name);
|
||||
|
||||
auto [_, inserted] = function_name_to_create_query.emplace(function_name, std::move(create_function_query));
|
||||
if (!inserted)
|
||||
throw Exception(ErrorCodes::FUNCTION_ALREADY_EXISTS,
|
||||
"The function name '{}' is not unique",
|
||||
function_name);
|
||||
}
|
||||
|
||||
void UserDefinedFunctionFactory::unregisterFunction(const String & function_name)
|
||||
{
|
||||
if (FunctionFactory::instance().hasNameOrAlias(function_name) ||
|
||||
AggregateFunctionFactory::instance().hasNameOrAlias(function_name))
|
||||
throw Exception(ErrorCodes::CANNOT_DROP_SYSTEM_FUNCTION, "Cannot drop system function '{}'", function_name);
|
||||
|
||||
auto it = function_name_to_create_query.find(function_name);
|
||||
if (it == function_name_to_create_query.end())
|
||||
throw Exception(ErrorCodes::UNKNOWN_FUNCTION,
|
||||
"The function name '{}' is not registered",
|
||||
function_name);
|
||||
|
||||
function_name_to_create_query.erase(it);
|
||||
}
|
||||
|
||||
ASTPtr UserDefinedFunctionFactory::get(const String & function_name) const
|
||||
{
|
||||
auto it = function_name_to_create_query.find(function_name);
|
||||
if (it == function_name_to_create_query.end())
|
||||
throw Exception(ErrorCodes::UNKNOWN_FUNCTION,
|
||||
"The function name '{}' is not registered",
|
||||
function_name);
|
||||
|
||||
return it->second;
|
||||
}
|
||||
|
||||
ASTPtr UserDefinedFunctionFactory::tryGet(const std::string & function_name) const
|
||||
{
|
||||
auto it = function_name_to_create_query.find(function_name);
|
||||
if (it == function_name_to_create_query.end())
|
||||
return nullptr;
|
||||
|
||||
return it->second;
|
||||
}
|
||||
|
||||
std::vector<std::string> UserDefinedFunctionFactory::getAllRegisteredNames() const
|
||||
{
|
||||
std::vector<std::string> registered_names;
|
||||
registered_names.reserve(function_name_to_create_query.size());
|
||||
|
||||
for (const auto & [name, _] : function_name_to_create_query)
|
||||
registered_names.emplace_back(name);
|
||||
|
||||
return registered_names;
|
||||
}
|
||||
|
||||
}
|
32
src/Interpreters/UserDefinedFunctionFactory.h
Normal file
32
src/Interpreters/UserDefinedFunctionFactory.h
Normal file
@ -0,0 +1,32 @@
|
||||
#pragma once
|
||||
|
||||
#include <unordered_map>
|
||||
|
||||
#include <Common/NamePrompter.h>
|
||||
|
||||
#include <Parsers/ASTCreateFunctionQuery.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class UserDefinedFunctionFactory : public IHints<1, UserDefinedFunctionFactory>
|
||||
{
|
||||
public:
|
||||
static UserDefinedFunctionFactory & instance();
|
||||
|
||||
void registerFunction(const String & function_name, ASTPtr create_function_query);
|
||||
|
||||
void unregisterFunction(const String & function_name);
|
||||
|
||||
ASTPtr get(const String & function_name) const;
|
||||
|
||||
ASTPtr tryGet(const String & function_name) const;
|
||||
|
||||
std::vector<String> getAllRegisteredNames() const override;
|
||||
|
||||
private:
|
||||
|
||||
std::unordered_map<String, ASTPtr> function_name_to_create_query;
|
||||
};
|
||||
|
||||
}
|
99
src/Interpreters/UserDefinedFunctionsVisitor.cpp
Normal file
99
src/Interpreters/UserDefinedFunctionsVisitor.cpp
Normal file
@ -0,0 +1,99 @@
|
||||
#include "UserDefinedFunctionsVisitor.h"
|
||||
|
||||
#include <unordered_map>
|
||||
#include <stack>
|
||||
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Parsers/ASTCreateFunctionQuery.h>
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Interpreters/UserDefinedFunctionFactory.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNSUPPORTED_METHOD;
|
||||
}
|
||||
|
||||
void UserDefinedFunctionsMatcher::visit(ASTPtr & ast, Data &)
|
||||
{
|
||||
auto * function = ast->as<ASTFunction>();
|
||||
if (!function)
|
||||
return;
|
||||
|
||||
auto result = tryToReplaceFunction(*function);
|
||||
if (result)
|
||||
ast = result;
|
||||
}
|
||||
|
||||
bool UserDefinedFunctionsMatcher::needChildVisit(const ASTPtr &, const ASTPtr &)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
ASTPtr UserDefinedFunctionsMatcher::tryToReplaceFunction(const ASTFunction & function)
|
||||
{
|
||||
auto user_defined_function = UserDefinedFunctionFactory::instance().tryGet(function.name);
|
||||
if (!user_defined_function)
|
||||
return nullptr;
|
||||
|
||||
const auto & function_arguments_list = function.children.at(0)->as<ASTExpressionList>();
|
||||
auto & function_arguments = function_arguments_list->children;
|
||||
|
||||
const auto & create_function_query = user_defined_function->as<ASTCreateFunctionQuery>();
|
||||
auto & function_core_expression = create_function_query->function_core->children.at(0);
|
||||
|
||||
const auto & identifiers_expression_list = function_core_expression->children.at(0)->children.at(0)->as<ASTExpressionList>();
|
||||
const auto & identifiers_raw = identifiers_expression_list->children;
|
||||
|
||||
if (function_arguments.size() != identifiers_raw.size())
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
|
||||
"Function {} expects {} arguments actual arguments {}",
|
||||
create_function_query->function_name,
|
||||
identifiers_raw.size(),
|
||||
function_arguments.size());
|
||||
|
||||
std::unordered_map<std::string, ASTPtr> identifier_name_to_function_argument;
|
||||
|
||||
for (size_t parameter_index = 0; parameter_index < identifiers_raw.size(); ++parameter_index)
|
||||
{
|
||||
const auto & identifier = identifiers_raw[parameter_index]->as<ASTIdentifier>();
|
||||
const auto & function_argument = function_arguments[parameter_index];
|
||||
const auto & identifier_name = identifier->name();
|
||||
|
||||
identifier_name_to_function_argument.emplace(identifier_name, function_argument);
|
||||
}
|
||||
|
||||
auto function_body_to_update = function_core_expression->children.at(1)->clone();
|
||||
|
||||
std::stack<ASTPtr> ast_nodes_to_update;
|
||||
ast_nodes_to_update.push(function_body_to_update);
|
||||
|
||||
while (!ast_nodes_to_update.empty())
|
||||
{
|
||||
auto ast_node_to_update = ast_nodes_to_update.top();
|
||||
ast_nodes_to_update.pop();
|
||||
|
||||
for (auto & child : ast_node_to_update->children)
|
||||
{
|
||||
auto identifier_name_opt = tryGetIdentifierName(child);
|
||||
if (identifier_name_opt)
|
||||
{
|
||||
auto function_argument_it = identifier_name_to_function_argument.find(*identifier_name_opt);
|
||||
assert(function_argument_it != identifier_name_to_function_argument.end());
|
||||
|
||||
child = function_argument_it->second->clone();
|
||||
continue;
|
||||
}
|
||||
|
||||
ast_nodes_to_update.push(child);
|
||||
}
|
||||
}
|
||||
|
||||
return function_body_to_update;
|
||||
}
|
||||
|
||||
}
|
44
src/Interpreters/UserDefinedFunctionsVisitor.h
Normal file
44
src/Interpreters/UserDefinedFunctionsVisitor.h
Normal file
@ -0,0 +1,44 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/Aliases.h>
|
||||
#include <Interpreters/InDepthNodeVisitor.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ASTFunction;
|
||||
|
||||
/** Visits ASTFunction nodes and if it is used defined function replace it with function body.
|
||||
* Example:
|
||||
*
|
||||
* CREATE FUNCTION test_function AS a -> a + 1;
|
||||
*
|
||||
* Before applying visitor:
|
||||
* SELECT test_function(number) FROM system.numbers LIMIT 10;
|
||||
*
|
||||
* After applying visitor:
|
||||
* SELECT number + 1 FROM system.numbers LIMIT 10;
|
||||
*/
|
||||
class UserDefinedFunctionsMatcher
|
||||
{
|
||||
public:
|
||||
using Visitor = InDepthNodeVisitor<UserDefinedFunctionsMatcher, true>;
|
||||
|
||||
struct Data
|
||||
{
|
||||
};
|
||||
|
||||
static void visit(ASTPtr & ast, Data & data);
|
||||
static bool needChildVisit(const ASTPtr & node, const ASTPtr & child);
|
||||
|
||||
private:
|
||||
static void visit(ASTFunction & func, const Data & data);
|
||||
|
||||
static ASTPtr tryToReplaceFunction(const ASTFunction & function);
|
||||
|
||||
};
|
||||
|
||||
/// Visits AST nodes and collect their aliases in one map (with links to source nodes).
|
||||
using UserDefinedFunctionsVisitor = UserDefinedFunctionsMatcher::Visitor;
|
||||
|
||||
}
|
164
src/Interpreters/UserDefinedObjectsLoader.cpp
Normal file
164
src/Interpreters/UserDefinedObjectsLoader.cpp
Normal file
@ -0,0 +1,164 @@
|
||||
#include "UserDefinedObjectsLoader.h"
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
#include <Common/escapeForFileName.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/InterpreterCreateFunctionQuery.h>
|
||||
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/ASTCreateFunctionQuery.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
#include <Parsers/ParserCreateFunctionQuery.h>
|
||||
|
||||
#include <Poco/DirectoryIterator.h>
|
||||
#include <Poco/Logger.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int OBJECT_ALREADY_STORED_ON_DISK;
|
||||
extern const int OBJECT_WAS_NOT_STORED_ON_DISK;
|
||||
}
|
||||
|
||||
UserDefinedObjectsLoader & UserDefinedObjectsLoader::instance()
|
||||
{
|
||||
static UserDefinedObjectsLoader ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
UserDefinedObjectsLoader::UserDefinedObjectsLoader()
|
||||
: log(&Poco::Logger::get("UserDefinedObjectsLoader"))
|
||||
{}
|
||||
|
||||
void UserDefinedObjectsLoader::loadUserDefinedObject(ContextPtr context, UserDefinedObjectType object_type, const std::string_view & name, const String & path)
|
||||
{
|
||||
auto name_ref = StringRef(name.data(), name.size());
|
||||
LOG_DEBUG(log, "Loading user defined object {} from file {}", backQuote(name_ref), path);
|
||||
|
||||
/// There is .sql file with user defined object creation statement.
|
||||
ReadBufferFromFile in(path);
|
||||
|
||||
String object_create_query;
|
||||
readStringUntilEOF(object_create_query, in);
|
||||
|
||||
try
|
||||
{
|
||||
switch (object_type)
|
||||
{
|
||||
case UserDefinedObjectType::Function:
|
||||
{
|
||||
ParserCreateFunctionQuery parser;
|
||||
ASTPtr ast = parseQuery(
|
||||
parser,
|
||||
object_create_query.data(),
|
||||
object_create_query.data() + object_create_query.size(),
|
||||
"in file " + path,
|
||||
0,
|
||||
context->getSettingsRef().max_parser_depth);
|
||||
|
||||
InterpreterCreateFunctionQuery interpreter(ast, context, true /*is internal*/);
|
||||
interpreter.execute();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
e.addMessage(fmt::format("while loading user defined objects {} from path {}", backQuote(name_ref), path));
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
void UserDefinedObjectsLoader::loadObjects(ContextPtr context)
|
||||
{
|
||||
LOG_DEBUG(log, "loading user defined objects");
|
||||
|
||||
String dir_path = context->getPath() + "user_defined/";
|
||||
Poco::DirectoryIterator dir_end;
|
||||
for (Poco::DirectoryIterator it(dir_path); it != dir_end; ++it)
|
||||
{
|
||||
if (it->isLink())
|
||||
continue;
|
||||
|
||||
const auto & file_name = it.name();
|
||||
|
||||
/// For '.svn', '.gitignore' directory and similar.
|
||||
if (file_name.at(0) == '.')
|
||||
continue;
|
||||
|
||||
if (!it->isDirectory() && endsWith(file_name, ".sql"))
|
||||
{
|
||||
std::string_view object_name = file_name;
|
||||
object_name.remove_suffix(strlen(".sql"));
|
||||
object_name.remove_prefix(strlen("function_"));
|
||||
loadUserDefinedObject(context, UserDefinedObjectType::Function, object_name, dir_path + it.name());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void UserDefinedObjectsLoader::storeObject(ContextPtr context, UserDefinedObjectType object_type, const String & object_name, const IAST & ast)
|
||||
{
|
||||
String dir_path = context->getPath() + "user_defined/";
|
||||
String file_path;
|
||||
|
||||
switch (object_type)
|
||||
{
|
||||
case UserDefinedObjectType::Function:
|
||||
{
|
||||
file_path = dir_path + "function_" + escapeForFileName(object_name) + ".sql";
|
||||
}
|
||||
}
|
||||
|
||||
if (std::filesystem::exists(file_path))
|
||||
throw Exception(ErrorCodes::OBJECT_ALREADY_STORED_ON_DISK, "User defined object {} already stored on disk", backQuote(file_path));
|
||||
|
||||
LOG_DEBUG(log, "Storing object {} to file {}", backQuote(object_name), file_path);
|
||||
|
||||
WriteBufferFromOwnString create_statement_buf;
|
||||
formatAST(ast, create_statement_buf, false);
|
||||
writeChar('\n', create_statement_buf);
|
||||
|
||||
String create_statement = create_statement_buf.str();
|
||||
WriteBufferFromFile out(file_path, create_statement.size(), O_WRONLY | O_CREAT | O_EXCL);
|
||||
writeString(create_statement, out);
|
||||
out.next();
|
||||
if (context->getSettingsRef().fsync_metadata)
|
||||
out.sync();
|
||||
out.close();
|
||||
|
||||
LOG_DEBUG(log, "Stored object {}", backQuote(object_name));
|
||||
}
|
||||
|
||||
void UserDefinedObjectsLoader::removeObject(ContextPtr context, UserDefinedObjectType object_type, const String & object_name)
|
||||
{
|
||||
String dir_path = context->getPath() + "user_defined/";
|
||||
LOG_DEBUG(log, "Removing file for user defined object {} from {}", backQuote(object_name), dir_path);
|
||||
|
||||
std::filesystem::path file_path;
|
||||
|
||||
switch (object_type)
|
||||
{
|
||||
case UserDefinedObjectType::Function:
|
||||
{
|
||||
file_path = dir_path + "function_" + escapeForFileName(object_name) + ".sql";
|
||||
}
|
||||
}
|
||||
|
||||
if (!std::filesystem::exists(file_path))
|
||||
throw Exception(ErrorCodes::OBJECT_WAS_NOT_STORED_ON_DISK, "User defined object {} was not stored on disk", backQuote(file_path.string()));
|
||||
|
||||
std::filesystem::remove(file_path);
|
||||
}
|
||||
|
||||
}
|
33
src/Interpreters/UserDefinedObjectsLoader.h
Normal file
33
src/Interpreters/UserDefinedObjectsLoader.h
Normal file
@ -0,0 +1,33 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Parsers/IAST.h>
|
||||
|
||||
#include <boost/noncopyable.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
enum class UserDefinedObjectType
|
||||
{
|
||||
Function
|
||||
};
|
||||
|
||||
class UserDefinedObjectsLoader : private boost::noncopyable
|
||||
{
|
||||
public:
|
||||
static UserDefinedObjectsLoader & instance();
|
||||
UserDefinedObjectsLoader();
|
||||
|
||||
void loadObjects(ContextPtr context);
|
||||
void storeObject(ContextPtr context, UserDefinedObjectType object_type, const String & object_name, const IAST & ast);
|
||||
void removeObject(ContextPtr context, UserDefinedObjectType object_type, const String & object_name);
|
||||
|
||||
private:
|
||||
|
||||
void loadUserDefinedObject(ContextPtr context, UserDefinedObjectType object_type, const std::string_view & object_name, const String & file_path);
|
||||
Poco::Logger * log;
|
||||
};
|
||||
|
||||
}
|
@ -71,6 +71,7 @@ SRCS(
|
||||
InternalTextLogsQueue.cpp
|
||||
InterpreterAlterQuery.cpp
|
||||
InterpreterCheckQuery.cpp
|
||||
InterpreterCreateFunctionQuery.cpp
|
||||
InterpreterCreateQuery.cpp
|
||||
InterpreterCreateQuotaQuery.cpp
|
||||
InterpreterCreateRoleQuery.cpp
|
||||
@ -79,6 +80,7 @@ SRCS(
|
||||
InterpreterCreateUserQuery.cpp
|
||||
InterpreterDescribeQuery.cpp
|
||||
InterpreterDropAccessEntityQuery.cpp
|
||||
InterpreterDropFunctionQuery.cpp
|
||||
InterpreterDropQuery.cpp
|
||||
InterpreterExistsQuery.cpp
|
||||
InterpreterExplainQuery.cpp
|
||||
@ -89,6 +91,7 @@ SRCS(
|
||||
InterpreterKillQueryQuery.cpp
|
||||
InterpreterOptimizeQuery.cpp
|
||||
InterpreterRenameQuery.cpp
|
||||
InterpreterSelectIntersectExceptQuery.cpp
|
||||
InterpreterSelectQuery.cpp
|
||||
InterpreterSelectWithUnionQuery.cpp
|
||||
InterpreterSetQuery.cpp
|
||||
@ -142,6 +145,7 @@ SRCS(
|
||||
RewriteFunctionToSubcolumnVisitor.cpp
|
||||
RewriteSumIfFunctionVisitor.cpp
|
||||
RowRefs.cpp
|
||||
SelectIntersectExceptQueryVisitor.cpp
|
||||
Set.cpp
|
||||
SetVariants.cpp
|
||||
SortedBlocksWriter.cpp
|
||||
@ -157,6 +161,9 @@ SRCS(
|
||||
TranslateQualifiedNamesVisitor.cpp
|
||||
TreeOptimizer.cpp
|
||||
TreeRewriter.cpp
|
||||
UserDefinedFunctionFactory.cpp
|
||||
UserDefinedFunctionsVisitor.cpp
|
||||
UserDefinedObjectsLoader.cpp
|
||||
WindowDescription.cpp
|
||||
ZooKeeperLog.cpp
|
||||
addMissingDefaults.cpp
|
||||
|
21
src/Parsers/ASTCreateFunctionQuery.cpp
Normal file
21
src/Parsers/ASTCreateFunctionQuery.cpp
Normal file
@ -0,0 +1,21 @@
|
||||
#include <Common/quoteString.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <Parsers/ASTCreateFunctionQuery.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
ASTPtr ASTCreateFunctionQuery::clone() const
|
||||
{
|
||||
return std::make_shared<ASTCreateFunctionQuery>(*this);
|
||||
}
|
||||
|
||||
void ASTCreateFunctionQuery::formatImpl(const IAST::FormatSettings & settings, IAST::FormatState & state, IAST::FormatStateStacked frame) const
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << "CREATE FUNCTION " << (settings.hilite ? hilite_none : "");
|
||||
settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(function_name) << (settings.hilite ? hilite_none : "");
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " AS " << (settings.hilite ? hilite_none : "");
|
||||
function_core->formatImpl(settings, state, frame);
|
||||
}
|
||||
|
||||
}
|
22
src/Parsers/ASTCreateFunctionQuery.h
Normal file
22
src/Parsers/ASTCreateFunctionQuery.h
Normal file
@ -0,0 +1,22 @@
|
||||
#pragma once
|
||||
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ASTCreateFunctionQuery : public IAST
|
||||
{
|
||||
public:
|
||||
String function_name;
|
||||
ASTPtr function_core;
|
||||
|
||||
String getID(char) const override { return "CreateFunctionQuery"; }
|
||||
|
||||
ASTPtr clone() const override;
|
||||
|
||||
void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override;
|
||||
};
|
||||
|
||||
}
|
19
src/Parsers/ASTDropFunctionQuery.cpp
Normal file
19
src/Parsers/ASTDropFunctionQuery.cpp
Normal file
@ -0,0 +1,19 @@
|
||||
#include <Parsers/ASTDropFunctionQuery.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <IO/Operators.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
ASTPtr ASTDropFunctionQuery::clone() const
|
||||
{
|
||||
return std::make_shared<ASTDropFunctionQuery>(*this);
|
||||
}
|
||||
|
||||
void ASTDropFunctionQuery::formatImpl(const IAST::FormatSettings & settings, IAST::FormatState &, IAST::FormatStateStacked) const
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << "DROP FUNCTION " << (settings.hilite ? hilite_none : "");
|
||||
settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(function_name) << (settings.hilite ? hilite_none : "");
|
||||
}
|
||||
|
||||
}
|
20
src/Parsers/ASTDropFunctionQuery.h
Normal file
20
src/Parsers/ASTDropFunctionQuery.h
Normal file
@ -0,0 +1,20 @@
|
||||
#pragma once
|
||||
|
||||
#include "IAST.h"
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ASTDropFunctionQuery : public IAST
|
||||
{
|
||||
public:
|
||||
String function_name;
|
||||
|
||||
String getID(char) const override { return "DropFunctionQuery"; }
|
||||
|
||||
ASTPtr clone() const override;
|
||||
|
||||
void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override;
|
||||
};
|
||||
|
||||
}
|
@ -13,6 +13,7 @@
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTSubquery.h>
|
||||
#include <Parsers/ASTWithAlias.h>
|
||||
#include <Parsers/queryToString.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -21,6 +22,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNEXPECTED_EXPRESSION;
|
||||
extern const int UNEXPECTED_AST_STRUCTURE;
|
||||
}
|
||||
|
||||
void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr) const
|
||||
@ -557,4 +559,33 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
|
||||
}
|
||||
}
|
||||
|
||||
String getFunctionName(const IAST * ast)
|
||||
{
|
||||
String res;
|
||||
if (tryGetFunctionNameInto(ast, res))
|
||||
return res;
|
||||
throw Exception(ast ? queryToString(*ast) + " is not an function" : "AST node is nullptr", ErrorCodes::UNEXPECTED_AST_STRUCTURE);
|
||||
}
|
||||
|
||||
std::optional<String> tryGetFunctionName(const IAST * ast)
|
||||
{
|
||||
String res;
|
||||
if (tryGetFunctionNameInto(ast, res))
|
||||
return res;
|
||||
return {};
|
||||
}
|
||||
|
||||
bool tryGetFunctionNameInto(const IAST * ast, String & name)
|
||||
{
|
||||
if (ast)
|
||||
{
|
||||
if (const auto * node = ast->as<ASTFunction>())
|
||||
{
|
||||
name = node->name;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -71,4 +71,14 @@ std::shared_ptr<ASTFunction> makeASTFunction(const String & name, Args &&... arg
|
||||
return function;
|
||||
}
|
||||
|
||||
/// ASTFunction Helpers: hide casts and semantic.
|
||||
|
||||
String getFunctionName(const IAST * ast);
|
||||
std::optional<String> tryGetFunctionName(const IAST * ast);
|
||||
bool tryGetFunctionNameInto(const IAST * ast, String & name);
|
||||
|
||||
inline String getFunctionName(const ASTPtr & ast) { return getFunctionName(ast.get()); }
|
||||
inline std::optional<String> tryGetFunctionName(const ASTPtr & ast) { return tryGetFunctionName(ast.get()); }
|
||||
inline bool tryGetFunctionNameInto(const ASTPtr & ast, String & name) { return tryGetFunctionNameInto(ast.get(), name); }
|
||||
|
||||
}
|
||||
|
47
src/Parsers/ParserCreateFunctionQuery.cpp
Normal file
47
src/Parsers/ParserCreateFunctionQuery.cpp
Normal file
@ -0,0 +1,47 @@
|
||||
#include <Parsers/ASTCreateFunctionQuery.h>
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/CommonParsers.h>
|
||||
#include <Parsers/ExpressionElementParsers.h>
|
||||
#include <Parsers/ExpressionListParsers.h>
|
||||
#include <Parsers/ParserCreateFunctionQuery.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
bool ParserCreateFunctionQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & expected)
|
||||
{
|
||||
ParserKeyword s_create("CREATE");
|
||||
ParserKeyword s_function("FUNCTION");
|
||||
ParserIdentifier function_name_p;
|
||||
ParserKeyword s_as("AS");
|
||||
ParserLambdaExpression lambda_p;
|
||||
|
||||
ASTPtr function_name;
|
||||
ASTPtr function_core;
|
||||
|
||||
if (!s_create.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (!s_function.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (!function_name_p.parse(pos, function_name, expected))
|
||||
return false;
|
||||
|
||||
if (!s_as.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (!lambda_p.parse(pos, function_core, expected))
|
||||
return false;
|
||||
|
||||
auto create_function_query = std::make_shared<ASTCreateFunctionQuery>();
|
||||
node = create_function_query;
|
||||
|
||||
create_function_query->function_name = function_name->as<ASTIdentifier &>().name();
|
||||
create_function_query->function_core = function_core;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
16
src/Parsers/ParserCreateFunctionQuery.h
Normal file
16
src/Parsers/ParserCreateFunctionQuery.h
Normal file
@ -0,0 +1,16 @@
|
||||
#pragma once
|
||||
|
||||
#include "IParserBase.h"
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// CREATE FUNCTION test AS x -> x || '1'
|
||||
class ParserCreateFunctionQuery : public IParserBase
|
||||
{
|
||||
protected:
|
||||
const char * getName() const override { return "CREATE FUNCTION query"; }
|
||||
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
|
||||
};
|
||||
|
||||
}
|
35
src/Parsers/ParserDropFunctionQuery.cpp
Normal file
35
src/Parsers/ParserDropFunctionQuery.cpp
Normal file
@ -0,0 +1,35 @@
|
||||
#include <Parsers/ASTDropFunctionQuery.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/CommonParsers.h>
|
||||
#include <Parsers/ExpressionElementParsers.h>
|
||||
#include <Parsers/ParserDropFunctionQuery.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
bool ParserDropFunctionQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & expected)
|
||||
{
|
||||
ParserKeyword s_drop("DROP");
|
||||
ParserKeyword s_function("FUNCTION");
|
||||
ParserIdentifier function_name_p;
|
||||
|
||||
ASTPtr function_name;
|
||||
|
||||
if (!s_drop.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (!s_function.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (!function_name_p.parse(pos, function_name, expected))
|
||||
return false;
|
||||
|
||||
auto drop_function_query = std::make_shared<ASTDropFunctionQuery>();
|
||||
node = drop_function_query;
|
||||
|
||||
drop_function_query->function_name = function_name->as<ASTIdentifier &>().name();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
14
src/Parsers/ParserDropFunctionQuery.h
Normal file
14
src/Parsers/ParserDropFunctionQuery.h
Normal file
@ -0,0 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include "IParserBase.h"
|
||||
|
||||
namespace DB
|
||||
{
|
||||
/// DROP FUNCTION function1
|
||||
class ParserDropFunctionQuery : public IParserBase
|
||||
{
|
||||
protected:
|
||||
const char * getName() const override { return "DROP FUNCTION query"; }
|
||||
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
|
||||
};
|
||||
}
|
@ -1,4 +1,5 @@
|
||||
#include <Parsers/ParserAlterQuery.h>
|
||||
#include <Parsers/ParserCreateFunctionQuery.h>
|
||||
#include <Parsers/ParserBackupQuery.h>
|
||||
#include <Parsers/ParserCreateQuery.h>
|
||||
#include <Parsers/ParserCreateQuotaQuery.h>
|
||||
@ -7,6 +8,7 @@
|
||||
#include <Parsers/ParserCreateSettingsProfileQuery.h>
|
||||
#include <Parsers/ParserCreateUserQuery.h>
|
||||
#include <Parsers/ParserDropAccessEntityQuery.h>
|
||||
#include <Parsers/ParserDropFunctionQuery.h>
|
||||
#include <Parsers/ParserDropQuery.h>
|
||||
#include <Parsers/ParserGrantQuery.h>
|
||||
#include <Parsers/ParserInsertQuery.h>
|
||||
@ -37,6 +39,8 @@ bool ParserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
||||
ParserCreateQuotaQuery create_quota_p;
|
||||
ParserCreateRowPolicyQuery create_row_policy_p;
|
||||
ParserCreateSettingsProfileQuery create_settings_profile_p;
|
||||
ParserCreateFunctionQuery create_function_p;
|
||||
ParserDropFunctionQuery drop_function_p;
|
||||
ParserDropAccessEntityQuery drop_access_entity_p;
|
||||
ParserGrantQuery grant_p;
|
||||
ParserSetRoleQuery set_role_p;
|
||||
@ -54,6 +58,8 @@ bool ParserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
||||
|| create_quota_p.parse(pos, node, expected)
|
||||
|| create_row_policy_p.parse(pos, node, expected)
|
||||
|| create_settings_profile_p.parse(pos, node, expected)
|
||||
|| create_function_p.parse(pos, node, expected)
|
||||
|| drop_function_p.parse(pos, node, expected)
|
||||
|| drop_access_entity_p.parse(pos, node, expected)
|
||||
|| grant_p.parse(pos, node, expected)
|
||||
|| external_ddl_p.parse(pos, node, expected)
|
||||
|
@ -15,6 +15,7 @@ SRCS(
|
||||
ASTColumnsMatcher.cpp
|
||||
ASTColumnsTransformers.cpp
|
||||
ASTConstraintDeclaration.cpp
|
||||
ASTCreateFunctionQuery.cpp
|
||||
ASTCreateQuery.cpp
|
||||
ASTCreateQuotaQuery.cpp
|
||||
ASTCreateRoleQuery.cpp
|
||||
@ -25,6 +26,7 @@ SRCS(
|
||||
ASTDictionary.cpp
|
||||
ASTDictionaryAttributeDeclaration.cpp
|
||||
ASTDropAccessEntityQuery.cpp
|
||||
ASTDropFunctionQuery.cpp
|
||||
ASTDropQuery.cpp
|
||||
ASTExpressionList.cpp
|
||||
ASTFunction.cpp
|
||||
@ -89,6 +91,7 @@ SRCS(
|
||||
ParserAlterQuery.cpp
|
||||
ParserCase.cpp
|
||||
ParserCheckQuery.cpp
|
||||
ParserCreateFunctionQuery.cpp
|
||||
ParserCreateQuery.cpp
|
||||
ParserCreateQuotaQuery.cpp
|
||||
ParserCreateRoleQuery.cpp
|
||||
@ -101,6 +104,7 @@ SRCS(
|
||||
ParserDictionary.cpp
|
||||
ParserDictionaryAttributeDeclaration.cpp
|
||||
ParserDropAccessEntityQuery.cpp
|
||||
ParserDropFunctionQuery.cpp
|
||||
ParserDropQuery.cpp
|
||||
ParserExplainQuery.cpp
|
||||
ParserExternalDDLQuery.cpp
|
||||
|
@ -5,7 +5,7 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// Sink which is returned from Storage::read.
|
||||
/// Sink which is returned from Storage::write.
|
||||
/// The same as ISink, but also can hold table lock.
|
||||
class SinkToStorage : public ISink
|
||||
{
|
||||
|
@ -1268,7 +1268,7 @@ void TCPHandler::receiveQuery()
|
||||
/// compatibility.
|
||||
if (query_kind == ClientInfo::QueryKind::SECONDARY_QUERY)
|
||||
{
|
||||
query_context->setSetting("normalize_function_names", Field(0));
|
||||
query_context->setSetting("normalize_function_names", false);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1211,8 +1211,8 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
|
||||
context_for_reading->setSetting("max_streams_to_max_threads_ratio", 1);
|
||||
context_for_reading->setSetting("max_threads", 1);
|
||||
/// Allow mutations to work when force_index_by_date or force_primary_key is on.
|
||||
context_for_reading->setSetting("force_index_by_date", Field(0));
|
||||
context_for_reading->setSetting("force_primary_key", Field(0));
|
||||
context_for_reading->setSetting("force_index_by_date", false);
|
||||
context_for_reading->setSetting("force_primary_key", false);
|
||||
|
||||
MutationCommands commands_for_part;
|
||||
for (const auto & command : commands)
|
||||
|
@ -77,7 +77,7 @@ struct Settings;
|
||||
M(Seconds, execute_merges_on_single_replica_time_threshold, 0, "When greater than zero only a single replica starts the merge immediately, others wait up to that amount of time to download the result instead of doing merges locally. If the chosen replica doesn't finish the merge during that amount of time, fallback to standard behavior happens.", 0) \
|
||||
M(Seconds, remote_fs_execute_merges_on_single_replica_time_threshold, 3 * 60 * 60, "When greater than zero only a single replica starts the merge immediatelys when merged part on shared storage and 'allow_remote_fs_zero_copy_replication' is enabled.", 0) \
|
||||
M(Seconds, try_fetch_recompressed_part_timeout, 7200, "Recompression works slow in most cases, so we don't start merge with recompression until this timeout and trying to fetch recompressed part from replica which assigned this merge with recompression.", 0) \
|
||||
M(Bool, always_fetch_merged_part, 0, "If true, replica never merge parts and always download merged parts from other replicas.", 0) \
|
||||
M(Bool, always_fetch_merged_part, false, "If true, replica never merge parts and always download merged parts from other replicas.", 0) \
|
||||
M(UInt64, max_suspicious_broken_parts, 10, "Max broken parts, if more - deny automatic deletion.", 0) \
|
||||
M(UInt64, max_files_to_modify_in_alter_columns, 75, "Not apply ALTER if number of files for modification(deletion, addition) more than this.", 0) \
|
||||
M(UInt64, max_files_to_remove_in_alter_columns, 50, "Not apply ALTER, if number of files for deletion more than this.", 0) \
|
||||
@ -92,7 +92,7 @@ struct Settings;
|
||||
M(Seconds, replicated_fetches_http_receive_timeout, 0, "HTTP receive timeout for fetch part requests. Inherited from default profile `http_receive_timeout` if not set explicitly.", 0) \
|
||||
M(Bool, replicated_can_become_leader, true, "If true, Replicated tables replicas on this node will try to acquire leadership.", 0) \
|
||||
M(Seconds, zookeeper_session_expiration_check_period, 60, "ZooKeeper session expiration check period, in seconds.", 0) \
|
||||
M(Bool, detach_old_local_parts_when_cloning_replica, 1, "Do not remove old local parts when repairing lost replica.", 0) \
|
||||
M(Bool, detach_old_local_parts_when_cloning_replica, true, "Do not remove old local parts when repairing lost replica.", 0) \
|
||||
M(UInt64, max_replicated_fetches_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \
|
||||
M(UInt64, max_replicated_sends_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \
|
||||
\
|
||||
@ -117,8 +117,8 @@ struct Settings;
|
||||
M(Int64, merge_with_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with delete TTL can be repeated.", 0) \
|
||||
M(Int64, merge_with_recompression_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with recompression TTL can be repeated.", 0) \
|
||||
M(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \
|
||||
M(Bool, write_final_mark, 1, "Write final mark after end of column (0 - disabled, do nothing if index_granularity_bytes=0)", 0) \
|
||||
M(Bool, enable_mixed_granularity_parts, 1, "Enable parts with adaptive and non adaptive granularity", 0) \
|
||||
M(Bool, write_final_mark, true, "Write final mark after end of column (0 - disabled, do nothing if index_granularity_bytes=0)", 0) \
|
||||
M(Bool, enable_mixed_granularity_parts, true, "Enable parts with adaptive and non adaptive granularity", 0) \
|
||||
M(MaxThreads, max_part_loading_threads, 0, "The number of threads to load data parts at startup.", 0) \
|
||||
M(MaxThreads, max_part_removal_threads, 0, "The number of threads for concurrent removal of inactive data parts. One is usually enough, but in 'Google Compute Environment SSD Persistent Disks' file removal (unlink) operation is extraordinarily slow and you probably have to increase this number (recommended is up to 16).", 0) \
|
||||
M(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \
|
||||
|
@ -16,7 +16,7 @@ namespace DB
|
||||
#define LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS(M) \
|
||||
M(UInt64, materialized_postgresql_max_block_size, 65536, "Number of row collected before flushing data into table.", 0) \
|
||||
M(String, materialized_postgresql_tables_list, "", "List of tables for MaterializedPostgreSQL database engine", 0) \
|
||||
M(Bool, materialized_postgresql_allow_automatic_update, 0, "Allow to reload table in the background, when schema changes are detected", 0) \
|
||||
M(Bool, materialized_postgresql_allow_automatic_update, false, "Allow to reload table in the background, when schema changes are detected", 0) \
|
||||
|
||||
DECLARE_SETTINGS_TRAITS(MaterializedPostgreSQLSettingsTraits, LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS)
|
||||
|
||||
|
@ -196,7 +196,7 @@ String StorageRabbitMQ::getTableBasedName(String name, const StorageID & table_i
|
||||
std::shared_ptr<Context> StorageRabbitMQ::addSettings(ContextPtr local_context) const
|
||||
{
|
||||
auto modified_context = Context::createCopy(local_context);
|
||||
modified_context->setSetting("input_format_skip_unknown_fields", Field{true});
|
||||
modified_context->setSetting("input_format_skip_unknown_fields", true);
|
||||
modified_context->setSetting("input_format_allow_errors_ratio", 0.);
|
||||
modified_context->setSetting("input_format_allow_errors_num", rabbitmq_settings->rabbitmq_skip_broken_messages.value);
|
||||
|
||||
|
@ -209,7 +209,7 @@ Pipe StorageMerge::read(
|
||||
* since there is no certainty that it works when one of table is MergeTree and other is not.
|
||||
*/
|
||||
auto modified_context = Context::createCopy(local_context);
|
||||
modified_context->setSetting("optimize_move_to_prewhere", Field{false});
|
||||
modified_context->setSetting("optimize_move_to_prewhere", false);
|
||||
|
||||
/// What will be result structure depending on query processed stage in source tables?
|
||||
Block header = getHeaderForProcessingStage(*this, column_names, metadata_snapshot, query_info, local_context, processed_stage);
|
||||
|
@ -6384,6 +6384,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(
|
||||
MergeTreeData & src_data = checkStructureAndGetMergeTreeData(source_table, source_metadata_snapshot, metadata_snapshot);
|
||||
String partition_id = getPartitionIDFromQuery(partition, query_context);
|
||||
|
||||
/// NOTE: Some covered parts may be missing in src_all_parts if corresponding log entries are not executed yet.
|
||||
DataPartsVector src_all_parts = src_data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);
|
||||
DataPartsVector src_parts;
|
||||
MutableDataPartsVector dst_parts;
|
||||
@ -6511,8 +6512,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(
|
||||
|
||||
delimiting_block_lock->getUnlockOps(ops);
|
||||
/// Check and update version to avoid race with DROP_RANGE
|
||||
ops.emplace_back(zkutil::makeCheckRequest(alter_partition_version_path, alter_partition_version_stat.version));
|
||||
ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", -1));
|
||||
ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", alter_partition_version_stat.version));
|
||||
/// Just update version, because merges assignment relies on it
|
||||
ops.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1));
|
||||
ops.emplace_back(zkutil::makeCreateRequest(fs::path(zookeeper_path) / "log/log-", entry.toString(), zkutil::CreateMode::PersistentSequential));
|
||||
@ -6594,7 +6594,39 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
|
||||
auto src_data_id = src_data.getStorageID();
|
||||
String partition_id = getPartitionIDFromQuery(partition, query_context);
|
||||
|
||||
DataPartsVector src_all_parts = src_data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);
|
||||
/// A range for log entry to remove parts from the source table (myself).
|
||||
auto zookeeper = getZooKeeper();
|
||||
String alter_partition_version_path = zookeeper_path + "/alter_partition_version";
|
||||
Coordination::Stat alter_partition_version_stat;
|
||||
zookeeper->get(alter_partition_version_path, &alter_partition_version_stat);
|
||||
|
||||
MergeTreePartInfo drop_range;
|
||||
std::optional<EphemeralLockInZooKeeper> delimiting_block_lock;
|
||||
getFakePartCoveringAllPartsInPartition(partition_id, drop_range, delimiting_block_lock, true);
|
||||
String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range);
|
||||
|
||||
DataPartPtr covering_part;
|
||||
DataPartsVector src_all_parts;
|
||||
{
|
||||
/// NOTE: Some covered parts may be missing in src_all_parts if corresponding log entries are not executed yet.
|
||||
auto parts_lock = src_data.lockParts();
|
||||
src_all_parts = src_data.getActivePartsToReplace(drop_range, drop_range_fake_part_name, covering_part, parts_lock);
|
||||
}
|
||||
|
||||
if (covering_part)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got part {} covering drop range {}, it's a bug",
|
||||
covering_part->name, drop_range_fake_part_name);
|
||||
|
||||
/// After allocating block number for drop_range we must ensure that it does not intersect block numbers
|
||||
/// allocated by concurrent REPLACE query.
|
||||
/// We could check it in multi-request atomically with creation of DROP_RANGE entry in source table log,
|
||||
/// but it's better to check it here and fail as early as possible (before we have done something to destination table).
|
||||
Coordination::Error version_check_code = zookeeper->trySet(alter_partition_version_path, "", alter_partition_version_stat.version);
|
||||
if (version_check_code != Coordination::Error::ZOK)
|
||||
throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot DROP PARTITION in {} after copying partition to {}, "
|
||||
"because another ALTER PARTITION query was concurrently executed",
|
||||
getStorageID().getFullTableName(), dest_table_storage->getStorageID().getFullTableName());
|
||||
|
||||
DataPartsVector src_parts;
|
||||
MutableDataPartsVector dst_parts;
|
||||
Strings block_id_paths;
|
||||
@ -6604,21 +6636,11 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
|
||||
LOG_DEBUG(log, "Cloning {} parts", src_all_parts.size());
|
||||
|
||||
static const String TMP_PREFIX = "tmp_move_from_";
|
||||
auto zookeeper = getZooKeeper();
|
||||
|
||||
/// A range for log entry to remove parts from the source table (myself).
|
||||
|
||||
MergeTreePartInfo drop_range;
|
||||
std::optional<EphemeralLockInZooKeeper> delimiting_block_lock;
|
||||
getFakePartCoveringAllPartsInPartition(partition_id, drop_range, delimiting_block_lock, true);
|
||||
String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range);
|
||||
|
||||
/// Clone parts into destination table.
|
||||
|
||||
String alter_partition_version_path = dest_table_storage->zookeeper_path + "/alter_partition_version";
|
||||
Coordination::Stat alter_partition_version_stat;
|
||||
zookeeper->get(alter_partition_version_path, &alter_partition_version_stat);
|
||||
|
||||
String dest_alter_partition_version_path = dest_table_storage->zookeeper_path + "/alter_partition_version";
|
||||
Coordination::Stat dest_alter_partition_version_stat;
|
||||
zookeeper->get(dest_alter_partition_version_path, &dest_alter_partition_version_stat);
|
||||
for (const auto & src_part : src_all_parts)
|
||||
{
|
||||
if (!dest_table_storage->canReplacePartition(src_part))
|
||||
@ -6699,8 +6721,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
|
||||
}
|
||||
|
||||
/// Check and update version to avoid race with DROP_RANGE
|
||||
ops.emplace_back(zkutil::makeCheckRequest(alter_partition_version_path, alter_partition_version_stat.version));
|
||||
ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", -1));
|
||||
ops.emplace_back(zkutil::makeSetRequest(dest_alter_partition_version_path, "", dest_alter_partition_version_stat.version));
|
||||
/// Just update version, because merges assignment relies on it
|
||||
ops.emplace_back(zkutil::makeSetRequest(fs::path(dest_table_storage->zookeeper_path) / "log", "", -1));
|
||||
ops.emplace_back(zkutil::makeCreateRequest(fs::path(dest_table_storage->zookeeper_path) / "log/log-",
|
||||
@ -6754,26 +6775,14 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
|
||||
}
|
||||
|
||||
/// Create DROP_RANGE for the source table
|
||||
alter_partition_version_path = zookeeper_path + "/alter_partition_version";
|
||||
zookeeper->get(alter_partition_version_path, &alter_partition_version_stat);
|
||||
|
||||
Coordination::Requests ops_src;
|
||||
ops_src.emplace_back(zkutil::makeCreateRequest(
|
||||
fs::path(zookeeper_path) / "log/log-", entry_delete.toString(), zkutil::CreateMode::PersistentSequential));
|
||||
/// Check and update version to avoid race with REPLACE_RANGE
|
||||
ops_src.emplace_back(zkutil::makeCheckRequest(alter_partition_version_path, alter_partition_version_stat.version));
|
||||
ops_src.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", -1));
|
||||
/// Just update version, because merges assignment relies on it
|
||||
ops_src.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1));
|
||||
delimiting_block_lock->getUnlockOps(ops_src);
|
||||
|
||||
Coordination::Error code = zookeeper->tryMulti(ops_src, op_results);
|
||||
if (code == Coordination::Error::ZBADVERSION)
|
||||
throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot DROP PARTITION in {} after copying partition to {}, "
|
||||
"because another ALTER PARTITION query was concurrently executed",
|
||||
getStorageID().getFullTableName(), dest_table_storage->getStorageID().getFullTableName());
|
||||
else
|
||||
zkutil::KeeperMultiException::check(code, ops_src, op_results);
|
||||
op_results = zookeeper->multi(ops_src);
|
||||
|
||||
log_znode_path = dynamic_cast<const Coordination::CreateResponse &>(*op_results.front()).path_created;
|
||||
entry_delete.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1);
|
||||
@ -7145,8 +7154,7 @@ bool StorageReplicatedMergeTree::dropAllPartsInPartition(
|
||||
/// Check and update version to avoid race with REPLACE_RANGE.
|
||||
/// Otherwise new parts covered by drop_range_info may appear after execution of current DROP_RANGE entry
|
||||
/// as a result of execution of concurrently created REPLACE_RANGE entry.
|
||||
ops.emplace_back(zkutil::makeCheckRequest(alter_partition_version_path, alter_partition_version_stat.version));
|
||||
ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", -1));
|
||||
ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", alter_partition_version_stat.version));
|
||||
|
||||
/// Just update version, because merges assignment relies on it
|
||||
ops.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1));
|
||||
|
@ -1,26 +1,38 @@
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Parsers/queryToString.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/UserDefinedFunctionFactory.h>
|
||||
#include <Storages/System/StorageSystemFunctions.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace
|
||||
{
|
||||
template <typename Factory>
|
||||
void fillRow(MutableColumns & res_columns, const String & name, UInt64 is_aggregate, const Factory & f)
|
||||
void fillRow(MutableColumns & res_columns, const String & name, UInt64 is_aggregate, const String & create_query, const Factory & f)
|
||||
{
|
||||
res_columns[0]->insert(name);
|
||||
res_columns[1]->insert(is_aggregate);
|
||||
res_columns[2]->insert(f.isCaseInsensitive(name));
|
||||
if (f.isAlias(name))
|
||||
res_columns[3]->insert(f.aliasTo(name));
|
||||
else
|
||||
|
||||
if constexpr (std::is_same_v<Factory, UserDefinedFunctionFactory>)
|
||||
{
|
||||
res_columns[2]->insert(false);
|
||||
res_columns[3]->insertDefault();
|
||||
}
|
||||
else
|
||||
{
|
||||
res_columns[2]->insert(f.isCaseInsensitive(name));
|
||||
if (f.isAlias(name))
|
||||
res_columns[3]->insert(f.aliasTo(name));
|
||||
else
|
||||
res_columns[3]->insertDefault();
|
||||
}
|
||||
|
||||
res_columns[4]->insert(create_query);
|
||||
}
|
||||
}
|
||||
|
||||
@ -31,6 +43,7 @@ NamesAndTypesList StorageSystemFunctions::getNamesAndTypes()
|
||||
{"is_aggregate", std::make_shared<DataTypeUInt8>()},
|
||||
{"case_insensitive", std::make_shared<DataTypeUInt8>()},
|
||||
{"alias_to", std::make_shared<DataTypeString>()},
|
||||
{"create_query", std::make_shared<DataTypeString>()}
|
||||
};
|
||||
}
|
||||
|
||||
@ -40,14 +53,22 @@ void StorageSystemFunctions::fillData(MutableColumns & res_columns, ContextPtr,
|
||||
const auto & function_names = functions_factory.getAllRegisteredNames();
|
||||
for (const auto & function_name : function_names)
|
||||
{
|
||||
fillRow(res_columns, function_name, UInt64(0), functions_factory);
|
||||
fillRow(res_columns, function_name, UInt64(0), "", functions_factory);
|
||||
}
|
||||
|
||||
const auto & aggregate_functions_factory = AggregateFunctionFactory::instance();
|
||||
const auto & aggregate_function_names = aggregate_functions_factory.getAllRegisteredNames();
|
||||
for (const auto & function_name : aggregate_function_names)
|
||||
{
|
||||
fillRow(res_columns, function_name, UInt64(1), aggregate_functions_factory);
|
||||
fillRow(res_columns, function_name, UInt64(1), "", aggregate_functions_factory);
|
||||
}
|
||||
|
||||
const auto & user_defined_functions_factory = UserDefinedFunctionFactory::instance();
|
||||
const auto & user_defined_functions_names = user_defined_functions_factory.getAllRegisteredNames();
|
||||
for (const auto & function_name : user_defined_functions_names)
|
||||
{
|
||||
auto create_query = queryToString(user_defined_functions_factory.get(function_name));
|
||||
fillRow(res_columns, function_name, UInt64(0), create_query, user_defined_functions_factory);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ sudo -H pip install \
    pytest \
    pytest-timeout \
    redis \
    tzlocal \
    tzlocal==2.1 \
    urllib3 \
    requests-kerberos \
    dict2xml \
39
tests/integration/test_access_for_functions/test.py
Normal file
@ -0,0 +1,39 @@
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')


@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()

def test_access_rights_for_function():
    create_function_query = "CREATE FUNCTION MySum AS (a, b) -> a + b"

    instance.query("CREATE USER A")
    instance.query("CREATE USER B")
    assert "it's necessary to have grant CREATE FUNCTION ON *.*" in instance.query_and_get_error(create_function_query, user = 'A')

    instance.query("GRANT CREATE FUNCTION on *.* TO A")

    instance.query(create_function_query, user = 'A')
    assert instance.query("SELECT MySum(1, 2)") == "3\n"

    assert "it's necessary to have grant DROP FUNCTION ON *.*" in instance.query_and_get_error("DROP FUNCTION MySum", user = 'B')

    instance.query("GRANT DROP FUNCTION ON *.* TO B")
    instance.query("DROP FUNCTION MySum", user = 'B')
    assert "Unknown function MySum" in instance.query_and_get_error("SELECT MySum(1, 2)")

    instance.query("REVOKE CREATE FUNCTION ON *.* FROM A")
    assert "it's necessary to have grant CREATE FUNCTION ON *.*" in instance.query_and_get_error(create_function_query, user = 'A')

    instance.query("DROP USER IF EXISTS A")
    instance.query("DROP USER IF EXISTS B")
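For readability, the privilege flow exercised by the test above, written as plain SQL (a sketch; the user names A and B and the MySum function come from the test):

``` sql
-- As an administrator: grant the privileges introduced by this change.
GRANT CREATE FUNCTION ON *.* TO A;
GRANT DROP FUNCTION ON *.* TO B;
-- As user A: allowed only after the grant.
CREATE FUNCTION MySum AS (a, b) -> a + b;
-- As user B: allowed only after the grant.
DROP FUNCTION MySum;
```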
@ -1265,8 +1265,14 @@ def test_kill_while_insert(start_cluster):
        disks = get_used_disks_for_table(node1, name)
        assert set(disks) == {"jbod1"}

        def ignore_exceptions(f, *args):
            try:
                f(*args)
            except:
                """(っಠ‿ಠ)っ"""

        start_time = time.time()
        long_select = threading.Thread(target=node1.query, args=("SELECT sleep(3) FROM {name}".format(name=name),))
        long_select = threading.Thread(target=ignore_exceptions, args=(node1.query, "SELECT sleep(3) FROM {name}".format(name=name)))
        long_select.start()

        time.sleep(0.5)
@ -0,0 +1,39 @@
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', stay_alive=True)


@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def test_persistence():
    create_function_query1 = "CREATE FUNCTION MySum1 AS (a, b) -> a + b"
    create_function_query2 = "CREATE FUNCTION MySum2 AS (a, b) -> MySum1(a, b) + b"

    instance.query(create_function_query1)
    instance.query(create_function_query2)

    assert instance.query("SELECT MySum1(1,2)") == "3\n"
    assert instance.query("SELECT MySum2(1,2)") == "5\n"

    instance.restart_clickhouse()

    assert instance.query("SELECT MySum1(1,2)") == "3\n"
    assert instance.query("SELECT MySum2(1,2)") == "5\n"

    instance.query("DROP FUNCTION MySum2")
    instance.query("DROP FUNCTION MySum1")

    instance.restart_clickhouse()

    assert "Unknown function MySum1" in instance.query_and_get_error("SELECT MySum1(1, 2)")
    assert "Unknown function MySum2" in instance.query_and_get_error("SELECT MySum2(1, 2)")
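The same persistence scenario, expressed as the SQL the test sends to the instance (a sketch; the server restart in the middle is driven by the test harness, not by SQL):

``` sql
CREATE FUNCTION MySum1 AS (a, b) -> a + b;
CREATE FUNCTION MySum2 AS (a, b) -> MySum1(a, b) + b;
SELECT MySum1(1, 2), MySum2(1, 2);  -- 3 and 5
-- ... server restart ...
SELECT MySum1(1, 2), MySum2(1, 2);  -- still 3 and 5: the definitions survive the restart
DROP FUNCTION MySum2;
DROP FUNCTION MySum1;
```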
@ -1,5 +1,3 @@


import threading
import os
from tempfile import NamedTemporaryFile

@ -35,18 +33,21 @@ def started_cluster():

# NOTE this test has to be ported to Keeper
def test_secure_connection(started_cluster):
    assert node1.query("SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n'
    assert node2.query("SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n'
    # no asserts, connection works
    node1.query("SELECT count() FROM system.zookeeper WHERE path = '/'")
    node2.query("SELECT count() FROM system.zookeeper WHERE path = '/'")

    kThreadsNumber = 16
    kIterations = 100
    threads = []
    for _ in range(kThreadsNumber):
        threads.append(threading.Thread(target=(lambda:
            [node1.query("SELECT count() FROM system.zookeeper WHERE path = '/'") for _ in range(kIterations)])))
    threads_number = 16
    iterations = 100
    threads = []

    for thread in threads:
        thread.start()
    # just checking for race conditions
    for _ in range(threads_number):
        threads.append(threading.Thread(target=(lambda:
            [node1.query("SELECT count() FROM system.zookeeper WHERE path = '/'") for _ in range(iterations)])))

    for thread in threads:
        thread.join()
    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()
@ -45,11 +45,13 @@ CREATE TABLE [] TABLE CREATE
CREATE VIEW [] VIEW CREATE
CREATE DICTIONARY [] DICTIONARY CREATE
CREATE TEMPORARY TABLE [] GLOBAL CREATE
CREATE FUNCTION [] DATABASE CREATE
CREATE [] \N ALL
DROP DATABASE [] DATABASE DROP
DROP TABLE [] TABLE DROP
DROP VIEW [] VIEW DROP
DROP DICTIONARY [] DICTIONARY DROP
DROP FUNCTION [] DATABASE DROP
DROP [] \N ALL
TRUNCATE ['TRUNCATE TABLE'] TABLE ALL
OPTIMIZE ['OPTIMIZE TABLE'] TABLE ALL
@ -2,7 +2,7 @@ drop table if exists data_01730;

-- does not use 127.1 due to prefer_localhost_replica

select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 20 settings distributed_group_by_no_merge=0, max_memory_usage='100Mi'; -- { serverError 241 }
select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 20 settings distributed_group_by_no_merge=0, max_memory_usage='100Mi'; -- { serverError MEMORY_LIMIT_EXCEEDED }
-- no memory limit error, because with distributed_group_by_no_merge=2 remote servers will do ORDER BY and will cut to the LIMIT
select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 20 settings distributed_group_by_no_merge=2, max_memory_usage='100Mi';

@ -10,11 +10,12 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n
-- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block,
-- so the initiator will first receive all blocks from remotes and only after that start merging,
-- and will hit the memory limit.
select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='100Mi'; -- { serverError 241 }
select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='100Mi'; -- { serverError MEMORY_LIMIT_EXCEEDED }

-- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently,
-- since they don't need to wait until the aggregation is finished,
-- and so the query will not hit the memory limit error.
create table data_01730 engine=MergeTree() order by key as select number key from numbers(1e6);
select * from remote('127.{2..11}', currentDatabase(), data_01730) group by key order by key limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='100Mi', optimize_aggregation_in_order=1 format Null;
select * from remote('127.{2..11}', currentDatabase(), data_01730) group by key order by key limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='40Mi', optimize_aggregation_in_order=1 format Null; -- { serverError MEMORY_LIMIT_EXCEEDED }
select * from remote('127.{2..11}', currentDatabase(), data_01730) group by key order by key limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='50Mi', optimize_aggregation_in_order=1 format Null;
drop table data_01730;
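The 40Mi/50Mi thresholds above can be sanity-checked by looking at the peak memory a query actually used; a hedged sketch using the standard system.query_log table (not part of this change):

``` sql
-- Illustrative: peak memory of the most recently finished query.
SELECT query, memory_usage
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 1;
```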
@ -0,0 +1,2 @@
24
1
13
tests/queries/0_stateless/01856_create_function.sql
Normal file
@ -0,0 +1,13 @@
CREATE FUNCTION 01856_test_function_0 AS (a, b, c) -> a * b * c;
SELECT 01856_test_function_0(2, 3, 4);
SELECT isConstant(01856_test_function_0(1, 2, 3));
DROP FUNCTION 01856_test_function_0;
CREATE FUNCTION 01856_test_function_1 AS (a, b) -> a || b || c; --{serverError 47}
CREATE FUNCTION 01856_test_function_1 AS (a, b) -> 01856_test_function_1(a, b) + 01856_test_function_1(a, b); --{serverError 600}
CREATE FUNCTION cast AS a -> a + 1; --{serverError 598}
CREATE FUNCTION sum AS (a, b) -> a + b; --{serverError 598}
CREATE FUNCTION 01856_test_function_2 AS (a, b) -> a + b;
CREATE FUNCTION 01856_test_function_2 AS (a) -> a || '!!!'; --{serverError 598}
DROP FUNCTION 01856_test_function_2;
DROP FUNCTION unknown_function; -- {serverError 46}
DROP FUNCTION CAST; -- {serverError 599}
@ -483,6 +483,8 @@
    "01804_dictionary_decimal256_type",
    "01850_dist_INSERT_preserve_error", // uses cluster with different static databases shard_0/shard_1
    "01821_table_comment",
    "01856_create_function",
    "01857_create_function_and_check_jit_compiled",
    "01824_prefer_global_in_and_join",
    "01870_modulo_partition_key",
    "01870_buffer_flush", // creates database
@ -77,6 +77,7 @@ Results for 2x AMD EPYC 7F72 3.2 Ghz (Total 96 Cores, IBM Cloud's Bare Metal Ser
Results for 2x AMD EPYC 7742 (128 physical cores, 1 TB DDR4-3200 RAM) from <b>Yedige Davletgaliyev</b> and <b>Nikita Zhavoronkov</b> of blockchair.com.<br/>
Results for ASUS A15 (Ryzen laptop) are from <b>Kimmo Linna</b>.<br/>
Results for MacBook Air M1 are from <b>Denis Glazachev</b>.<br/>
Results for Xeon Gold 6140 are from <b>Shiv Iyer</b> (ChistaDATA Labs).<br/>
</p>
</div>
</div>
56
website/benchmark/hardware/results/xeon_gold_6140.json
Normal file
@ -0,0 +1,56 @@
[
    {
        "system": "Xeon Gold 6140",
        "system_full": "Xeon Gold 6140, 1 socket, 32 threads, 64 GB RAM, vda",
        "cpu_vendor": "Intel",
        "cpu_model": "Xeon Gold 6140",
        "time": "2021-08-24 00:00:00",
        "kind": "server",
        "result":
        [
            [0.002, 0.002, 0.001],
            [0.017, 0.008, 0.008],
            [0.029, 0.019, 0.018],
            [0.043, 0.026, 0.025],
            [0.101, 0.087, 0.084],
            [0.213, 0.190, 0.187],
            [0.017, 0.012, 0.012],
            [0.036, 0.009, 0.009],
            [0.264, 0.240, 0.245],
            [0.313, 0.269, 0.270],
            [0.139, 0.120, 0.123],
            [0.149, 0.129, 0.134],
            [0.411, 0.342, 0.342],
            [0.490, 0.448, 0.445],
            [0.433, 0.411, 0.403],
            [0.459, 0.446, 0.445],
            [1.223, 1.143, 1.133],
            [0.723, 0.675, 0.685],
            [2.121, 2.095, 2.049],
            [0.058, 0.049, 0.030],
            [0.560, 0.429, 0.430],
            [0.653, 0.499, 0.501],
            [1.379, 1.171, 1.152],
            [1.153, 0.564, 0.562],
            [0.200, 0.155, 0.152],
            [0.150, 0.129, 0.127],
            [0.192, 0.151, 0.152],
            [0.583, 0.449, 0.459],
            [0.752, 0.666, 0.656],
            [1.073, 1.072, 1.073],
            [0.373, 0.330, 0.336],
            [0.525, 0.441, 0.441],
            [3.795, 2.731, 2.908],
            [1.918, 1.756, 1.787],
            [1.828, 1.732, 1.735],
            [0.725, 0.697, 0.711],
            [0.188, 0.171, 0.179],
            [0.072, 0.067, 0.065],
            [0.077, 0.062, 0.062],
            [0.435, 0.415, 0.391],
            [0.027, 0.022, 0.020],
            [0.023, 0.020, 0.014],
            [0.014, 0.008, 0.003]
        ]
    }
]