Mirror of https://github.com/ClickHouse/ClickHouse.git

Merge branch 'master' into mvcc_prototype

This commit is contained in: commit 2e4ae37d98
@@ -401,17 +401,6 @@ else ()
option(WERROR "Enable -Werror compiler option" ON)
endif ()

if (WERROR)
    # Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks.
    # Instead, adopt modern cmake usage requirement.
    target_compile_options(global-libs INTERFACE "-Werror")
endif ()

# Make this extra-checks for correct library dependencies.
if (OS_LINUX AND NOT SANITIZE)
    target_link_options(global-libs INTERFACE "-Wl,--no-undefined")
endif ()

# Increase stack size on Musl. We need big stack for our recursive-descend parser.
if (USE_MUSL)
    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,stack-size=2097152")
@@ -419,6 +408,7 @@ endif ()

include(cmake/dbms_glob_sources.cmake)

add_library(global-group INTERFACE)
if (OS_LINUX OR OS_ANDROID)
    include(cmake/linux/default_libs.cmake)
elseif (OS_DARWIN)
@@ -426,6 +416,18 @@ elseif (OS_DARWIN)
elseif (OS_FREEBSD)
    include(cmake/freebsd/default_libs.cmake)
endif ()
link_libraries(global-group)

if (WERROR)
    # Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks.
    # Instead, adopt modern cmake usage requirement.
    target_compile_options(global-group INTERFACE "-Werror")
endif ()

# Make this extra-checks for correct library dependencies.
if (OS_LINUX AND NOT SANITIZE)
    target_link_options(global-group INTERFACE "-Wl,--no-undefined")
endif ()

######################################
### Add targets below this comment ###
@@ -1,26 +1,42 @@
#pragma once

#include <pdqsort.h>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"

#include <miniselect/floyd_rivest_select.h>

template <class RandomIt>
template <typename RandomIt>
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
{
    ::miniselect::floyd_rivest_select(first, nth, last);
}

template <class RandomIt>
template <typename RandomIt>
void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
{
    ::miniselect::floyd_rivest_partial_sort(first, middle, last);
}

template <class RandomIt, class Compare>
template <typename RandomIt, typename Compare>
void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
{
    ::miniselect::floyd_rivest_partial_sort(first, middle, last, compare);
}

#pragma GCC diagnostic pop

template <typename RandomIt, typename Compare>
void sort(RandomIt first, RandomIt last, Compare compare)
{
    ::pdqsort(first, last, compare);
}

template <typename RandomIt>
void sort(RandomIt first, RandomIt last)
{
    using value_type = typename std::iterator_traits<RandomIt>::value_type;
    using comparator = std::less<value_type>;
    ::pdqsort(first, last, comparator());
}
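Taken together, this header routes selection to miniselect's Floyd-Rivest implementation and full sorts to pdqsort, behind drop-in `::nth_element`/`::partial_sort`/`::sort` signatures. A minimal usage sketch (a hypothetical standalone file; it assumes the repo's include paths and the vendored pdqsort/miniselect contribs that `base/sort.h` pulls in):

```cpp
// Hypothetical usage sketch of the wrappers above; not part of the commit.
#include <functional>
#include <iostream>
#include <vector>

#include <base/sort.h>

int main()
{
    std::vector<int> values{5, 1, 4, 2, 3};

    /// Selection is routed to miniselect's Floyd-Rivest implementation.
    ::nth_element(values.begin(), values.begin() + 2, values.end());
    std::cout << "median: " << values[2] << '\n'; /// prints 3

    /// Full sorts are routed to pdqsort; comparator overloads exist as well.
    ::sort(values.begin(), values.end(), std::greater<int>());
    std::cout << "max: " << values.front() << '\n'; /// prints 5
}
```

Call sites throughout the code base (see the hunks below) only need to drop the `std::` qualifier to pick these up.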
@@ -1,80 +1,59 @@
#include "atomic.h"
#include <sys/auxv.h>
#include <fcntl.h> // open
#include <sys/stat.h> // O_RDONLY
#include <unistd.h> // read, close
#include <stdlib.h> // ssize_t
#include <stdio.h> // perror, fprintf
#include <link.h> // ElfW
#include "atomic.h"
#include <unistd.h> // __environ
#include <errno.h>

#define ARRAY_SIZE(a) sizeof((a))/sizeof((a[0]))

// We don't have libc struct available here.
// Compute aux vector manually (from /proc/self/auxv).
//
// Right now there is only 51 AT_* constants,
// so 64 should be enough until this implementation will be replaced with musl.
static unsigned long __auxv[64];
// We don't have libc struct available here. Compute aux vector manually.
static unsigned long * __auxv = NULL;
static unsigned long __auxv_secure = 0;

static size_t __find_auxv(unsigned long type)
{
    size_t i;
    for (i = 0; __auxv[i]; i += 2)
    {
        if (__auxv[i] == type)
            return i + 1;
    }
    return (size_t) -1;
}

unsigned long __getauxval(unsigned long type)
{
    if (type == AT_SECURE)
        return __auxv_secure;

    if (type >= ARRAY_SIZE(__auxv))
    if (__auxv)
    {
        errno = ENOENT;
        return 0;
        size_t index = __find_auxv(type);
        if (index != ((size_t) -1))
            return __auxv[index];
    }

    return __auxv[type];
    errno = ENOENT;
    return 0;
}

static void * volatile getauxval_func;

ssize_t __retry_read(int fd, void *buf, size_t count)
{
    for (;;)
    {
        ssize_t ret = read(fd, buf, count);
        if (ret == -1)
        {
            if (errno == EINTR)
                continue;
            perror("Cannot read /proc/self/auxv");
            abort();
        }
        return ret;
    }
}
static unsigned long __auxv_init(unsigned long type)
{
    // od -t dL /proc/self/auxv
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd == -1) {
        perror("Cannot read /proc/self/auxv (likely kernel is too old or procfs is not mounted)");
        abort();
    }

    ElfW(auxv_t) aux;

    /// NOTE: sizeof(aux) is very small (less then PAGE_SIZE), so partial read should not be possible.
    _Static_assert(sizeof(aux) < 4096, "Unexpected sizeof(aux)");
    while (__retry_read(fd, &aux, sizeof(aux)) == sizeof(aux))
    if (!__environ)
    {
        if (aux.a_type >= ARRAY_SIZE(__auxv))
        {
            fprintf(stderr, "AT_* is out of range: %li (maximum allowed is %zu)\n", aux.a_type, ARRAY_SIZE(__auxv));
            abort();
        }
        __auxv[aux.a_type] = aux.a_un.a_val;
        // __environ is not initialized yet so we can't initialize __auxv right now.
        // That's normally occurred only when getauxval() is called from some sanitizer's internal code.
        errno = ENOENT;
        return 0;
    }
    close(fd);

    // AT_SECURE
    __auxv_secure = __getauxval(AT_SECURE);
    // Initialize __auxv and __auxv_secure.
    size_t i;
    for (i = 0; __environ[i]; i++);
    __auxv = (unsigned long *) (__environ + i + 1);

    size_t secure_idx = __find_auxv(AT_SECURE);
    if (secure_idx != ((size_t) -1))
        __auxv_secure = __auxv[secure_idx];

    // Now we've initialized __auxv, next time getauxval() will only call __get_auxval().
    a_cas_p(&getauxval_func, (void *)__auxv_init, (void *)__getauxval);
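The rewrite drops the fixed 64-slot table and the `/proc/self/auxv` parser: the aux vector is found in place, right after the NULL terminator of `__environ`, and `getauxval_func` starts at `__auxv_init` and is swapped to `__getauxval` after the first successful call. A standalone sketch of that first-call dispatch idea (illustrative names, not the shipped symbols; the real code CASes a `void * volatile` with `a_cas_p` instead of using `std::atomic`):

```cpp
// Sketch of lazy first-call initialization via a swapped function pointer.
#include <atomic>

using getauxval_t = unsigned long (*)(unsigned long);

static unsigned long auxv_fast(unsigned long type);
static unsigned long auxv_init(unsigned long type);

static std::atomic<getauxval_t> dispatch{auxv_init};

static unsigned long auxv_fast(unsigned long /*type*/)
{
    /// Look the type up in the already-located vector; stubbed out here.
    return 0;
}

static unsigned long auxv_init(unsigned long type)
{
    /// Locate the aux vector (it sits right after the NULL terminator of
    /// __environ), then switch every later call to the fast path.
    dispatch.store(auxv_fast);
    return auxv_fast(type);
}

unsigned long my_getauxval(unsigned long type)
{
    return dispatch.load()(type);
}

int main()
{
    return static_cast<int>(my_getauxval(23)); /// 23 == AT_SECURE on Linux
}
```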
@@ -24,14 +24,10 @@ find_package(Threads REQUIRED)

include (cmake/find/cxx.cmake)

add_library(global-group INTERFACE)

target_link_libraries(global-group INTERFACE
    $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
)

link_libraries(global-group)

# FIXME: remove when all contribs will get custom cmake lists
install(
    TARGETS global-group global-libs
@@ -25,14 +25,10 @@ find_package(Threads REQUIRED)
include (cmake/find/unwind.cmake)
include (cmake/find/cxx.cmake)

add_library(global-group INTERFACE)

target_link_libraries(global-group INTERFACE
    $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
)

link_libraries(global-group)

# FIXME: remove when all contribs will get custom cmake lists
install(
    TARGETS global-group global-libs
@@ -45,15 +45,12 @@ endif ()
include (cmake/find/unwind.cmake)
include (cmake/find/cxx.cmake)

add_library(global-group INTERFACE)
target_link_libraries(global-group INTERFACE
    -Wl,--start-group
    $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
    -Wl,--end-group
)

link_libraries(global-group)

# FIXME: remove when all contribs will get custom cmake lists
install(
    TARGETS global-group global-libs
@@ -125,10 +125,6 @@ For installing CMake and Ninja on Mac OS X first install Homebrew and then insta

Next, check the version of CMake: `cmake --version`. If it is below 3.12, you should install a newer version from the website: https://cmake.org/download/.

## Optional External Libraries {#optional-external-libraries}

ClickHouse uses several external libraries for building. None of them need to be installed separately, as they are built together with ClickHouse from the sources located in the submodules. You can check the list in `contrib`.

## C++ Compiler {#c-compiler}

Clang starting from version 11 is supported for building ClickHouse.
@@ -97,13 +97,16 @@ Structure of the `patterns` section:

``` text
pattern
    rule_type
    regexp
    function
pattern
    rule_type
    regexp
    age + precision
    ...
pattern
    rule_type
    regexp
    function
    age + precision
@@ -127,12 +130,20 @@ When processing a row, ClickHouse checks the rules in the `pattern` sections. Ea

Fields for `pattern` and `default` sections:

- `regexp` – A pattern for the metric name.
- `rule_type` – a rule's type. It is applied only to metrics of the matching type. The engine uses it to separate plain and tagged metrics. Optional parameter. Default value: `all`.
  It's unnecessary when performance is not critical, or only one metric type is used, e.g. plain metrics. By default only one set of rules is created. Otherwise, if any of the special types is defined, two different sets are created: one for plain metrics (root.branch.leaf) and one for tagged metrics (root.branch.leaf;tag1=value1).
  The default rules end up in both sets.
  Valid values:
    - `all` (default) – a universal rule, used when `rule_type` is omitted.
    - `plain` – a rule for plain metrics. The field `regexp` is processed as a regular expression.
    - `tagged` – a rule for tagged metrics (metrics are stored in the DB in the format `someName?tag1=value1&tag2=value2&tag3=value3`). The regular expression must be sorted by tag names; the first tag must be `__name__`, if it exists. The field `regexp` is processed as a regular expression.
    - `tag_list` – a rule for tagged metrics, a simple DSL for easier metric description in Graphite format: `someName;tag1=value1;tag2=value2`, `someName`, or `tag1=value1;tag2=value2`. The field `regexp` is translated into a `tagged` rule. Sorting by tag names is unnecessary; it will be done automatically. A tag's value (but not its name) can be set as a regular expression, e.g. `env=(dev|staging)`.
- `regexp` – A pattern for the metric name (a regular expression or DSL).
- `age` – The minimum age of the data in seconds.
- `precision` – How precisely to define the age of the data in seconds. Should be a divisor of 86400 (seconds in a day).
- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. Accepted functions: min / max / any / avg. The average is calculated imprecisely, like the average of the averages.

### Configuration Example {#configuration-example}
### Configuration Example without rule types {#configuration-example}

``` xml
<graphite_rollup>
@@ -167,6 +178,81 @@ Fields for `pattern` and `default` sections:
</graphite_rollup>
```

### Configuration Example with rule types {#configuration-typed-example}

``` xml
<graphite_rollup>
    <version_column_name>Version</version_column_name>
    <pattern>
        <rule_type>plain</rule_type>
        <regexp>click_cost</regexp>
        <function>any</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tagged</rule_type>
        <regexp>^((.*)|.)min\?</regexp>
        <function>min</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tagged</rule_type>
        <regexp><![CDATA[^someName\?(.*&)*tag1=value1(&|$)]]></regexp>
        <function>min</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tag_list</rule_type>
        <regexp>someName;tag2=value2</regexp>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <default>
        <function>max</function>
        <retention>
            <age>0</age>
            <precision>60</precision>
        </retention>
        <retention>
            <age>3600</age>
            <precision>300</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>3600</precision>
        </retention>
    </default>
</graphite_rollup>
```

!!! warning "Warning"
    Data rollup is performed during merges. Usually, for old partitions, merges are not started, so for rollup it is necessary to trigger an unscheduled merge using [optimize](../../../sql-reference/statements/optimize.md). Or use additional tools, for example [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).
@@ -54,10 +54,8 @@ If the set of columns in the Buffer table does not match the set of columns in a
If the types do not match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log, and the buffer is cleared.
The same thing happens if the subordinate table does not exist when the buffer is flushed.

If you need to run ALTER for a subordinate table and the Buffer table, we recommend first deleting the Buffer table, running ALTER for the subordinate table, then creating the Buffer table again.

!!! attention "Attention"
    Running ALTER on the Buffer table in releases made before 28 Sep 2020 will cause a `Block structure mismatch` error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117)), so deleting the Buffer table and then recreating it is the only option. It is advisable to check that this error is fixed in your release before trying to run ALTER on the Buffer table.
    Running ALTER on the Buffer table in releases made before 26 Oct 2021 will cause a `Block structure mismatch` error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117) and [#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)), so deleting the Buffer table and then recreating it is the only option. It is advisable to check that this error is fixed in your release before trying to run ALTER on the Buffer table.

If the server is restarted abnormally, the data in the buffer is lost.
@@ -25,6 +25,7 @@ Categories:
- **[Operations](../faq/operations/index.md)**
    - [Which ClickHouse version to use in production?](../faq/operations/production.md)
    - [Is it possible to delete old records from a ClickHouse table?](../faq/operations/delete-old-data.md)
    - [Does ClickHouse support multi-region replication?](../faq/operations/multi-region-replication.md)
- **[Integration](../faq/integration/index.md)**
    - [How do I export data from ClickHouse to a file?](../faq/integration/file-export.md)
    - [What if I have a problem with encodings when connecting to Oracle via ODBC?](../faq/integration/oracle-odbc.md)
@@ -23,11 +23,13 @@ Web UI can be accessed here: `http://localhost:8123/play`.
![Web UI](../images/play.png)

In health-check scripts use the `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13.
In health-check scripts use the `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. See also `/replicas_status` to check the replicas' delay.

``` bash
$ curl 'http://localhost:8123/ping'
Ok.
$ curl 'http://localhost:8123/replicas_status'
Ok.
```

Send the request as a URL ‘query’ parameter, or as a POST. Or send the beginning of the query in the ‘query’ parameter, and the rest in the POST (we’ll explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.
@@ -72,7 +72,7 @@ Reloads all [CatBoost](../../guides/apply-catboost-model.md#applying-catboost-mo

**Syntax**

```sql
SYSTEM RELOAD MODELS
SYSTEM RELOAD MODELS [ON CLUSTER cluster_name]
```

## RELOAD MODEL {#query_language-system-reload-model}
@@ -82,7 +82,7 @@ Completely reloads a CatBoost model `model_name` if the configuration was update

**Syntax**

```sql
SYSTEM RELOAD MODEL <model_name>
SYSTEM RELOAD MODEL [ON CLUSTER cluster_name] <model_name>
```

## RELOAD FUNCTIONS {#query_language-system-reload-functions}
@@ -92,8 +92,8 @@ Reloads all registered [executable user defined functions](../functions/index.md

**Syntax**

```sql
RELOAD FUNCTIONS
RELOAD FUNCTION function_name
RELOAD FUNCTIONS [ON CLUSTER cluster_name]
RELOAD FUNCTION [ON CLUSTER cluster_name] function_name
```

## DROP DNS CACHE {#query_language-system-drop-dns-cache}
@@ -3,14 +3,14 @@ toc_priority: 53
toc_title: USE
---

# USE Statement {#use}

``` sql
USE db
```

Sets the current database for the session.
Lets you set the current database for the session.

The current database is used for searching for tables if the database is not explicitly defined in the query with a dot before the table name.

This query can't be made when using the HTTP protocol, since there is no concept of a session.
BIN  docs/ko/images/column-oriented.gif  (new file, binary not shown; 43 KiB)

docs/ko/images/logo.svg  (new file, 1 line; 373 B)
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="54" height="48" markdown="1" viewBox="0 0 9 8"><style>.o{fill:#fc0}.r{fill:red}</style><path d="M0,7 h1 v1 h-1 z" class="r"/><path d="M0,0 h1 v7 h-1 z" class="o"/><path d="M2,0 h1 v8 h-1 z" class="o"/><path d="M4,0 h1 v8 h-1 z" class="o"/><path d="M6,0 h1 v8 h-1 z" class="o"/><path d="M8,3.25 h1 v1.5 h-1 z" class="o"/></svg>

BIN  docs/ko/images/play.png  (new file, binary not shown; 26 KiB)

BIN  docs/ko/images/row-oriented.gif  (new file, binary not shown; 38 KiB)

docs/ko/index.md  (new file, 94 lines)
@@ -0,0 +1,94 @@
---
toc_priority: 0
toc_title: Table of Contents
---

# What Is ClickHouse? {#what-is-clickhouse}

ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).

In a "normal" row-oriented DBMS, data is stored in this order:

| row | WatchID | JavaEnable | Title | GoodEvent | EventTime |
|-----|-------------|------------|--------------------|-----------|---------------------|
| #0 | 89354350662 | 1 | Investor Relations | 1 | 2016-05-18 05:19:20 |
| #1 | 90329509958 | 0 | Contact us | 1 | 2016-05-18 08:10:20 |
| #2 | 89953706054 | 1 | Mission | 1 | 2016-05-18 07:38:00 |
| #N | … | … | … | … | … |

In other words, all the values related to a row are physically stored next to each other.

Examples of row-oriented DBMSs are MySQL, Postgres, and MS SQL Server.

In a column-oriented DBMS, data is stored like this:

| Row: | #0 | #1 | #2 | #N |
|-------------|---------------------|---------------------|---------------------|-----|
| WatchID: | 89354350662 | 90329509958 | 89953706054 | … |
| JavaEnable: | 1 | 0 | 1 | … |
| Title: | Investor Relations | Contact us | Mission | … |
| GoodEvent: | 1 | 1 | 1 | … |
| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … |

These examples only show the order that data is arranged in: the values from different columns are stored separately, and data from the same column is stored together.

Examples of column-oriented DBMSs: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+.

Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query (rows, columns, and bytes); the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used and how isolated they are; requirements for data replication and logical integrity; and requirements for latency and throughput for each type of query, and so on.

The higher the load on the system, the more important it is to customize the system setup to match the requirements of the usage scenario, and the more fine-grained this customization becomes. There is no system that is equally well-suited to significantly different scenarios. If a system adapts to a wide set of scenarios, under a high load it will handle all the scenarios equally poorly, or will work well for just one or a few of the possible scenarios.

## Key Properties of the OLAP Scenario {#key-properties-of-olap-scenario}

- The vast majority of requests are for read access.
- Data is updated in fairly large batches (> 1000 rows), not by single rows; or it is not updated at all.
- Data is added to the DB but is not modified.
- For reads, quite a large number of rows are extracted from the DB, but only a small subset of columns.
- Tables are "wide," meaning they contain a large number of columns.
- Queries are relatively rare (usually hundreds of queries per server or less per second).
- For simple queries, latencies of around 50 ms are allowed.
- Column values are fairly small: numbers and short strings (for example, 60 bytes per URL).
- High throughput is required when processing a single query (up to billions of rows per second per server).
- Transactions are not necessary.
- Low requirements for data consistency.
- There is one large table per query; all tables except one are small.
- A query result is significantly smaller than the source data; in other words, data is filtered or aggregated, so the result fits in a single server's RAM.

It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or key-value access). So it does not make sense to try to use OLTP or key-value DBs for processing analytical queries if you want decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases.

## Why Column-Oriented Databases Work Better in the OLAP Scenario {#why-column-oriented-databases-work-better-in-the-olap-scenario}

Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster than row-oriented databases when processing most queries. The reasons are explained in detail below, but the fact is easier to demonstrate visually.

**Row-oriented DBMS**

![Row-oriented](images/row-oriented.gif#)

**Column-oriented DBMS**

![Column-oriented](images/column-oriented.gif#)

See the difference?

### Input/Output {#inputoutput}

1. For an analytical query, only a small number of table columns need to be read. In a column-oriented database, you can read just the data you need. For example, if you need 5 columns out of 100, you can expect a 20-fold reduction in I/O.
2. Since data is read in packets, it is easier to compress. Data in columns is also easier to compress. This further reduces the I/O volume.
3. Due to the reduced I/O, more data fits in the system cache.

For example, the query "count the number of records for each advertising platform" requires reading one "advertising platform ID" column, which takes up 1 byte uncompressed. If most of the traffic did not come from advertising platforms, you can expect at least 10-fold compression of this column. With a fast compression algorithm, data can be decompressed at a rate of at least several gigabytes of uncompressed data per second. In other words, this query can be processed at a rate of roughly several billion rows per second on a single server. This speed is actually achieved in practice.

### CPU {#cpu}

Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you do not do this, with any half-decent disk subsystem the query interpreter inevitably stalls the CPU. It makes sense to store data in columns and, when possible, to process it by columns.

There are two ways to do this:

1. A vector engine. All operations are written for vectors, instead of for separate values. This means you do not need to call operations very often, and dispatching costs are negligible. Operation code contains an optimized internal cycle.
2. Code generation. The code generated for the query has all the indirect calls in it.

This is not done in "normal" databases, because it does not make sense when running simple queries. However, there are exceptions. For example, MemSQL uses code generation to reduce latency when processing SQL queries. (For comparison, analytical DBMSs require optimization of throughput, not latency.)

For CPU efficiency, the query language must be declarative (SQL or MDX), or at least a vector (J, K). The query should only contain implicit loops, allowing for optimization.

{## [Original article](https://clickhouse.com/docs/en/) ##}
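To make the I/O argument above concrete, here is a toy sketch (plain C++, not ClickHouse code) contrasting the two layouts for a single-column aggregation; the column scan touches one packed byte array instead of whole records:

```cpp
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

// Row-oriented layout: every scan drags all columns of a record through memory.
struct HitRow
{
    uint64_t watch_id;
    uint8_t java_enable;
    // ... in a real "wide" table, dozens more columns would sit here ...
};

// Column-oriented layout: each column is a contiguous, compressible array.
struct HitColumns
{
    std::vector<uint64_t> watch_id;
    std::vector<uint8_t> java_enable;
};

int main()
{
    std::vector<HitRow> rows{{89354350662ULL, 1}, {90329509958ULL, 0}, {89953706054ULL, 1}};
    HitColumns cols{{89354350662ULL, 90329509958ULL, 89953706054ULL}, {1, 0, 1}};

    // Row scan touches every field of every row.
    uint64_t by_rows = 0;
    for (const auto & row : rows)
        by_rows += row.java_enable;

    // Column scan reads only the one tightly packed byte array.
    uint64_t by_cols = std::accumulate(cols.java_enable.begin(), cols.java_enable.end(), uint64_t{0});

    std::cout << by_rows << ' ' << by_cols << '\n'; // 2 2
}
```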
@@ -99,13 +99,16 @@ patterns

``` text
pattern
    rule_type
    regexp
    function
pattern
    rule_type
    regexp
    age + precision
    ...
pattern
    rule_type
    regexp
    function
    age + precision
@@ -129,12 +132,20 @@ default

Fields for the `pattern` and `default` sections:

- `regexp` – a template for the metric name.
- `rule_type` – the rule's type (applied only to metrics of the specified types); it is used to separate the rules for checking plain and tagged metrics. Optional field. Default value: `all`.
  If metrics of only one type are used, or if rule-matching performance is not critical, you can omit it. By default, only one set of rules is created for matching. Otherwise, if at least one rule specifies a value other than the default, two independent sets of rules are created: one for plain metrics (the classic root.branch.leaf) and one for tagged metrics (root.branch.leaf;tag1=value1).
  The default rules end up in both sets.
  Possible values:
  - `all` (default) – a universal rule, also assigned by default when the field is not set
  - `plain` – a rule for plain metrics (without tags). The `regexp` field is processed as a regular expression.
  - `tagged` – a rule for tagged metrics (a metric is stored in the DB in the format `someName?tag1=value1&tag2=value2&tag3=value3`); the regular expression must be sorted by tag names, with the value of the `__name__` tag first, if present. The `regexp` field is processed as a regular expression.
  - `tag_list` – a rule for tagged metrics, a simple DSL that makes it easier to specify the regular expression in the Graphite tag format `someName;tag1=value1;tag2=value2`, `someName`, or `tag1=value1;tag2=value2`. The `regexp` field is translated into a `tagged` rule. Sorting by tag names is not required; it is done automatically. A tag's value (but not its name) can be a regular expression (for example `env=(dev|staging)`).
- `regexp` – a template for the metric name (a regular expression or DSL).
- `age` – the minimum age of the data in seconds.
- `precision` – how precisely to define the age of the data, in seconds. Must be a divisor of 86400 (the number of seconds in a day).
- `function` – the name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. Accepted functions: min/max/any/avg. Avg is calculated imprecisely, as the average of averages.

### Configuration example {#configuration-example}
### Configuration example without rule-type separation {#configuration-example}

``` xml
<graphite_rollup>
@@ -169,6 +180,80 @@ default
</graphite_rollup>
```

### Configuration example with rule-type separation {#configuration-typed-example}

``` xml
<graphite_rollup>
    <version_column_name>Version</version_column_name>
    <pattern>
        <rule_type>plain</rule_type>
        <regexp>click_cost</regexp>
        <function>any</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tagged</rule_type>
        <regexp>^((.*)|.)min\?</regexp>
        <function>min</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tagged</rule_type>
        <regexp><![CDATA[^someName\?(.*&)*tag1=value1(&|$)]]></regexp>
        <function>min</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <pattern>
        <rule_type>tag_list</rule_type>
        <regexp>someName;tag2=value2</regexp>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <default>
        <function>max</function>
        <retention>
            <age>0</age>
            <precision>60</precision>
        </retention>
        <retention>
            <age>3600</age>
            <precision>300</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>3600</precision>
        </retention>
    </default>
</graphite_rollup>
```

!!! warning "Warning"
    Data rollup is performed during merges. Usually, merges are not started for old partitions, so for rollup you need to trigger an unscheduled merge using [optimize](../../../sql-reference/statements/optimize.md), or use additional tools, for example [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer).
@@ -48,10 +48,8 @@ CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10
If the type of one of the columns of the Buffer table and of the subordinate table does not match, an error message is written to the server log and the buffer is cleared.
The same happens if the subordinate table does not exist when the buffer is flushed.

If you need to run ALTER for the subordinate table and for the Buffer table, we recommend deleting the Buffer table first, running ALTER on the subordinate table, and then creating the Buffer table again.

!!! attention "Attention"
    In releases before 28 September 2020, running ALTER on a Buffer table breaks the block structure and causes an error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117)), so deleting the buffer and recreating it is the only migration option for this engine. Before running ALTER on a Buffer table, make sure this error is fixed in your version.
    In releases before 26 October 2021, running ALTER on a Buffer table breaks the block structure and causes an error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117) and [#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)), so deleting the buffer and recreating it is the only migration option for this engine. Before running ALTER on a Buffer table, make sure this error is fixed in your version.

If the server is restarted abnormally, the data in the buffer is lost.
@@ -1 +0,0 @@
../../../en/faq/general/dbms-naming.md

docs/zh/faq/general/dbms-naming.md  (new file, 17 lines)
@@ -0,0 +1,17 @@
---
title: "What does “ClickHouse” mean?"
toc_hidden: true
toc_priority: 10
---

# What does “ClickHouse” mean? {#what-does-clickhouse-mean}

It is a combination of "**Click**stream" and data "ware**house**". It comes from Yandex's original use case: in Metrica, ClickHouse was supposed to keep records of all the clicks people made on the internet, and it still does this job. You can read more about this use case on the [ClickHouse history](../../introduction/history.md) page.

This two-part meaning has two consequences:

- The only correct way to write "Click**H**ouse" is with a capital H.
- If you need an abbreviation, use "**CH**". For historical reasons, the abbreviation CK is also popular in China, mainly because one of the earliest talks about ClickHouse in Chinese used this form.

!!! info "Fun fact"
    Years after ClickHouse became famous, this naming approach of combining two words that are each meaningful in their own right was praised as the best way to name a database in [research by Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), an associate professor of databases at Carnegie Mellon University. ClickHouse shared his "best database name ever" award with Postgres.
@@ -18,6 +18,17 @@ $ curl 'http://localhost:8123/'
Ok.
```

The Web UI can be accessed here: `http://localhost:8123/play`.
In health-check scripts, use the `GET /ping` request. This handler always returns "Ok." (with a line feed at the end). Available from version 18.12.13. See also `/replicas_status` to check the replicas' delay.

``` bash
$ curl 'http://localhost:8123/ping'
Ok.
$ curl 'http://localhost:8123/replicas_status'
Ok.
```

Send the request as the `query` URL parameter, or as a POST request; or send the beginning of the query in the `query` parameter and the rest in the POST body (we will explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.

If the request succeeds, you receive the 200 response code and the result in the response body.
@@ -1,59 +1,59 @@
---
toc_priority: 44
toc_title: "Requirements"
toc_title: "Prerequisites"
---

# Requirements {#requirements}
# Prerequisites {#requirements}

## CPU {#cpu}

To install from prebuilt deb packages, use a CPU with the x86_64 architecture and support for the SSE4.2 instruction set. To run ClickHouse on processors that do not support SSE4.2, or on the AArch64 or PowerPC64LE architectures, you should build ClickHouse from source.
If you install ClickHouse from prebuilt DEB/RPM packages, use a CPU with the x86_64 architecture that supports the SSE4.2 instruction set. To run ClickHouse on CPUs that do not support SSE4.2, or on the AArch64 (ARM) and PowerPC64LE (IBM Power) architectures, you should build ClickHouse from source.

ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently in configurations with a large number of cores but a lower clock rate than in configurations with fewer cores and a higher clock rate. For example, 16 cores at 2600 MHz are preferable to 8 cores at 3600 MHz.
ClickHouse implements parallel data processing and uses all available resources. When choosing a processor, note that ClickHouse is more efficient on platforms with many cores at a slightly lower clock frequency than on platforms with fewer cores at a higher clock frequency. For example, ClickHouse runs faster on a 16-core 2.6 GHz CPU than on an 8-core 3.6 GHz CPU.

Using **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a typical workload.
Using **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a normal workload.

## RAM {#ram}

We recommend using a minimum of 4 GB of RAM to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.
We recommend using a minimum of 4 GB of memory to perform non-trivial queries. The ClickHouse server can run with very little memory, but it needs a certain amount of memory to process queries.

The required volume of RAM depends on:
The memory ClickHouse requires depends on:

- The complexity of queries.
- The amount of data that is processed in queries.

To calculate the required volume of RAM, you should estimate the size of the temporary data for [GROUP BY](../sql-reference/statements/select/group-by.md#select-group-by-clause), [DISTINCT](../sql-reference/statements/select/distinct.md#select-distinct), [JOIN](../sql-reference/statements/select/join.md#select-join) and the other operations you use.
To calculate the required amount of memory, you should consider the amount of temporary data needed for [GROUP BY](../sql-reference/statements/select/group-by.md#select-group-by-clause), [DISTINCT](../sql-reference/statements/select/distinct.md#select-distinct), [JOIN](../sql-reference/statements/select/join.md#select-join) and other operations.

ClickHouse can use external memory for temporary data. See [GROUP BY in external memory](../sql-reference/statements/select/group-by.md#select-group-by-in-external-memory) for details.

## Swap File {#swap-file}

Disable the swap file in production environments.

## Storage Subsystem {#storage-subsystem}

You need to have 2 GB of free disk space to install ClickHouse.

The volume of storage required for your data should be calculated separately. The assessment should include:
The storage required for data should be calculated separately. When estimating storage capacity, consider:

- Data volume

    You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store.

- The data compression ratio

    To estimate the compression ratio, load a sample of your data into ClickHouse and compare the actual size of the data with the size of the stored table. For example, clickstream data is usually compressed by 6-10 times.
    To calculate the compression ratio, write sample data into ClickHouse and compare the original data size with the size ClickHouse actually stores. For example, raw user-click data typically compresses by a factor of 6-10.

To calculate the final volume of data to be stored, apply the compression ratio to the estimated data volume. If you plan to store data in several replicas, multiply the estimated volume by the number of replicas.
Divide the original data size by the compression ratio to get the actual storage size required. If you plan to keep the data in several replicas, multiply the estimated volume by the number of replicas.

## Network {#network}

If possible, use networks of 10G class or higher.

Network bandwidth is critical for processing distributed queries with a large amount of intermediate data. Network speed also affects replication processes.

## Software {#software}

ClickHouse is developed primarily for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system.
ClickHouse is developed primarily for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. You should check that the `tzdata` package (for Ubuntu) is installed before installing ClickHouse.

ClickHouse can also work in other operating system families. See details in the [Getting started](../getting-started/index.md) section of the documentation.
@@ -1,23 +1,74 @@
---
toc_hidden_folder: true
toc_priority: 42
toc_title: INDEX
toc_priority: 35
toc_title: ALTER
---

# Manipulating Data Skipping Indices {#manipulations-with-data-skipping-indices}
## ALTER {#query_language_queries_alter}

The following operations are available:
Most `ALTER TABLE` queries modify table settings or data:

- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [FIRST|AFTER name]` - Adds an index description to the table metadata.
- [COLUMN](../../../sql-reference/statements/alter/column.md)
- [PARTITION](../../../sql-reference/statements/alter/partition.md)
- [DELETE](../../../sql-reference/statements/alter/delete.md)
- [UPDATE](../../../sql-reference/statements/alter/update.md)
- [ORDER BY](../../../sql-reference/statements/alter/order-by.md)
- [INDEX](../../../sql-reference/statements/alter/index/index.md)
- [CONSTRAINT](../../../sql-reference/statements/alter/constraint.md)
- [TTL](../../../sql-reference/statements/alter/ttl.md)

- `ALTER TABLE [db].name DROP INDEX name` - Removes the index description from the table metadata and deletes the index files from disk.
!!! note "Note"
    Most `ALTER TABLE` queries are supported only for [\*MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](../../../engines/table-engines/special/merge.md) and [Distributed](../../../engines/table-engines/special/distributed.md).

- `ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name` - Rebuilds the secondary index `name` in the partition `partition_name`. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).
These `ALTER` statements manipulate views:

The first two commands are lightweight in the sense that they only change metadata or remove files.
- [ALTER TABLE ... MODIFY QUERY](../../../sql-reference/statements/alter/view.md) – Modifies a [Materialized view](../create/view.md#materialized) structure.
- [ALTER LIVE VIEW](../../../sql-reference/statements/alter/view.md#alter-live-view) – Refreshes a [Live view](../create/view.md#live-view).

Also, they are replicated, syncing index metadata via ZooKeeper.
These `ALTER` statements modify entities related to role-based access control:

!!! note "Note"
    Index manipulation is supported only for tables with the [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).
- [USER](../../../sql-reference/statements/alter/user.md)
- [ROLE](../../../sql-reference/statements/alter/role.md)
- [QUOTA](../../../sql-reference/statements/alter/quota.md)
- [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md)
- [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md)

The [ALTER TABLE ... MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) statement adds, modifies, or removes a comment on the table, regardless of whether it was set before.

## Mutations {#mutations}

`ALTER` queries that manipulate table data are implemented with a mechanism called "mutations", most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes, similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, that produce new "mutated" versions of parts.

For `*MergeTree` tables, mutations are executed by rewriting whole data parts. There is no atomicity: parts are substituted as soon as the mutated parts are ready, and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated along with data from parts that have not been mutated yet.

Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with `INSERT INTO` queries: data that was inserted into the table before the mutation was submitted will be mutated, and data that was inserted after that will not be. Note that mutations do not block inserts in any way.

A mutation query returns immediately after the mutation entry is added (to ZooKeeper for replicated tables, to the filesystem for non-replicated tables). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations, you can use the [`system.mutations`](../../../operations/system-tables/mutations.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if the ClickHouse server is restarted. There is no way to roll back a mutation once it is submitted, but if a mutation is stuck, it can be cancelled with the [`KILL MUTATION`](../../../sql-reference/statements/misc.md#kill-mutation) query.

Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage-engine parameter); older mutation entries are deleted.

## Synchronicity of ALTER Queries {#synchronicity-of-alter-queries}

For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to ZooKeeper, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all replicas.

For all `ALTER` queries, you can use the [replication_alter_partitions_sync](../../../operations/settings/settings.md#replication-alter-partitions-sync) setting to set up waiting.

With the [replication_wait_for_inactive_replica_timeout](../../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting, you can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries.

!!! info "Note"
    For all `ALTER` queries, if `replication_alter_partitions_sync = 2` and some replicas are not active for longer than the time specified in the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.

For `ALTER TABLE ... UPDATE|DELETE` queries, the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting.
@@ -1 +0,0 @@
../../../../../en/sql-reference/statements/alter/index/index.md

docs/zh/sql-reference/statements/alter/index/index.md  (new file, 23 lines)
@@ -0,0 +1,23 @@
---
toc_hidden_folder: true
toc_priority: 42
toc_title: INDEX
---

# Manipulating Data Skipping Indices {#manipulations-with-data-skipping-indices}

The following operations are available:

- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [FIRST|AFTER name]` - Adds an index description to the table metadata.

- `ALTER TABLE [db].name DROP INDEX name` - Removes the index description from the table metadata and deletes the index files from disk.

- `ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name` - Rebuilds the secondary index `name` in the partition `partition_name`. Implemented as a [mutation](../../../../sql-reference/statements/alter/index.md#mutations).

The first two commands are lightweight in the sense that they only change metadata or remove files.

Also, they are replicated, syncing index metadata via ZooKeeper.

!!! note "Note"
    Index manipulation is supported only for tables with the [`*MergeTree`](../../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../../engines/table-engines/mergetree-family/replication.md) variants).
@@ -1 +0,0 @@
../../../en/sql-reference/statements/exists.md

docs/zh/sql-reference/statements/exists.md  (new file, 12 lines)
@@ -0,0 +1,12 @@
---
toc_priority: 45
toc_title: EXISTS
---

# EXISTS Statement {#exists-statement}

``` sql
EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format]
```

Returns a single column of type `UInt8`, containing the value `0` if the table or database does not exist, or `1` if the table exists in the specified database.
@@ -1 +0,0 @@
../../../en/sql-reference/statements/set.md

docs/zh/sql-reference/statements/set.md  (new file, 23 lines)
@@ -0,0 +1,23 @@
---
toc_priority: 50
toc_title: SET
---

# SET Statement {#query-set}

``` sql
SET param = value
```

Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/index.md) this way.

You can also set all the values from a specified settings profile in a single query.

``` sql
SET profile = 'profile-name-from-the-settings-file'
```

For more information, see [Settings](../../operations/settings/settings.md).
@@ -1 +0,0 @@
../../../en/sql-reference/statements/truncate.md

docs/zh/sql-reference/statements/truncate.md  (new file, 31 lines)
@@ -0,0 +1,31 @@
---
toc_priority: 52
toc_title: TRUNCATE
---

# TRUNCATE Statement {#truncate-statement}

``` sql
TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist.

The `TRUNCATE` query is not supported for the [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) and [Null](../../engines/table-engines/special/null.md) table engines.

You can use the replication_alter_partitions_sync setting to set up waiting for actions to be executed on replicas.

With the replication_wait_for_inactive_replica_timeout setting, you can specify how long (in seconds) to wait for inactive replicas to execute the `TRUNCATE` query.

!!! info "Note"
    If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.
@@ -1 +0,0 @@
../../../en/sql-reference/statements/use.md

docs/zh/sql-reference/statements/use.md  (new file, 16 lines)
@@ -0,0 +1,16 @@
---
toc_priority: 53
toc_title: USE
---

# USE Statement {#use}

``` sql
USE db
```

Sets the current database for the session.

The current database is used for searching for tables if the database is not explicitly specified in the query with a dot before the table name.

This query cannot be made when using the HTTP protocol, since there is no concept of a session.
@@ -194,6 +194,7 @@ namespace
{

void setupTmpPath(Poco::Logger * log, const std::string & path)
try
{
    LOG_DEBUG(log, "Setting up {} to store temporary data in it", path);

@@ -212,6 +213,15 @@ void setupTmpPath(Poco::Logger * log, const std::string & path)
        LOG_DEBUG(log, "Skipped file in temporary path {}", it->path().string());
    }
}
catch (...)
{
    DB::tryLogCurrentException(
        log,
        fmt::format(
            "Caught exception while setup temporary path: {}. It is ok to skip this exception as cleaning old temporary files is not "
            "necessary",
            path));
}

int waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
{
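The change above wraps `setupTmpPath` in a function-try-block so that any failure while cleaning old temporary files is logged and swallowed instead of aborting server startup. A minimal illustration of that C++ construct (hypothetical names, not the server code):

```cpp
// The `try` sits between the signature and the body, so the handler covers
// the whole function; falling off the end of the handler simply returns.
#include <iostream>
#include <stdexcept>
#include <string>

void setupTmpPathSketch(const std::string & path)
try
{
    if (path.empty())
        throw std::runtime_error("bad path");
    std::cout << "cleaned old temporary files in " << path << '\n';
}
catch (...)
{
    // Cleaning old temporary files is optional, so log-and-continue
    // instead of letting the exception escape.
    std::cout << "skipping temporary path cleanup\n";
}

int main()
{
    setupTmpPathSketch("");
    setupTmpPathSketch("/var/lib/clickhouse/tmp/");
}
```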
@@ -3,6 +3,7 @@
#include <algorithm>
#include <memory>
#include <boost/noncopyable.hpp>
#include <base/sort.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Common/HashTable/SmallTable.h>
@@ -557,7 +558,7 @@ public:
    }
    if (limit < answer.size())
    {
        std::nth_element(answer.begin(), answer.begin() + limit, answer.end());
        ::nth_element(answer.begin(), answer.begin() + limit, answer.end());
        answer.resize(limit);
    }
@@ -1,5 +1,7 @@
#pragma once

#include <base/sort.h>

#include <Common/Arena.h>
#include <Common/NaNUtils.h>

@@ -72,7 +74,7 @@ private:
private:
    void sort()
    {
        std::sort(points, points + size,
        ::sort(points, points + size,
            [](const WeightedValue & first, const WeightedValue & second)
            {
                return first.mean < second.mean;
@@ -72,7 +72,7 @@ private:
    using Base = AggregateFunctionNullBase<result_is_nullable, serialize_flag,
        AggregateFunctionIfNullUnary<result_is_nullable, serialize_flag>>;

    inline bool singleFilter(const IColumn ** columns, size_t row_num, size_t num_arguments) const
    inline bool singleFilter(const IColumn ** columns, size_t row_num) const
    {
        const IColumn * filter_column = columns[num_arguments - 1];

@@ -112,7 +112,7 @@ public:
    {
        const ColumnNullable * column = assert_cast<const ColumnNullable *>(columns[0]);
        const IColumn * nested_column = &column->getNestedColumn();
        if (!column->isNullAt(row_num) && singleFilter(columns, row_num, num_arguments))
        if (!column->isNullAt(row_num) && singleFilter(columns, row_num))
        {
            this->setFlag(place);
            this->nested_function->add(this->nestedPlace(place), &nested_column, row_num, arena);
@@ -1,5 +1,7 @@
#pragma once

#include <unordered_set>

#include <AggregateFunctions/AggregateFunctionNull.h>

#include <Columns/ColumnsNumber.h>
@@ -7,6 +9,7 @@
#include <Common/ArenaAllocator.h>
#include <Common/assert_cast.h>
#include <base/arithmeticOverflow.h>
#include <base/sort.h>

#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypesNumber.h>
@@ -14,8 +17,6 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>

#include <unordered_set>

namespace DB
{
@@ -67,7 +68,7 @@ struct AggregateFunctionIntervalLengthSumData
    /// either sort whole container or do so partially merging ranges afterwards
    if (!sorted && !other.sorted)
    {
        std::sort(std::begin(segments), std::end(segments));
        ::sort(std::begin(segments), std::end(segments));
    }
    else
    {
@@ -76,10 +77,10 @@ struct AggregateFunctionIntervalLengthSumData
        const auto end = std::end(segments);

        if (!sorted)
            std::sort(begin, middle);
            ::sort(begin, middle);

        if (!other.sorted)
            std::sort(middle, end);
            ::sort(middle, end);

        std::inplace_merge(begin, middle, end);
    }
@@ -91,7 +92,7 @@ struct AggregateFunctionIntervalLengthSumData
    {
        if (!sorted)
        {
            std::sort(std::begin(segments), std::end(segments));
            ::sort(std::begin(segments), std::end(segments));
            sorted = true;
        }
    }
@@ -1,6 +1,7 @@
#pragma once

#include <unordered_map>
#include <base/sort.h>
#include <AggregateFunctions/AggregateFunctionCombinatorFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/ColumnFixedString.h>
@@ -226,7 +227,7 @@ public:
    {
        keys.push_back(it.first);
    }
    std::sort(keys.begin(), keys.end());
    ::sort(keys.begin(), keys.end());

    // insert using sorted keys to result column
    for (auto & key : keys)
@@ -1,6 +1,7 @@
#pragma once

#include <base/logger_useful.h>
#include <base/sort.h>

#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnsNumber.h>
@@ -142,7 +143,7 @@ public:
    auto & array = this->data(place).value;

    /// Sort by position; for equal position, sort by weight to get deterministic result.
    std::sort(array.begin(), array.end());
    ::sort(array.begin(), array.end());

    for (const auto & point_weight : array)
    {
@@ -6,6 +6,7 @@
#include <Columns/ColumnsNumber.h>
#include <Common/assert_cast.h>
#include <base/range.h>
#include <base/sort.h>
#include <Common/PODArray.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
@@ -76,7 +77,7 @@ struct AggregateFunctionSequenceMatchData final
{
    if (!sorted)
    {
        std::sort(std::begin(events_list), std::end(events_list), Comparator{});
        ::sort(std::begin(events_list), std::end(events_list), Comparator{});
        sorted = true;
    }
}
@@ -226,7 +226,7 @@ public:
    {
        // FIXME why is storing NearestFieldType not enough, and we
        // have to check for decimals again here?
        UInt32 scale = static_cast<const ColumnDecimal<T> &>(key_column).getData().getScale();
        UInt32 scale = static_cast<const ColumnDecimal<T> &>(key_column).getScale();
        it = merged_maps.find(DecimalField<T>(key, scale));
    }
    else
@@ -251,7 +251,7 @@ public:

    if constexpr (is_decimal<T>)
    {
        UInt32 scale = static_cast<const ColumnDecimal<T> &>(key_column).getData().getScale();
        UInt32 scale = static_cast<const ColumnDecimal<T> &>(key_column).getScale();
        merged_maps.emplace(DecimalField<T>(key, scale), std::move(new_values));
    }
    else
@@ -2,6 +2,7 @@

#include <base/types.h>
#include <base/bit_cast.h>
#include <base/sort.h>
#include <Common/HashTable/HashMap.h>

#include <IO/ReadBuffer.h>
@@ -134,7 +135,7 @@ private:
        ++arr_it;
    }

    std::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });
    ::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });

    Float64 threshold = std::ceil(sum_weight * level);
    Float64 accumulated = 0;
@@ -175,7 +176,7 @@ private:
        ++arr_it;
    }

    std::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });
    ::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });

    size_t level_index = 0;
    Float64 accumulated = 0;
@@ -88,7 +88,7 @@ struct QuantileExact : QuantileExactBase<Value, QuantileExact<Value>>
    if (!array.empty())
    {
        size_t n = level < 1 ? level * array.size() : (array.size() - 1);
        nth_element(array.begin(), array.begin() + n, array.end()); /// NOTE: You can think of the radix-select algorithm.
        ::nth_element(array.begin(), array.begin() + n, array.end()); /// NOTE: You can think of the radix-select algorithm.
        return array[n];
    }

@@ -107,7 +107,7 @@ struct QuantileExact : QuantileExactBase<Value, QuantileExact<Value>>
        auto level = levels[indices[i]];

        size_t n = level < 1 ? level * array.size() : (array.size() - 1);
        nth_element(array.begin() + prev_n, array.begin() + n, array.end());
        ::nth_element(array.begin() + prev_n, array.begin() + n, array.end());
        result[indices[i]] = array[n];
        prev_n = n;
    }
@@ -143,7 +143,7 @@ struct QuantileExactExclusive : public QuantileExact<Value>
    else if (n < 1)
        return static_cast<Float64>(array[0]);

    nth_element(array.begin(), array.begin() + n - 1, array.end());
    ::nth_element(array.begin(), array.begin() + n - 1, array.end());
    auto nth_elem = std::min_element(array.begin() + n, array.end());

    return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_elem - array[n - 1]);
@@ -172,7 +172,7 @@ struct QuantileExactExclusive : public QuantileExact<Value>
        result[indices[i]] = static_cast<Float64>(array[0]);
    else
    {
        nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
        ::nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
        auto nth_elem = std::min_element(array.begin() + n, array.end());

        result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_elem - array[n - 1]);
@@ -207,7 +207,7 @@ struct QuantileExactInclusive : public QuantileExact<Value>
        return static_cast<Float64>(array[array.size() - 1]);
    else if (n < 1)
        return static_cast<Float64>(array[0]);
    nth_element(array.begin(), array.begin() + n - 1, array.end());
    ::nth_element(array.begin(), array.begin() + n - 1, array.end());
    auto nth_elem = std::min_element(array.begin() + n, array.end());

    return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_elem - array[n - 1]);
@@ -234,7 +234,7 @@ struct QuantileExactInclusive : public QuantileExact<Value>
        result[indices[i]] = static_cast<Float64>(array[0]);
    else
    {
        nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
        ::nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
        auto nth_elem = std::min_element(array.begin() + n, array.end());

        result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * (static_cast<Float64>(*nth_elem) - array[n - 1]);
@@ -263,7 +263,7 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
    if (!array.empty())
    {
        // sort inputs in ascending order
        std::sort(array.begin(), array.end());
        ::sort(array.begin(), array.end());

        // if level is 0.5 then compute the "low" median of the sorted array
        // by the method of rounding.
@@ -296,7 +296,7 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
    if (!array.empty())
    {
        // sort inputs in ascending order
        std::sort(array.begin(), array.end());
        ::sort(array.begin(), array.end());
        for (size_t i = 0; i < size; ++i)
        {
            auto level = levels[indices[i]];
@@ -345,7 +345,7 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
    if (!array.empty())
    {
        // sort inputs in ascending order
        std::sort(array.begin(), array.end());
        ::sort(array.begin(), array.end());

        // if level is 0.5 then compute the "high" median of the sorted array
        // by the method of rounding.
@@ -370,7 +370,7 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
    if (!array.empty())
    {
        // sort inputs in ascending order
        std::sort(array.begin(), array.end());
        ::sort(array.begin(), array.end());
        for (size_t i = 0; i < size; ++i)
        {
            auto level = levels[indices[i]];
@@ -1,5 +1,7 @@
#pragma once

#include <base/sort.h>

#include <Common/HashTable/HashMap.h>
#include <Common/NaNUtils.h>

@@ -101,7 +103,7 @@ struct QuantileExactWeighted
++i;
}

std::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });
::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });

Float64 threshold = std::ceil(sum_weight * level);
Float64 accumulated = 0;
@@ -151,7 +153,7 @@ struct QuantileExactWeighted
++i;
}

std::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });
::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });

Float64 accumulated = 0;
@@ -90,7 +90,7 @@ namespace detail
/** This function must be called before get-functions. */
void prepare() const
{
std::sort(elems, elems + count);
::sort(elems, elems + count);
}

UInt16 get(double level) const
@@ -183,7 +183,7 @@ namespace detail

/// Sorting an array will not be considered a violation of constancy.
auto & array = elems;
nth_element(array.begin(), array.begin() + n, array.end());
::nth_element(array.begin(), array.begin() + n, array.end());
quantile = array[n];
}

@@ -204,7 +204,7 @@ namespace detail
? level * elems.size()
: (elems.size() - 1);

nth_element(array.begin() + prev_n, array.begin() + n, array.end());
::nth_element(array.begin() + prev_n, array.begin() + n, array.end());

result[level_index] = array[n];
prev_n = n;
@@ -2,6 +2,8 @@

#include <vector>

#include <base/sort.h>

#include <Common/FieldVisitorConvertToNumber.h>
#include <Common/NaNUtils.h>

@@ -64,7 +66,7 @@ struct QuantileLevels
permutation[i] = i;
}

std::sort(permutation.begin(), permutation.end(), [this] (size_t a, size_t b) { return levels[a] < levels[b]; });
::sort(permutation.begin(), permutation.end(), [this] (size_t a, size_t b) { return levels[a] < levels[b]; });
}
};
@@ -4,6 +4,7 @@
#include <algorithm>
#include <climits>
#include <base/types.h>
#include <base/sort.h>
#include <IO/ReadBuffer.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
@@ -15,6 +16,7 @@
#include <Poco/Exception.h>
#include <pcg_random.hpp>


namespace DB
{
struct Settings;
@@ -249,7 +251,7 @@ private:
if (sorted)
return;
sorted = true;
std::sort(samples.begin(), samples.end(), Comparer());
::sort(samples.begin(), samples.end(), Comparer());
}

template <typename ResultType>
@@ -5,6 +5,7 @@
#include <climits>
#include <AggregateFunctions/ReservoirSampler.h>
#include <base/types.h>
#include <base/sort.h>
#include <Common/HashTable/Hash.h>
#include <IO/ReadBuffer.h>
#include <IO/ReadHelpers.h>
@@ -258,7 +259,8 @@ private:
{
if (sorted)
return;
std::sort(samples.begin(), samples.end(), [](const auto & lhs, const auto & rhs) { return lhs.first < rhs.first; });

::sort(samples.begin(), samples.end(), [](const auto & lhs, const auto & rhs) { return lhs.first < rhs.first; });
sorted = true;
}
@@ -1,13 +1,17 @@
#pragma once

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Common/ArenaAllocator.h>

#include <numeric>
#include <algorithm>
#include <utility>

#include <base/sort.h>

#include <Common/ArenaAllocator.h>

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>


namespace DB
{
struct Settings;
@@ -41,7 +45,7 @@ std::pair<RanksArray, Float64> computeRanksAndTieCorrection(const Values & value
/// Save initial positions, than sort indices according to the values.
std::vector<size_t> indexes(size);
std::iota(indexes.begin(), indexes.end(), 0);
std::sort(indexes.begin(), indexes.end(),
::sort(indexes.begin(), indexes.end(),
[&] (size_t lhs, size_t rhs) { return values[lhs] < values[rhs]; });

size_t left = 0;
@@ -15,6 +15,7 @@
#include <Parsers/formatAST.h>
#include <Storages/IStorage.h>
#include <base/insertAtEnd.h>
#include <base/sort.h>
#include <boost/range/adaptor/reversed.hpp>
#include <filesystem>

@@ -632,7 +633,7 @@ BackupEntries makeBackupEntries(const Elements & elements, const ContextPtr & co
throw Exception("Backup must not be empty", ErrorCodes::BACKUP_IS_EMPTY);

/// Check that all backup entries are unique.
std::sort(
::sort(
backup_entries.begin(),
backup_entries.end(),
[](const std::pair<String, std::unique_ptr<IBackupEntry>> & lhs, const std::pair<String, std::unique_ptr<IBackupEntry>> & rhs)
@@ -17,6 +17,7 @@
#include <Common/Exception.h>
#include <Poco/String.h>
#include <algorithm>
#include <base/sort.h>


namespace DB
@@ -74,10 +75,10 @@ AvailableCollationLocales::LocalesVector AvailableCollationLocales::getAvailable
result.push_back(name_and_locale.second);

auto comparator = [] (const LocaleAndLanguage & f, const LocaleAndLanguage & s)
{
return f.locale_name < s.locale_name;
};
std::sort(result.begin(), result.end(), comparator);
{
return f.locale_name < s.locale_name;
};
::sort(result.begin(), result.end(), comparator);

return result;
}
@@ -1,5 +1,3 @@
#include <string.h> // memcpy

#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnString.h>
@@ -9,12 +7,7 @@
#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnCompressed.h>
#include <Columns/MaskOperations.h>

#include <base/unaligned.h>
#include <base/sort.h>

#include <Processors/Transforms/ColumnGathererTransform.h>

#include <Common/Exception.h>
#include <Common/Arena.h>
#include <Common/SipHash.h>
@@ -22,6 +15,9 @@
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
#include <base/unaligned.h>
#include <base/sort.h>
#include <cstring> // memcpy


namespace DB
@@ -127,18 +123,8 @@ size_t ColumnArray::size() const

Field ColumnArray::operator[](size_t n) const
{
size_t offset = offsetAt(n);
size_t size = sizeAt(n);

if (size > max_array_size_as_field)
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array of size {} is too large to be manipulated as single field, maximum size {}",
size, max_array_size_as_field);

Array res(size);

for (size_t i = 0; i < size; ++i)
res[i] = getData()[offset + i];

Field res;
get(n, res);
return res;
}
@@ -152,11 +138,12 @@ void ColumnArray::get(size_t n, Field & res) const
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array of size {} is too large to be manipulated as single field, maximum size {}",
size, max_array_size_as_field);

res = Array(size);
res = Array();
Array & res_arr = DB::get<Array &>(res);
res_arr.reserve(size);

for (size_t i = 0; i < size; ++i)
getData().get(offset + i, res_arr[i]);
res_arr.push_back(getData()[offset + i]);
}
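The rewritten get() swaps "construct size empty Fields, then assign each" for reserve-and-append, so every element is constructed exactly once. The same pattern in isolation (standalone illustration, not ClickHouse's Field type):

#include <string>
#include <vector>

std::vector<std::string> copyRow(const std::vector<std::string> & src)
{
    // Before: std::vector<std::string> out(src.size()); out[i] = src[i];
    // default-constructs each element and then assigns over it.
    std::vector<std::string> out;
    out.reserve(src.size());  // one allocation up front
    for (const auto & s : src)
        out.push_back(s);     // each element constructed exactly once
    return out;
}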
@@ -824,9 +811,9 @@ void ColumnArray::getPermutationImpl(size_t limit, Permutation & res, Comparator
auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };

if (limit)
partial_sort(res.begin(), res.begin() + limit, res.end(), less);
::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
else
std::sort(res.begin(), res.end(), less);
::sort(res.begin(), res.end(), less);
}

void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
@@ -9,7 +9,6 @@
#include <base/sort.h>
#include <base/scope_guard.h>


#include <IO/WriteHelpers.h>

#include <Columns/ColumnsCommon.h>
@@ -32,12 +31,6 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}

template class DecimalPaddedPODArray<Decimal32>;
template class DecimalPaddedPODArray<Decimal64>;
template class DecimalPaddedPODArray<Decimal128>;
template class DecimalPaddedPODArray<Decimal256>;
template class DecimalPaddedPODArray<DateTime64>;

template <is_decimal T>
int ColumnDecimal<T>::compareAt(size_t n, size_t m, const IColumn & rhs_, int) const
{
@@ -131,19 +124,6 @@ void ColumnDecimal<T>::updateHashFast(SipHash & hash) const
template <is_decimal T>
void ColumnDecimal<T>::getPermutation(bool reverse, size_t limit, int , IColumn::Permutation & res) const
{
#if 1 /// TODO: perf test
if (data.size() <= std::numeric_limits<UInt32>::max())
{
PaddedPODArray<UInt32> tmp_res;
permutation(reverse, limit, tmp_res);

res.resize(tmp_res.size());
for (size_t i = 0; i < tmp_res.size(); ++i)
res[i] = tmp_res[i];
return;
}
#endif

permutation(reverse, limit, res);
}

@@ -151,7 +131,7 @@ template <is_decimal T>
void ColumnDecimal<T>::updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_ranges) const
{
auto equals = [this](size_t lhs, size_t rhs) { return data[lhs] == data[rhs]; };
auto sort = [](auto begin, auto end, auto pred) { std::sort(begin, end, pred); };
auto sort = [](auto begin, auto end, auto pred) { ::sort(begin, end, pred); };
auto partial_sort = [](auto begin, auto mid, auto end, auto pred) { ::partial_sort(begin, mid, end, pred); };

if (reverse)
@@ -1,66 +1,21 @@
#pragma once

#include <cmath>

#include <base/sort.h>
#include <base/TypeName.h>
#include <Core/Field.h>
#include <Core/DecimalFunctions.h>
#include <Core/TypeId.h>
#include <Common/typeid_cast.h>
#include <Columns/ColumnVectorHelper.h>
#include <Columns/IColumn.h>
#include <Columns/IColumnImpl.h>
#include <Core/Field.h>
#include <Core/DecimalFunctions.h>
#include <Common/typeid_cast.h>
#include <base/sort.h>
#include <Core/TypeId.h>
#include <base/TypeName.h>

#include <cmath>


namespace DB
{

/// PaddedPODArray extended by Decimal scale
template <typename T>
class DecimalPaddedPODArray : public PaddedPODArray<T>
{
public:
using Base = PaddedPODArray<T>;
using Base::operator[];

DecimalPaddedPODArray(size_t size, UInt32 scale_)
: Base(size),
scale(scale_)
{}

DecimalPaddedPODArray(const DecimalPaddedPODArray & other)
: Base(other.begin(), other.end()),
scale(other.scale)
{}

DecimalPaddedPODArray(DecimalPaddedPODArray && other)
{
this->swap(other);
std::swap(scale, other.scale);
}

DecimalPaddedPODArray & operator=(DecimalPaddedPODArray && other)
{
this->swap(other);
std::swap(scale, other.scale);
return *this;
}

UInt32 getScale() const { return scale; }

private:
UInt32 scale;
};

/// Prevent implicit template instantiation of DecimalPaddedPODArray for common decimal types

extern template class DecimalPaddedPODArray<Decimal32>;
extern template class DecimalPaddedPODArray<Decimal64>;
extern template class DecimalPaddedPODArray<Decimal128>;
extern template class DecimalPaddedPODArray<Decimal256>;
extern template class DecimalPaddedPODArray<DateTime64>;

/// A ColumnVector for Decimals
template <is_decimal T>
class ColumnDecimal final : public COWHelper<ColumnVectorHelper, ColumnDecimal<T>>
@@ -72,16 +27,16 @@ private:
public:
using ValueType = T;
using NativeT = typename T::NativeType;
using Container = DecimalPaddedPODArray<T>;
using Container = PaddedPODArray<T>;

private:
ColumnDecimal(const size_t n, UInt32 scale_)
: data(n, scale_),
: data(n),
scale(scale_)
{}

ColumnDecimal(const ColumnDecimal & src)
: data(src.data),
: data(src.data.begin(), src.data.end()),
scale(src.scale)
{}
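After this change the scale is column metadata only: Container is a plain PaddedPODArray, and the DecimalPaddedPODArray wrapper deleted above no longer carries it inside the storage type. A toy model of the simplified layout (hypothetical names, not the real class):

#include <cstdint>
#include <vector>

// Toy model: the decimal scale lives next to the storage, not inside it,
// so the storage itself can be any plain contiguous array type.
template <typename T>
struct DecimalColumn
{
    std::vector<T> data;  // plain storage, no embedded scale
    uint32_t scale = 0;   // value = data[i] / 10^scale

    double toDouble(size_t i) const
    {
        double divisor = 1;
        for (uint32_t k = 0; k < scale; ++k)
            divisor *= 10;
        return static_cast<double>(data[i]) / divisor;
    }
};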
@@ -195,7 +150,7 @@ public:
const T & getElement(size_t n) const { return data[n]; }
T & getElement(size_t n) { return data[n]; }

UInt32 getScale() const {return scale;}
UInt32 getScale() const { return scale; }

protected:
Container data;
@@ -206,17 +161,17 @@ protected:
{
size_t s = data.size();
res.resize(s);
for (U i = 0; i < s; ++i)
res[i] = i;
for (size_t i = 0; i < s; ++i)
res[i] = static_cast<U>(i);

auto sort_end = res.end();
if (limit && limit < s)
sort_end = res.begin() + limit;

if (reverse)
partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
else
partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
}
};
@@ -192,9 +192,9 @@ void ColumnFixedString::getPermutation(bool reverse, size_t limit, int /*nan_dir
else
{
if (reverse)
std::sort(res.begin(), res.end(), greater(*this));
::sort(res.begin(), res.end(), greater(*this));
else
std::sort(res.begin(), res.end(), less(*this));
::sort(res.begin(), res.end(), less(*this));
}
}
@@ -4,8 +4,6 @@
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <base/map.h>
#include <base/range.h>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
@@ -64,8 +62,9 @@ MutableColumnPtr ColumnMap::cloneResized(size_t new_size) const

Field ColumnMap::operator[](size_t n) const
{
auto array = DB::get<Array>((*nested)[n]);
return Map(std::make_move_iterator(array.begin()), std::make_move_iterator(array.end()));
Field res;
get(n, res);
return res;
}

void ColumnMap::get(size_t n, Field & res) const
@@ -74,11 +73,12 @@ void ColumnMap::get(size_t n, Field & res) const
size_t offset = offsets[n - 1];
size_t size = offsets[n] - offsets[n - 1];

res = Map(size);
res = Map();
auto & map = DB::get<Map &>(res);
map.reserve(size);

for (size_t i = 0; i < size; ++i)
getNestedData().get(offset + i, map[i]);
map.push_back(getNestedData()[offset + i]);
}

bool ColumnMap::isDefaultAt(size_t n) const
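The operator[] rewrites in these column hunks all follow the same shape: implement the checked get(n, res) once and let operator[] delegate to it, so the size limits and element access logic live in a single place. A minimal sketch of that delegation (illustrative types only):

#include <vector>

struct MyColumn
{
    std::vector<int> values;

    // The one checked accessor; bounds and conversion logic live here.
    void get(size_t n, int & res) const { res = values.at(n); }

    // operator[] delegates instead of duplicating the logic.
    int operator[](size_t n) const
    {
        int res;
        get(n, res);
        return res;
    }
};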
@@ -335,9 +335,9 @@ void ColumnString::getPermutationImpl(size_t limit, Permutation & res, Comparato
auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };

if (limit)
partial_sort(res.begin(), res.begin() + limit, res.end(), less);
::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
else
std::sort(res.begin(), res.end(), less);
::sort(res.begin(), res.end(), less);
}

void ColumnString::getPermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res) const
@@ -1,5 +1,6 @@
#include <Columns/ColumnTuple.h>

#include <base/sort.h>
#include <Columns/IColumnImpl.h>
#include <Columns/ColumnCompressed.h>
#include <Core/Field.h>
@@ -9,9 +10,6 @@
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/typeid_cast.h>
#include <base/sort.h>
#include <base/map.h>
#include <base/range.h>
#include <DataTypes/Serializations/SerializationInfoTuple.h>


@@ -101,17 +99,21 @@ MutableColumnPtr ColumnTuple::cloneResized(size_t new_size) const

Field ColumnTuple::operator[](size_t n) const
{
return collections::map<Tuple>(columns, [n] (const auto & column) { return (*column)[n]; });
Field res;
get(n, res);
return res;
}

void ColumnTuple::get(size_t n, Field & res) const
{
const size_t tuple_size = columns.size();
Tuple tuple(tuple_size);
for (const auto i : collections::range(0, tuple_size))
columns[i]->get(n, tuple[i]);

res = tuple;
res = Tuple();
Tuple & res_tuple = DB::get<Tuple &>(res);
res_tuple.reserve(tuple_size);

for (size_t i = 0; i < tuple_size; ++i)
res_tuple.push_back((*columns[i])[n]);
}

bool ColumnTuple::isDefaultAt(size_t n) const
@@ -383,9 +385,9 @@ void ColumnTuple::getPermutationImpl(size_t limit, Permutation & res, LessOperat
limit = 0;

if (limit)
partial_sort(res.begin(), res.begin() + limit, res.end(), less);
::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
else
std::sort(res.begin(), res.end(), less);
::sort(res.begin(), res.end(), less);
}

void ColumnTuple::updatePermutationImpl(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges, const Collator * collator) const
@@ -483,7 +485,7 @@ void ColumnTuple::getExtremes(Field & min, Field & max) const
Tuple min_tuple(tuple_size);
Tuple max_tuple(tuple_size);

for (const auto i : collections::range(0, tuple_size))
for (size_t i = 0; i < tuple_size; ++i)
columns[i]->getExtremes(min_tuple[i], max_tuple[i]);

min = min_tuple;
@@ -504,7 +506,7 @@ bool ColumnTuple::structureEquals(const IColumn & rhs) const
if (tuple_size != rhs_tuple->columns.size())
return false;

for (const auto i : collections::range(0, tuple_size))
for (size_t i = 0; i < tuple_size; ++i)
if (!columns[i]->structureEquals(*rhs_tuple->columns[i]))
return false;
@@ -1,6 +1,5 @@
#include "ColumnVector.h"

#include <pdqsort.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnCompressed.h>
#include <Columns/MaskOperations.h>
@@ -118,7 +117,6 @@ struct ColumnVector<T>::equals
bool operator()(size_t lhs, size_t rhs) const { return CompareHelper<T>::equals(parent.data[lhs], parent.data[rhs], nan_direction_hint); }
};


namespace
{
template <typename T>
@@ -158,9 +156,9 @@ void ColumnVector<T>::getPermutation(bool reverse, size_t limit, int nan_directi
res[i] = i;

if (reverse)
partial_sort(res.begin(), res.begin() + limit, res.end(), greater(*this, nan_direction_hint));
::partial_sort(res.begin(), res.begin() + limit, res.end(), greater(*this, nan_direction_hint));
else
partial_sort(res.begin(), res.begin() + limit, res.end(), less(*this, nan_direction_hint));
::partial_sort(res.begin(), res.begin() + limit, res.end(), less(*this, nan_direction_hint));
}
else
{
@@ -204,16 +202,16 @@ void ColumnVector<T>::getPermutation(bool reverse, size_t limit, int nan_directi
res[i] = i;

if (reverse)
pdqsort(res.begin(), res.end(), greater(*this, nan_direction_hint));
::sort(res.begin(), res.end(), greater(*this, nan_direction_hint));
else
pdqsort(res.begin(), res.end(), less(*this, nan_direction_hint));
::sort(res.begin(), res.end(), less(*this, nan_direction_hint));
}
}

template <typename T>
void ColumnVector<T>::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const
{
auto sort = [](auto begin, auto end, auto pred) { pdqsort(begin, end, pred); };
auto sort = [](auto begin, auto end, auto pred) { ::sort(begin, end, pred); };
auto partial_sort = [](auto begin, auto mid, auto end, auto pred) { ::partial_sort(begin, mid, end, pred); };

if (reverse)
@@ -528,7 +528,7 @@ protected:
template <typename Derived>
void getIndicesOfNonDefaultRowsImpl(Offsets & indices, size_t from, size_t limit) const;

/// Uses std::sort and partial_sort as default algorithms.
/// Uses sort and partial_sort as default algorithms.
/// Implements 'less' and 'equals' via comparator.
/// If 'less' and 'equals' can be implemented more optimal
/// (e.g. with less number of comparisons), you can use
@@ -11,6 +11,7 @@
#include <base/sort.h>
#include <algorithm>


namespace DB
{
namespace ErrorCodes
@@ -203,7 +204,7 @@ void IColumn::updatePermutationImpl(
limit, res, equal_ranges,
[&cmp](size_t lhs, size_t rhs) { return cmp(lhs, rhs) < 0; },
[&cmp](size_t lhs, size_t rhs) { return cmp(lhs, rhs) == 0; },
[](auto begin, auto end, auto pred) { std::sort(begin, end, pred); },
[](auto begin, auto end, auto pred) { ::sort(begin, end, pred); },
[](auto begin, auto mid, auto end, auto pred) { ::partial_sort(begin, mid, end, pred); });
}
@@ -20,6 +20,7 @@
#include <Common/Exception.h>
#include <Common/getResource.h>
#include <base/errnoToString.h>
#include <base/sort.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

@@ -105,7 +106,7 @@ static ElementIdentifier getElementIdentifier(Node * element)
std::string value = node->nodeValue();
attrs_kv.push_back(std::make_pair(name, value));
}
std::sort(attrs_kv.begin(), attrs_kv.end());
::sort(attrs_kv.begin(), attrs_kv.end());

ElementIdentifier res;
res.push_back(element->nodeName());
@@ -443,7 +444,7 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string &
}
}

std::sort(files.begin(), files.end());
::sort(files.begin(), files.end());

return files;
}
@@ -1,6 +1,7 @@
#pragma once

#include <base/defines.h>
#include <base/sort.h>

#include <vector>
#include <utility>
@@ -489,14 +490,14 @@ private:
}
}

std::sort(intervals_sorted_by_left_asc.begin(), intervals_sorted_by_left_asc.end(), [](auto & lhs, auto & rhs)
::sort(intervals_sorted_by_left_asc.begin(), intervals_sorted_by_left_asc.end(), [](auto & lhs, auto & rhs)
{
auto & lhs_interval = getInterval(lhs);
auto & rhs_interval = getInterval(rhs);
return lhs_interval.left < rhs_interval.left;
});

std::sort(intervals_sorted_by_right_desc.begin(), intervals_sorted_by_right_desc.end(), [](auto & lhs, auto & rhs)
::sort(intervals_sorted_by_right_desc.begin(), intervals_sorted_by_right_desc.end(), [](auto & lhs, auto & rhs)
{
auto & lhs_interval = getInterval(lhs);
auto & rhs_interval = getInterval(rhs);
@@ -681,7 +682,7 @@ private:
size_t size = points.size();
size_t middle_element_index = size / 2;

std::nth_element(points.begin(), points.begin() + middle_element_index, points.end());
::nth_element(points.begin(), points.begin() + middle_element_index, points.end());

/** We should not get median as average of middle_element_index and middle_element_index - 1
* because we want point in node to intersect some interval.
@@ -41,6 +41,7 @@ private:

ObjectPtr object;
bool in_use = false;
std::atomic<bool> is_expired = false;
PoolBase & pool;
};

@@ -87,6 +88,14 @@ public:
Object & operator*() & { return *data->data.object; }
const Object & operator*() const & { return *data->data.object; }

/**
* Expire an object to make it reallocated later.
*/
void expire()
{
data->data.is_expired = true;
}

bool isNull() const { return data == nullptr; }

PoolBase * getPool() const
@@ -112,9 +121,22 @@ public:
while (true)
{
for (auto & item : items)
{
if (!item->in_use)
return Entry(*item);

{
if (likely(!item->is_expired))
{
return Entry(*item);
}
else
{
expireObject(item->object);
item->object = allocObject();
item->is_expired = false;
return Entry(*item);
}
}
}
if (items.size() < max_items)
{
ObjectPtr object = allocObject();
@@ -139,6 +161,12 @@ public:
items.emplace_back(std::make_shared<PooledObject>(allocObject(), *this));
}

inline size_t size()
{
std::unique_lock lock(mutex);
return items.size();
}

private:
/** The maximum size of the pool. */
unsigned max_items;
@@ -162,4 +190,5 @@ protected:

/** Creates a new object to put into the pool. */
virtual ObjectPtr allocObject() = 0;
virtual void expireObject(ObjectPtr) {}
};
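The new is_expired flag gives pooled objects a soft-invalidation path: a client calls Entry::expire(), and the next get() that reaches that slot calls expireObject() and reallocates via allocObject() instead of handing the stale object out. A condensed sketch of that lifecycle (illustrative only; the real pool is templated, reference-counted, and mutex-protected):

#include <atomic>
#include <memory>

struct Conn { bool healthy = true; };

struct Slot
{
    std::shared_ptr<Conn> object = std::make_shared<Conn>();
    std::atomic<bool> is_expired{false};
};

// On checkout, an expired slot is transparently replaced with a fresh object.
std::shared_ptr<Conn> checkout(Slot & slot)
{
    if (slot.is_expired)
    {
        slot.object = std::make_shared<Conn>(); // drop and recreate
        slot.is_expired = false;
    }
    return slot.object;
}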
@@ -7,6 +7,7 @@
#include <functional>
#include <base/types.h>
#include <base/scope_guard.h>
#include <base/sort.h>
#include <Common/PoolBase.h>
#include <Common/ProfileEvents.h>
#include <Common/NetException.h>
@@ -178,7 +179,7 @@ PoolWithFailoverBase<TNestedPool>::getShuffledPools(
shuffled_pools.reserve(nested_pools.size());
for (size_t i = 0; i < nested_pools.size(); ++i)
shuffled_pools.push_back(ShuffledPool{nested_pools[i].get(), &pool_states[i], i, 0});
std::sort(
::sort(
shuffled_pools.begin(), shuffled_pools.end(),
[](const ShuffledPool & lhs, const ShuffledPool & rhs)
{
@@ -5,6 +5,8 @@

#include <boost/range/adaptor/reversed.hpp>

#include <base/sort.h>

#include <Common/AllocatorWithMemoryTracking.h>
#include <Common/ArenaWithFreeLists.h>
#include <Common/HashTable/Hash.h>
@@ -242,7 +244,7 @@ public:
}
}

std::sort(counter_list.begin(), counter_list.end(), [](Counter * l, Counter * r) { return *l > *r; });
::sort(counter_list.begin(), counter_list.end(), [](Counter * l, Counter * r) { return *l > *r; });

if (counter_list.size() > m_capacity)
{
@@ -12,6 +12,8 @@
//#include <iostream>
#include <filesystem>

#include <base/sort.h>

/**

ELF object can contain three different places with symbol names and addresses:
@@ -498,8 +500,8 @@ void SymbolIndex::update()
{
dl_iterate_phdr(collectSymbols, &data);

std::sort(data.objects.begin(), data.objects.end(), [](const Object & a, const Object & b) { return a.address_begin < b.address_begin; });
std::sort(data.symbols.begin(), data.symbols.end(), [](const Symbol & a, const Symbol & b) { return a.address_begin < b.address_begin; });
::sort(data.objects.begin(), data.objects.end(), [](const Object & a, const Object & b) { return a.address_begin < b.address_begin; });
::sort(data.symbols.begin(), data.symbols.end(), [](const Symbol & a, const Symbol & b) { return a.address_begin < b.address_begin; });

/// We found symbols both from loaded program headers and from ELF symbol tables.
data.symbols.erase(std::unique(data.symbols.begin(), data.symbols.end(), [](const Symbol & a, const Symbol & b)
src/Common/tests/gtest_poolbase.cpp (new file, 52 lines)
@@ -0,0 +1,52 @@
#include <memory>
#include <gtest/gtest.h>
#include <Common/PoolBase.h>
#include <Poco/Logger.h>
using namespace DB;

class PoolObject
{
public:
int x = 0;
};

class MyPoolBase : public PoolBase<PoolObject>
{
public:
using Object = PoolBase<PoolObject>::Object;
using ObjectPtr = std::shared_ptr<Object>;
using Ptr = PoolBase<PoolObject>::Ptr;

int last_destroy_value = 0;
MyPoolBase() : PoolBase<PoolObject>(100, &Poco::Logger::get("MyPoolBase")) { }

protected:
ObjectPtr allocObject() override { return std::make_shared<Object>(); }

void expireObject(ObjectPtr obj) override
{
LOG_TRACE(log, "expire object");
ASSERT_TRUE(obj->x == 100);
last_destroy_value = obj->x;
}
};

TEST(PoolBase, testDestroy1)
{
MyPoolBase pool;
{
auto obj_entry = pool.get(-1);
ASSERT_TRUE(!obj_entry.isNull());
obj_entry->x = 100;
obj_entry.expire();
}
ASSERT_EQ(1, pool.size());

{
auto obj_entry = pool.get(-1);
ASSERT_TRUE(!obj_entry.isNull());
ASSERT_EQ(obj_entry->x, 0);
ASSERT_EQ(1, pool.size());
}
ASSERT_EQ(100, pool.last_destroy_value);
}
@@ -5,7 +5,6 @@
#include <Common/CurrentThread.h>
#include <base/logger_useful.h>
#include <chrono>
#include <base/scope_guard.h>


namespace DB
@@ -246,7 +245,6 @@ void BackgroundSchedulePool::threadFunction()
setThreadName(thread_name.c_str());

attachToThreadGroup();
SCOPE_EXIT({ CurrentThread::detachQueryIfNotDetached(); });

while (!shutdown)
{
@@ -273,7 +271,6 @@ void BackgroundSchedulePool::delayExecutionThreadFunction()
setThreadName((thread_name + "/D").c_str());

attachToThreadGroup();
SCOPE_EXIT({ CurrentThread::detachQueryIfNotDetached(); });

while (!shutdown)
{
@@ -12,6 +12,7 @@
#include <Columns/ColumnSparse.h>

#include <iterator>
#include <base/sort.h>


namespace DB
@@ -538,7 +539,7 @@ Block Block::sortColumns() const
for (auto it = index_by_name.begin(); it != index_by_name.end(); ++it)
sorted_index_by_name[i++] = it;
}
std::sort(sorted_index_by_name.begin(), sorted_index_by_name.end(), [](const auto & lhs, const auto & rhs)
::sort(sorted_index_by_name.begin(), sorted_index_by_name.end(), [](const auto & lhs, const auto & rhs)
{
return lhs->first < rhs->first;
});
@@ -1,4 +1,6 @@
#include <Core/NamesAndTypes.h>

#include <base/sort.h>
#include <Common/HashTable/HashMap.h>
#include <DataTypes/DataTypeFactory.h>
#include <IO/ReadBuffer.h>
@@ -113,7 +115,7 @@ bool NamesAndTypesList::isSubsetOf(const NamesAndTypesList & rhs) const
{
NamesAndTypes vector(rhs.begin(), rhs.end());
vector.insert(vector.end(), begin(), end());
std::sort(vector.begin(), vector.end());
::sort(vector.begin(), vector.end());
return std::unique(vector.begin(), vector.end()) == vector.begin() + rhs.size();
}

@@ -121,16 +123,16 @@ size_t NamesAndTypesList::sizeOfDifference(const NamesAndTypesList & rhs) const
{
NamesAndTypes vector(rhs.begin(), rhs.end());
vector.insert(vector.end(), begin(), end());
std::sort(vector.begin(), vector.end());
::sort(vector.begin(), vector.end());
return (std::unique(vector.begin(), vector.end()) - vector.begin()) * 2 - size() - rhs.size();
}

void NamesAndTypesList::getDifference(const NamesAndTypesList & rhs, NamesAndTypesList & deleted, NamesAndTypesList & added) const
{
NamesAndTypes lhs_vector(begin(), end());
std::sort(lhs_vector.begin(), lhs_vector.end());
::sort(lhs_vector.begin(), lhs_vector.end());
NamesAndTypes rhs_vector(rhs.begin(), rhs.end());
std::sort(rhs_vector.begin(), rhs_vector.end());
::sort(rhs_vector.begin(), rhs_vector.end());

std::set_difference(lhs_vector.begin(), lhs_vector.end(), rhs_vector.begin(), rhs_vector.end(),
std::back_inserter(deleted));
@@ -1,5 +1,7 @@
#include <DataTypes/EnumValues.h>
#include <boost/algorithm/string.hpp>
#include <base/sort.h>


namespace DB
{
@@ -18,7 +20,7 @@ EnumValues<T>::EnumValues(const Values & values_)
if (values.empty())
throw Exception{"DataTypeEnum enumeration cannot be empty", ErrorCodes::EMPTY_DATA_PASSED};

std::sort(std::begin(values), std::end(values), [] (auto & left, auto & right)
::sort(std::begin(values), std::end(values), [] (auto & left, auto & right)
{
return left.second < right.second;
});
@@ -37,10 +37,11 @@ void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr) con
{
size_t size;
readVarUInt(size, istr);
field = Array(size);
field = Array();
Array & arr = get<Array &>(field);
arr.reserve(size);
for (size_t i = 0; i < size; ++i)
nested->deserializeBinary(arr[i], istr);
nested->deserializeBinary(arr.emplace_back(), istr);
}
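Using arr.emplace_back() as the deserialization target relies on emplace_back returning a reference to the new element since C++17, letting the nested serializer write directly into freshly appended storage. The idiom in isolation (stand-in reader function, not the real serializer):

#include <string>
#include <vector>

void readInto(std::string & dst) { dst = "value"; }  // stand-in deserializer

void fill(std::vector<std::string> & arr, size_t size)
{
    arr.reserve(size);
    for (size_t i = 0; i < size; ++i)
        readInto(arr.emplace_back());  // C++17: emplace_back returns T&
}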
@@ -53,13 +53,15 @@ void SerializationMap::deserializeBinary(Field & field, ReadBuffer & istr) const
{
size_t size;
readVarUInt(size, istr);
field = Map(size);
for (auto & elem : field.get<Map &>())
field = Map();
Map & map = field.get<Map &>();
map.reserve(size);
for (size_t i = 0; i < size; ++i)
{
Tuple tuple(2);
key->deserializeBinary(tuple[0], istr);
value->deserializeBinary(tuple[1], istr);
elem = std::move(tuple);
map.push_back(std::move(tuple));
}
}
@@ -1,4 +1,3 @@
#include <base/range.h>
#include <DataTypes/Serializations/SerializationTuple.h>
#include <DataTypes/Serializations/SerializationInfoTuple.h>
#include <DataTypes/DataTypeTuple.h>
@@ -44,11 +43,11 @@ void SerializationTuple::deserializeBinary(Field & field, ReadBuffer & istr) con
{
const size_t size = elems.size();

Tuple tuple(size);
for (const auto i : collections::range(0, size))
elems[i]->deserializeBinary(tuple[i], istr);

field = tuple;
field = Tuple();
Tuple & tuple = get<Tuple &>(field);
tuple.reserve(size);
for (size_t i = 0; i < size; ++i)
elems[i]->deserializeBinary(tuple.emplace_back(), istr);
}

void SerializationTuple::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
@@ -73,7 +72,7 @@ static void addElementSafe(size_t num_elems, IColumn & column, F && impl)

// Check that all columns now have the same size.
size_t new_size = column.size();
for (auto i : collections::range(1, num_elems))
for (size_t i = 1; i < num_elems; ++i)
{
const auto & element_column = extractElementColumn(column, i);
if (element_column.size() != new_size)
@@ -87,7 +86,7 @@ static void addElementSafe(size_t num_elems, IColumn & column, F && impl)
}
catch (...)
{
for (const auto & i : collections::range(0, num_elems))
for (size_t i = 0; i < num_elems; ++i)
{
auto & element_column = extractElementColumn(column, i);
if (element_column.size() > old_size)
@@ -102,7 +101,7 @@ void SerializationTuple::deserializeBinary(IColumn & column, ReadBuffer & istr)
{
addElementSafe(elems.size(), column, [&]
{
for (const auto & i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
elems[i]->deserializeBinary(extractElementColumn(column, i), istr);
});
}
@@ -110,7 +109,7 @@ void SerializationTuple::deserializeBinary(IColumn & column, ReadBuffer & istr)
void SerializationTuple::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
writeChar('(', ostr);
for (const auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
{
if (i != 0)
writeChar(',', ostr);
@@ -126,7 +125,7 @@ void SerializationTuple::deserializeText(IColumn & column, ReadBuffer & istr, co

addElementSafe(elems.size(), column, [&]
{
for (const auto i : collections::range(0, size))
for (size_t i = 0; i < size; ++i)
{
skipWhitespaceIfAny(istr);
if (i != 0)
@@ -158,7 +157,7 @@ void SerializationTuple::serializeTextJSON(const IColumn & column, size_t row_nu
&& have_explicit_names)
{
writeChar('{', ostr);
for (const auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
{
if (i != 0)
{
@@ -173,7 +172,7 @@ void SerializationTuple::serializeTextJSON(const IColumn & column, size_t row_nu
else
{
writeChar('[', ostr);
for (const auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
{
if (i != 0)
writeChar(',', ostr);
@@ -195,7 +194,7 @@ void SerializationTuple::deserializeTextJSON(IColumn & column, ReadBuffer & istr
addElementSafe(elems.size(), column, [&]
{
// Require all elements but in arbitrary order.
for (auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
{
if (i > 0)
{
@@ -226,7 +225,7 @@ void SerializationTuple::deserializeTextJSON(IColumn & column, ReadBuffer & istr

addElementSafe(elems.size(), column, [&]
{
for (const auto i : collections::range(0, size))
for (size_t i = 0; i < size; ++i)
{
skipWhitespaceIfAny(istr);
if (i != 0)
@@ -246,7 +245,7 @@ void SerializationTuple::deserializeTextJSON(IColumn & column, ReadBuffer & istr
void SerializationTuple::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
writeCString("<tuple>", ostr);
for (const auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
{
writeCString("<elem>", ostr);
elems[i]->serializeTextXML(extractElementColumn(column, i), row_num, ostr, settings);
@@ -257,7 +256,7 @@ void SerializationTuple::serializeTextXML(const IColumn & column, size_t row_num

void SerializationTuple::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
for (const auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
{
if (i != 0)
writeChar(settings.csv.tuple_delimiter, ostr);
@@ -270,7 +269,7 @@ void SerializationTuple::deserializeTextCSV(IColumn & column, ReadBuffer & istr,
addElementSafe(elems.size(), column, [&]
{
const size_t size = elems.size();
for (const auto i : collections::range(0, size))
for (size_t i = 0; i < size; ++i)
{
if (i != 0)
{
@@ -362,7 +361,7 @@ void SerializationTuple::serializeBinaryBulkWithMultipleStreams(
{
auto * tuple_state = checkAndGetState<SerializeBinaryBulkStateTuple>(state);

for (const auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
{
const auto & element_col = extractElementColumn(column, i);
elems[i]->serializeBinaryBulkWithMultipleStreams(element_col, offset, limit, settings, tuple_state->states[i]);
@@ -382,7 +381,7 @@ void SerializationTuple::deserializeBinaryBulkWithMultipleStreams(
auto & column_tuple = assert_cast<ColumnTuple &>(*mutable_column);

settings.avg_value_size_hint = 0;
for (const auto i : collections::range(0, elems.size()))
for (size_t i = 0; i < elems.size(); ++i)
elems[i]->deserializeBinaryBulkWithMultipleStreams(column_tuple.getColumnPtr(i), limit, settings, tuple_state->states[i], cache);
}
@@ -12,6 +12,7 @@

#include <base/logger_useful.h>
#include <base/scope_guard_safe.h>
#include <base/sort.h>
#include <iomanip>
#include <filesystem>

@@ -151,7 +152,7 @@ DatabaseTablesIteratorPtr DatabaseLazy::getTablesIterator(ContextPtr, const Filt
if (!filter_by_table_name || filter_by_table_name(table_name))
filtered_tables.push_back(table_name);
}
std::sort(filtered_tables.begin(), filtered_tables.end());
::sort(filtered_tables.begin(), filtered_tables.end());
return std::make_unique<DatabaseLazyIterator>(*this, std::move(filtered_tables));
}
@@ -142,7 +142,7 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const
"It's possible if the first replica is not fully created yet "
"or if the last replica was just dropped or due to logical error", database_name);
Int32 cversion = stat.cversion;
std::sort(hosts.begin(), hosts.end());
::sort(hosts.begin(), hosts.end());

std::vector<zkutil::ZooKeeper::FutureGet> futures;
futures.reserve(hosts.size());
@@ -13,10 +13,12 @@
#include <base/itoa.h>
#include <base/map.h>
#include <base/range.h>
#include <base/sort.h>
#include <Dictionaries/DictionarySource.h>
#include <Dictionaries/DictionaryFactory.h>
#include <Functions/FunctionHelpers.h>


namespace DB
{
namespace ErrorCodes
@@ -145,7 +147,7 @@ static void validateKeyTypes(const DataTypes & key_types)
template <typename T, typename Comp>
size_t sortAndUnique(std::vector<T> & vec, Comp comp)
{
std::sort(vec.begin(), vec.end(),
::sort(vec.begin(), vec.end(),
[&](const auto & a, const auto & b) { return comp(a, b) < 0; });

auto new_end = std::unique(vec.begin(), vec.end(),
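sortAndUnique is the classic sort-then-unique deduplication: sorting makes equal elements adjacent, std::unique compacts them, and erase trims the leftover tail. The idiom on plain ints (illustrative only; the return value here is an assumption about what such a helper might report):

#include <algorithm>
#include <vector>

size_t sortAndUniqueInts(std::vector<int> & vec)
{
    std::sort(vec.begin(), vec.end());
    auto new_end = std::unique(vec.begin(), vec.end()); // shifts unique items forward
    size_t removed = static_cast<size_t>(vec.end() - new_end);
    vec.erase(new_end, vec.end());                      // drop the leftover tail
    return removed;
}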
@@ -3,6 +3,8 @@
#include <numeric>
#include <cmath>

#include <base/sort.h>

#include <Columns/ColumnArray.h>
#include <Columns/ColumnTuple.h>
#include <DataTypes/DataTypeArray.h>
@@ -250,7 +252,7 @@ void IPolygonDictionary::loadData()
polygon_ids.emplace_back(polygon, i);
}

std::sort(polygon_ids.begin(), polygon_ids.end(), [& areas](const auto & lhs, const auto & rhs)
::sort(polygon_ids.begin(), polygon_ids.end(), [& areas](const auto & lhs, const auto & rhs)
{
return areas[lhs.second] < areas[rhs.second];
});
@@ -3,11 +3,13 @@
#include <Common/ThreadPool.h>

#include <base/logger_useful.h>
#include <base/sort.h>

#include <algorithm>
#include <thread>
#include <numeric>


namespace DB
{

@@ -87,7 +89,7 @@ std::vector<Coord> SlabsPolygonIndex::uniqueX(const std::vector<Polygon> & polyg
}

/** Making all_x sorted and distinct */
std::sort(all_x.begin(), all_x.end());
::sort(all_x.begin(), all_x.end());
all_x.erase(std::unique(all_x.begin(), all_x.end()), all_x.end());

return all_x;
@@ -104,7 +106,7 @@ void SlabsPolygonIndex::indexBuild(const std::vector<Polygon> & polygons)
}

/** Sorting edges of (left_point, right_point, polygon_id) in that order */
std::sort(all_edges.begin(), all_edges.end(), Edge::compareByLeftPoint);
::sort(all_edges.begin(), all_edges.end(), Edge::compareByLeftPoint);
for (size_t i = 0; i != all_edges.size(); ++i)
all_edges[i].edge_id = i;

@@ -298,7 +300,7 @@ bool SlabsPolygonIndex::find(const Point & point, size_t & id) const
} while (pos != 0);

/** Sort all ids and find smallest with odd occurrences */
std::sort(intersections.begin(), intersections.end());
::sort(intersections.begin(), intersections.end());
for (size_t i = 0; i < intersections.size(); i += 2)
{
if (i + 1 == intersections.size() || intersections[i] != intersections[i + 1])
@@ -12,6 +12,7 @@
#include <absl/container/flat_hash_set.h>

#include <base/unaligned.h>
#include <base/sort.h>
#include <Common/randomSeed.h>
#include <Common/Arena.h>
#include <Common/ArenaWithFreeLists.h>
@@ -24,6 +25,7 @@
#include <Dictionaries/ICacheDictionaryStorage.h>
#include <Dictionaries/DictionaryHelpers.h>


namespace CurrentMetrics
{
extern const Metric Write;
@@ -1092,7 +1094,7 @@ private:
}

/// Sort blocks by offset before start async io requests
std::sort(blocks_to_request.begin(), blocks_to_request.end());
::sort(blocks_to_request.begin(), blocks_to_request.end());

file_buffer.fetchBlocks(configuration.read_buffer_blocks_size, blocks_to_request, [&](size_t block_index, char * block_data)
{
@@ -11,6 +11,14 @@
#include <fstream>
#include <unistd.h>

#include <Disks/DiskFactory.h>
#include <Disks/DiskMemory.h>
#include <Disks/DiskRestartProxy.h>
#include <Common/randomSeed.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromTemporaryFile.h>
#include <IO/WriteHelpers.h>
#include <base/logger_useful.h>

namespace CurrentMetrics
{
@@ -25,7 +33,7 @@ namespace ErrorCodes
extern const int UNKNOWN_ELEMENT_IN_CONFIG;
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int PATH_ACCESS_DENIED;
extern const int INCORRECT_DISK_INDEX;
extern const int LOGICAL_ERROR;
extern const int CANNOT_TRUNCATE_FILE;
extern const int CANNOT_UNLINK;
extern const int CANNOT_RMDIR;
@@ -61,9 +69,6 @@ static void loadDiskLocalConfig(const String & name,
throw Exception("Disk path must end with /. Disk " + name, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
}

if (!FS::canRead(path) || !FS::canWrite(path))
throw Exception("There is no RW access to the disk " + name + " (" + path + ")", ErrorCodes::PATH_ACCESS_DENIED);

bool has_space_ratio = config.has(config_prefix + ".keep_free_space_ratio");

if (config.has(config_prefix + ".keep_free_space_bytes") && has_space_ratio)
@@ -113,13 +118,48 @@ public:

UInt64 getSize() const override { return size; }

DiskPtr getDisk(size_t i) const override;
DiskPtr getDisk(size_t i) const override
{
if (i != 0)
throw Exception("Can't use i != 0 with single disk reservation. It's a bug", ErrorCodes::LOGICAL_ERROR);
return disk;
}

Disks getDisks() const override { return {disk}; }

void update(UInt64 new_size) override;
void update(UInt64 new_size) override
{
std::lock_guard lock(DiskLocal::reservation_mutex);
disk->reserved_bytes -= size;
size = new_size;
disk->reserved_bytes += size;
}

~DiskLocalReservation() override;
~DiskLocalReservation() override
{
try
{
std::lock_guard lock(DiskLocal::reservation_mutex);
if (disk->reserved_bytes < size)
{
disk->reserved_bytes = 0;
LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservations size for disk '{}'.", disk->getName());
}
else
{
disk->reserved_bytes -= size;
}

if (disk->reservation_count == 0)
LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservation count for disk '{}'.", disk->getName());
else
--disk->reservation_count;
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}

private:
DiskLocalPtr disk;
@@ -188,7 +228,7 @@ bool DiskLocal::tryReserve(UInt64 bytes)
return false;
}

UInt64 DiskLocal::getTotalSpace() const
static UInt64 getTotalSpaceByName(const String & name, const String & disk_path, UInt64 keep_free_space_bytes)
{
struct statvfs fs;
if (name == "default") /// for default disk we get space from path/data/
@@ -201,8 +241,17 @@ UInt64 DiskLocal::getTotalSpace() const
return total_size - keep_free_space_bytes;
}

UInt64 DiskLocal::getTotalSpace() const
{
if (broken || readonly)
return 0;
return getTotalSpaceByName(name, disk_path, keep_free_space_bytes);
}

UInt64 DiskLocal::getAvailableSpace() const
{
if (broken || readonly)
return 0;
/// we use f_bavail, because part of b_free space is
/// available for superuser only and for system purposes
struct statvfs fs;
@@ -268,7 +317,7 @@ void DiskLocal::moveDirectory(const String & from_path, const String & to_path)
DiskDirectoryIteratorPtr DiskLocal::iterateDirectory(const String & path)
{
fs::path meta_path = fs::path(disk_path) / path;
if (fs::exists(meta_path) && fs::is_directory(meta_path))
if (!broken && fs::exists(meta_path) && fs::is_directory(meta_path))
return std::make_unique<DiskLocalDirectoryIterator>(disk_path, path);
else
return std::make_unique<DiskLocalDirectoryIterator>();
|
||||
keep_free_space_bytes = new_keep_free_space_bytes;
|
||||
}
|
||||
|
||||
DiskPtr DiskLocalReservation::getDisk(size_t i) const
|
||||
DiskLocal::DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_)
|
||||
: name(name_)
|
||||
, disk_path(path_)
|
||||
, keep_free_space_bytes(keep_free_space_bytes_)
|
||||
, logger(&Poco::Logger::get("DiskLocal"))
|
||||
{
|
||||
if (i != 0)
|
||||
{
|
||||
throw Exception("Can't use i != 0 with single disk reservation", ErrorCodes::INCORRECT_DISK_INDEX);
|
||||
}
|
||||
return disk;
|
||||
}
|
||||
|
||||
void DiskLocalReservation::update(UInt64 new_size)
|
||||
DiskLocal::DiskLocal(
|
||||
const String & name_, const String & path_, UInt64 keep_free_space_bytes_, ContextPtr context, UInt64 local_disk_check_period_ms)
|
||||
: DiskLocal(name_, path_, keep_free_space_bytes_)
|
||||
{
|
||||
std::lock_guard lock(DiskLocal::reservation_mutex);
|
||||
disk->reserved_bytes -= size;
|
||||
size = new_size;
|
||||
disk->reserved_bytes += size;
|
||||
if (local_disk_check_period_ms > 0)
|
||||
disk_checker = std::make_unique<DiskLocalCheckThread>(this, context, local_disk_check_period_ms);
|
||||
}
|
||||
|
||||
DiskLocalReservation::~DiskLocalReservation()
|
||||
void DiskLocal::startup()
|
||||
{
|
||||
try
|
||||
{
|
||||
std::lock_guard lock(DiskLocal::reservation_mutex);
|
||||
if (disk->reserved_bytes < size)
|
||||
{
|
||||
disk->reserved_bytes = 0;
|
||||
LOG_ERROR(disk->log, "Unbalanced reservations size for disk '{}'.", disk->getName());
|
||||
}
|
||||
else
|
||||
{
|
||||
disk->reserved_bytes -= size;
|
||||
}
|
||||
|
||||
if (disk->reservation_count == 0)
|
||||
LOG_ERROR(disk->log, "Unbalanced reservation count for disk '{}'.", disk->getName());
|
||||
else
|
||||
--disk->reservation_count;
|
||||
broken = false;
|
||||
disk_checker_magic_number = -1;
|
||||
disk_checker_can_check_read = true;
|
||||
readonly = !setup();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
tryLogCurrentException(logger, fmt::format("Disk {} is marked as broken during startup", name));
|
||||
broken = true;
|
||||
/// Disk checker is disabled when failing to start up.
|
||||
disk_checker_can_check_read = false;
|
||||
}
|
||||
if (disk_checker && disk_checker_can_check_read)
|
||||
disk_checker->startup();
|
||||
}
|
||||
|
||||
void DiskLocal::shutdown()
|
||||
{
|
||||
if (disk_checker)
|
||||
disk_checker->shutdown();
|
||||
}
|
||||
|
||||
std::optional<UInt32> DiskLocal::readDiskCheckerMagicNumber() const noexcept
|
||||
try
|
||||
{
|
||||
ReadSettings read_settings;
|
||||
/// Proper disk read checking requires direct io
|
||||
read_settings.direct_io_threshold = 1;
|
||||
auto buf = readFile(disk_checker_path, read_settings, {}, {});
|
||||
UInt32 magic_number;
|
||||
readIntBinary(magic_number, *buf);
|
||||
if (buf->eof())
|
||||
return magic_number;
|
||||
LOG_WARNING(logger, "The size of disk check magic number is more than 4 bytes. Mark it as read failure");
|
||||
return {};
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(logger, fmt::format("Cannot read correct disk check magic number from from {}{}", disk_path, disk_checker_path));
|
||||
return {};
|
||||
}
|
||||
|
||||
bool DiskLocal::canRead() const noexcept
|
||||
try
|
||||
{
|
||||
if (FS::canRead(fs::path(disk_path) / disk_checker_path))
|
||||
{
|
||||
auto magic_number = readDiskCheckerMagicNumber();
|
||||
if (magic_number && *magic_number == disk_checker_magic_number)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_WARNING(logger, "Cannot achieve read over the disk directory: {}", disk_path);
|
||||
return false;
|
||||
}
|
||||
|
||||
struct DiskWriteCheckData
|
||||
{
|
||||
constexpr static size_t PAGE_SIZE = 4096;
|
||||
char data[PAGE_SIZE]{};
|
||||
DiskWriteCheckData()
|
||||
{
|
||||
static const char * magic_string = "ClickHouse disk local write check";
|
||||
static size_t magic_string_len = strlen(magic_string);
|
||||
memcpy(data, magic_string, magic_string_len);
|
||||
memcpy(data + PAGE_SIZE - magic_string_len, magic_string, magic_string_len);
|
||||
}
|
||||
};
|
||||
|
||||
bool DiskLocal::canWrite() const noexcept
|
||||
try
|
||||
{
|
||||
static DiskWriteCheckData data;
|
||||
String tmp_template = fs::path(disk_path) / "";
|
||||
{
|
||||
auto buf = WriteBufferFromTemporaryFile::create(tmp_template);
|
||||
buf->write(data.data, data.PAGE_SIZE);
|
||||
buf->sync();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_WARNING(logger, "Cannot achieve write over the disk directory: {}", disk_path);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool DiskLocal::setup()
|
||||
{
|
||||
try
|
||||
{
|
||||
fs::create_directories(disk_path);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_ERROR(logger, "Cannot create the directory of disk {} ({}).", name, disk_path);
|
||||
throw;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
if (!FS::canRead(disk_path))
|
||||
throw Exception(ErrorCodes::PATH_ACCESS_DENIED, "There is no read access to disk {} ({}).", name, disk_path);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_ERROR(logger, "Cannot gain read access of the disk directory: {}", disk_path);
|
||||
throw;
|
||||
}
|
||||
|
||||
/// If disk checker is disabled, just assume RW by default.
|
||||
if (!disk_checker)
|
||||
return true;
|
||||
|
||||
try
|
||||
{
|
||||
if (exists(disk_checker_path))
|
||||
{
|
||||
auto magic_number = readDiskCheckerMagicNumber();
|
||||
if (magic_number)
|
||||
disk_checker_magic_number = *magic_number;
|
||||
else
|
||||
{
|
||||
/// The checker file is incorrect. Mark the magic number to uninitialized and try to generate a new checker file.
|
||||
disk_checker_magic_number = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_ERROR(logger, "We cannot tell if {} exists anymore, or read from it. Most likely disk {} is broken", disk_checker_path, name);
|
||||
throw;
|
||||
}
|
||||
|
||||
/// Try to create a new checker file. The disk status can be either broken or readonly.
|
||||
if (disk_checker_magic_number == -1)
|
||||
try
|
||||
{
|
||||
pcg32_fast rng(randomSeed());
|
||||
UInt32 magic_number = rng();
|
||||
{
|
||||
auto buf = writeFile(disk_checker_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
|
||||
writeIntBinary(magic_number, *buf);
|
||||
}
|
||||
disk_checker_magic_number = magic_number;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_WARNING(
|
||||
logger,
|
||||
"Cannot create/write to {0}. Disk {1} is either readonly or broken. Without setting up disk checker file, DiskLocalCheckThread "
|
||||
"will not be started. Disk is assumed to be RW. Try manually fix the disk and do `SYSTEM RESTART DISK {1}`",
|
||||
disk_checker_path,
|
||||
name);
|
||||
disk_checker_can_check_read = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (disk_checker_magic_number == -1)
|
||||
throw Exception("disk_checker_magic_number is not initialized. It's a bug", ErrorCodes::LOGICAL_ERROR);
|
||||
return true;
|
||||
}
|
||||
|
||||
void registerDiskLocal(DiskFactory & factory)
|
||||
{
|
||||
@ -459,17 +650,20 @@ void registerDiskLocal(DiskFactory & factory)
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_prefix,
|
||||
ContextPtr context,
|
||||
const DisksMap & map) -> DiskPtr {
|
||||
const DisksMap & map) -> DiskPtr
|
||||
{
|
||||
String path;
|
||||
UInt64 keep_free_space_bytes;
|
||||
loadDiskLocalConfig(name, config, config_prefix, context, path, keep_free_space_bytes);
|
||||
|
||||
for (const auto & [disk_name, disk_ptr] : map)
|
||||
{
|
||||
if (path == disk_ptr->getPath())
|
||||
throw Exception("Disk " + name + " and Disk " + disk_name + " cannot have the same path" + " (" + path + ")", ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
return std::make_shared<DiskLocal>(name, path, keep_free_space_bytes);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk {} and disk {} cannot have the same path ({})", name, disk_name, path);
|
||||
|
||||
std::shared_ptr<IDisk> disk
|
||||
= std::make_shared<DiskLocal>(name, path, keep_free_space_bytes, context, config.getUInt("local_disk_check_period_ms", 0));
|
||||
disk->startup();
|
||||
return std::make_shared<DiskRestartProxy>(disk);
|
||||
};
|
||||
factory.registerDiskType("local", creator);
|
||||
}
|
||||
|
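The checker-file protocol above is simple enough to replay outside ClickHouse: setup() writes one random 4-byte magic number to `.disk_checker_file`, and every later read probe must get exactly those 4 bytes back. A minimal standalone sketch of the same round trip, assuming plain stdio in place of ClickHouse's readFile/writeFile buffers and direct IO (only the file name is taken from the code above; everything else is hypothetical):

    // Hypothetical replay of the disk-checker round trip.
    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <random>

    static std::optional<uint32_t> read_magic(const char * path)
    {
        std::FILE * f = std::fopen(path, "rb");
        if (!f)
            return std::nullopt;
        uint32_t magic = 0;
        size_t n = std::fread(&magic, 1, sizeof(magic), f);
        bool at_eof = std::fgetc(f) == EOF;  // the file must hold exactly 4 bytes, mirroring the buf->eof() check
        std::fclose(f);
        if (n == sizeof(magic) && at_eof)
            return magic;
        return std::nullopt;                 // a short or oversized checker file counts as a read failure
    }

    int main()
    {
        const char * path = ".disk_checker_file";
        uint32_t magic = std::random_device{}();     // stands in for pcg32_fast(randomSeed())

        if (std::FILE * f = std::fopen(path, "wb"))  // the write half of the check
        {
            std::fwrite(&magic, 1, sizeof(magic), f);
            std::fclose(f);
        }

        auto read_back = read_magic(path);           // the read half of the check
        std::printf("disk can read: %s\n", (read_back && *read_back == magic) ? "yes" : "no");
    }

Requiring EOF right after the four bytes is what turns a corrupted or truncated checker file into a failed probe rather than a false positive.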
@ -1,6 +1,7 @@
#pragma once

#include <base/logger_useful.h>
#include <Disks/DiskLocalCheckThread.h>
#include <Disks/IDisk.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromFileBase.h>
@ -10,24 +11,22 @@

namespace DB
{
namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
}

class DiskLocalReservation;

class DiskLocal : public IDisk
{
public:
    friend class DiskLocalCheckThread;
    friend class DiskLocalReservation;

    DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_)
        : name(name_), disk_path(path_), keep_free_space_bytes(keep_free_space_bytes_)
    {
        if (disk_path.back() != '/')
            throw Exception("Disk path must end with '/', but '" + disk_path + "' doesn't.", ErrorCodes::LOGICAL_ERROR);
    }
    DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_);
    DiskLocal(
        const String & name_,
        const String & path_,
        UInt64 keep_free_space_bytes_,
        ContextPtr context,
        UInt64 local_disk_check_period_ms);

    const String & getName() const override { return name; }

@ -106,13 +105,33 @@ public:

    void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap &) override;

    bool isBroken() const override { return broken; }

    void startup() override;

    void shutdown() override;

    /// Check if the disk is OK to proceed with read/write operations. Currently the check is
    /// rudimentary. The more advanced choice would be using
    /// https://github.com/smartmontools/smartmontools. However, it's good enough for now.
    bool canRead() const noexcept;
    bool canWrite() const noexcept;

private:
    bool tryReserve(UInt64 bytes);

private:
    /// Set up the disk for health checks. Returns true if it's read-write, false if read-only.
    /// Throws an exception if it's not possible to set up the necessary files and directories.
    bool setup();

    /// Read the magic number from the disk checker file. Returns std::nullopt if an exception happens.
    std::optional<UInt32> readDiskCheckerMagicNumber() const noexcept;

    const String name;
    const String disk_path;
    const String disk_checker_path = ".disk_checker_file";
    std::atomic<UInt64> keep_free_space_bytes;
    Poco::Logger * logger;

    UInt64 reserved_bytes = 0;
    UInt64 reservation_count = 0;
@ -120,6 +139,14 @@ private:
    static std::mutex reservation_mutex;

    Poco::Logger * log = &Poco::Logger::get("DiskLocal");

    std::atomic<bool> broken{false};
    std::atomic<bool> readonly{false};
    std::unique_ptr<DiskLocalCheckThread> disk_checker;
    /// A magic number used to vaguely check whether a read operation generates the correct result.
    /// -1 means there is no available disk_checker_file yet.
    Int64 disk_checker_magic_number = -1;
    bool disk_checker_can_check_read = true;
};
src/Disks/DiskLocalCheckThread.cpp (new file, 70 lines)
@ -0,0 +1,70 @@
#include <Disks/DiskLocalCheckThread.h>

#include <Disks/DiskLocal.h>
#include <Interpreters/Context.h>
#include <base/logger_useful.h>

namespace DB
{
static const auto DISK_CHECK_ERROR_SLEEP_MS = 1000;
static const auto DISK_CHECK_ERROR_RETRY_TIME = 3;

DiskLocalCheckThread::DiskLocalCheckThread(DiskLocal * disk_, ContextPtr context_, UInt64 local_disk_check_period_ms)
    : WithContext(context_)
    , disk(std::move(disk_))
    , check_period_ms(local_disk_check_period_ms)
    , log(&Poco::Logger::get(fmt::format("DiskLocalCheckThread({})", disk->getName())))
{
    task = getContext()->getSchedulePool().createTask(log->name(), [this] { run(); });
}

void DiskLocalCheckThread::startup()
{
    need_stop = false;
    retry = 0;
    task->activateAndSchedule();
}

void DiskLocalCheckThread::run()
{
    if (need_stop)
        return;

    bool can_read = disk->canRead();
    bool can_write = disk->canWrite();
    if (can_read)
    {
        if (disk->broken)
            LOG_INFO(log, "Disk {0} seems to be fine. It can be recovered using `SYSTEM RESTART DISK {0}`", disk->getName());
        retry = 0;
        if (can_write)
            disk->readonly = false;
        else
        {
            disk->readonly = true;
            LOG_INFO(log, "Disk {} is readonly", disk->getName());
        }
        task->scheduleAfter(check_period_ms);
    }
    else if (!disk->broken && retry < DISK_CHECK_ERROR_RETRY_TIME)
    {
        ++retry;
        task->scheduleAfter(DISK_CHECK_ERROR_SLEEP_MS);
    }
    else
    {
        retry = 0;
        disk->broken = true;
        LOG_INFO(log, "Disk {} is broken", disk->getName());
        task->scheduleAfter(check_period_ms);
    }
}

void DiskLocalCheckThread::shutdown()
{
    need_stop = true;
    task->deactivate();
    LOG_TRACE(log, "DiskLocalCheck thread finished");
}

}
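run() above is a small state machine: a successful read resets the retry counter and refreshes the readonly flag from the write probe, while failed reads are retried up to three times at one-second intervals before the disk is declared broken. A condensed standalone model of just that decision logic, with hypothetical names (the real thread drives it through BackgroundSchedulePool):

    #include <cstdio>

    // Hypothetical condensation of DiskLocalCheckThread::run()'s decision.
    struct CheckDecision { bool broken; bool readonly; int retry; unsigned next_check_ms; };

    CheckDecision decide(bool can_read, bool can_write, bool was_broken, int retry,
                         unsigned period_ms, unsigned error_sleep_ms = 1000, int max_retry = 3)
    {
        if (can_read)
            return {false, !can_write, 0, period_ms};          // healthy, possibly readonly
        if (!was_broken && retry < max_retry)
            return {false, false, retry + 1, error_sleep_ms};  // transient failure: retry soon
        return {true, false, 0, period_ms};                    // retries exhausted: mark broken
    }

    int main()
    {
        // A disk that fails four read probes in a row ends up broken.
        CheckDecision d{false, false, 0, 0};
        for (int i = 0; i < 4; ++i)
            d = decide(false, false, d.broken, d.retry, 5000);
        std::printf("broken=%d next_check_in=%ums\n", d.broken, d.next_check_ms);
    }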
src/Disks/DiskLocalCheckThread.h (new file, 39 lines)
@ -0,0 +1,39 @@
#pragma once

#include <Core/BackgroundSchedulePool.h>
#include <Interpreters/Context_fwd.h>

namespace Poco
{
class Logger;
}

namespace DB
{
class DiskLocal;

class DiskLocalCheckThread : WithContext
{
public:
    friend class DiskLocal;

    DiskLocalCheckThread(DiskLocal * disk_, ContextPtr context_, UInt64 local_disk_check_period_ms);

    void startup();

    void shutdown();

private:
    bool check();
    void run();

    DiskLocal * disk;
    size_t check_period_ms;
    Poco::Logger * log;
    std::atomic<bool> need_stop{false};

    BackgroundSchedulePool::TaskHolder task;
    size_t retry{};
};

}
@ -40,7 +40,12 @@ DiskSelector::DiskSelector(const Poco::Util::AbstractConfiguration & config, con
        disks.emplace(disk_name, factory.create(disk_name, config, disk_config_prefix, context, disks));
    }
    if (!has_default_disk)
        disks.emplace(default_disk_name, std::make_shared<DiskLocal>(default_disk_name, context->getPath(), 0));
    {
        disks.emplace(
            default_disk_name,
            std::make_shared<DiskLocal>(
                default_disk_name, context->getPath(), 0, context, config.getUInt("local_disk_check_period_ms", 0)));
    }
}
@ -37,6 +37,12 @@ public:
        disks.emplace(name, disk);
    }

    void shutdown()
    {
        for (auto & e : disks)
            e.second->shutdown();
    }

private:
    DisksMap disks;
};
@ -224,6 +224,9 @@ public:

    virtual bool isReadOnly() const { return false; }

    /// Check if disk is broken. Broken disks will have 0 space and not be used.
    virtual bool isBroken() const { return false; }

    /// Invoked when Global Context is shutdown.
    virtual void shutdown() {}

@ -60,6 +60,7 @@ public:

    DiskPtr getDisk() const { return getDisk(0); }
    virtual DiskPtr getDisk(size_t i) const { return disks[i]; }
    Disks & getDisks() { return disks; }
    const Disks & getDisks() const { return disks; }

    /// Returns effective value of whether merges are allowed on this volume (true) or not (false).
@ -164,10 +164,18 @@ DiskPtr StoragePolicy::getAnyDisk() const
    if (volumes.empty())
        throw Exception("Storage policy " + backQuote(name) + " has no volumes. It's a bug.", ErrorCodes::LOGICAL_ERROR);

    if (volumes[0]->getDisks().empty())
        throw Exception("Volume " + backQuote(name) + "." + backQuote(volumes[0]->getName()) + " has no disks. It's a bug.", ErrorCodes::LOGICAL_ERROR);
    for (const auto & volume : volumes)
    {
        if (volume->getDisks().empty())
            throw Exception("Volume '" + volume->getName() + "' has no disks. It's a bug", ErrorCodes::LOGICAL_ERROR);
        for (const auto & disk : volume->getDisks())
        {
            if (!disk->isBroken())
                return disk;
        }
    }

    return volumes[0]->getDisks()[0];
    throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "All disks in storage policy {} are broken", name);
}


@ -233,6 +241,10 @@ ReservationPtr StoragePolicy::makeEmptyReservationOnLargestDisk() const
            }
        }
    }
    if (!max_disk)
        throw Exception(
            "There is no space on any disk in storage policy: " + name + ". It's likely all disks are broken",
            ErrorCodes::NOT_ENOUGH_SPACE);
    auto reservation = max_disk->reserve(0);
    if (!reservation)
    {
@ -36,6 +36,7 @@
# include <IO/WriteBufferFromString.h>
# include <IO/WriteHelpers.h>
# include <base/range.h>
# include <base/sort.h>
# include <google/protobuf/descriptor.h>
# include <google/protobuf/descriptor.pb.h>
# include <boost/algorithm/string.hpp>
@ -2163,7 +2164,7 @@ namespace
    for (auto & desc : field_descs_)
        field_infos.emplace_back(std::move(desc.column_indices), *desc.field_descriptor, std::move(desc.field_serializer));

    std::sort(field_infos.begin(), field_infos.end(),
    ::sort(field_infos.begin(), field_infos.end(),
        [](const FieldInfo & lhs, const FieldInfo & rhs) { return lhs.field_tag < rhs.field_tag; });

    for (size_t i : collections::range(field_infos.size()))
@ -2643,7 +2644,7 @@ namespace
    missing_column_indices.clear();
    missing_column_indices.reserve(column_names.size() - used_column_indices.size());
    auto used_column_indices_sorted = std::move(used_column_indices);
    std::sort(used_column_indices_sorted.begin(), used_column_indices_sorted.end());
    ::sort(used_column_indices_sorted.begin(), used_column_indices_sorted.end());
    boost::range::set_difference(collections::range(column_names.size()), used_column_indices_sorted,
        std::back_inserter(missing_column_indices));

@ -2755,7 +2756,7 @@ namespace
    }

    /// Shorter suffixes first.
    std::sort(out_field_descriptors_with_suffixes.begin(), out_field_descriptors_with_suffixes.end(),
    ::sort(out_field_descriptors_with_suffixes.begin(), out_field_descriptors_with_suffixes.end(),
        [](const std::pair<const FieldDescriptor *, std::string_view /* suffix */> & f1,
        const std::pair<const FieldDescriptor *, std::string_view /* suffix */> & f2)
        {
@ -125,7 +125,7 @@ private:
    {
        const auto & src_data = col->getData();
        const size_t size = src_data.size();
        UInt32 scale = src_data.getScale();
        UInt32 scale = col->getScale();

        auto dst = ColumnVector<ReturnType>::create();
        auto & dst_data = dst->getData();

@ -152,9 +152,11 @@ struct ConvertImpl
    if (const ColVecFrom * col_from = checkAndGetColumn<ColVecFrom>(named_from.column.get()))
    {
        typename ColVecTo::MutablePtr col_to = nullptr;

        if constexpr (IsDataTypeDecimal<ToDataType>)
        {
            UInt32 scale;

            if constexpr (std::is_same_v<Additions, AccurateConvertStrategyAdditions>
                || std::is_same_v<Additions, AccurateOrNullConvertStrategyAdditions>)
            {
@ -208,11 +210,11 @@ struct ConvertImpl
    bool convert_result = false;

    if constexpr (IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>)
        convert_result = tryConvertDecimals<FromDataType, ToDataType>(vec_from[i], vec_from.getScale(), vec_to.getScale(), result);
        convert_result = tryConvertDecimals<FromDataType, ToDataType>(vec_from[i], col_from->getScale(), col_to->getScale(), result);
    else if constexpr (IsDataTypeDecimal<FromDataType> && IsDataTypeNumber<ToDataType>)
        convert_result = tryConvertFromDecimal<FromDataType, ToDataType>(vec_from[i], vec_from.getScale(), result);
        convert_result = tryConvertFromDecimal<FromDataType, ToDataType>(vec_from[i], col_from->getScale(), result);
    else if constexpr (IsDataTypeNumber<FromDataType> && IsDataTypeDecimal<ToDataType>)
        convert_result = tryConvertToDecimal<FromDataType, ToDataType>(vec_from[i], vec_to.getScale(), result);
        convert_result = tryConvertToDecimal<FromDataType, ToDataType>(vec_from[i], col_to->getScale(), result);

    if (convert_result)
        vec_to[i] = result;
@ -225,11 +227,11 @@ struct ConvertImpl
    else
    {
        if constexpr (IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>)
            vec_to[i] = convertDecimals<FromDataType, ToDataType>(vec_from[i], vec_from.getScale(), vec_to.getScale());
            vec_to[i] = convertDecimals<FromDataType, ToDataType>(vec_from[i], col_from->getScale(), col_to->getScale());
        else if constexpr (IsDataTypeDecimal<FromDataType> && IsDataTypeNumber<ToDataType>)
            vec_to[i] = convertFromDecimal<FromDataType, ToDataType>(vec_from[i], vec_from.getScale());
            vec_to[i] = convertFromDecimal<FromDataType, ToDataType>(vec_from[i], col_from->getScale());
        else if constexpr (IsDataTypeNumber<FromDataType> && IsDataTypeDecimal<ToDataType>)
            vec_to[i] = convertToDecimal<FromDataType, ToDataType>(vec_from[i], vec_to.getScale());
            vec_to[i] = convertToDecimal<FromDataType, ToDataType>(vec_from[i], col_to->getScale());
        else
            throw Exception("Unsupported data type in conversion function", ErrorCodes::CANNOT_CONVERT_TYPE);
    }
@ -820,7 +822,7 @@ struct ConvertImpl<FromDataType, std::enable_if_t<!std::is_same_v<FromDataType,
    else if constexpr (std::is_same_v<FromDataType, DataTypeDateTime>)
        data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss") + 1));
    else if constexpr (std::is_same_v<FromDataType, DataTypeDateTime64>)
        data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss.") + vec_from.getScale() + 1));
        data_to.resize(size * (strlen("YYYY-MM-DD hh:mm:ss.") + col_from->getScale() + 1));
    else
        data_to.resize(size * 3); /// Arbitrary

@ -1169,7 +1171,7 @@ struct ConvertThroughParsing
    if constexpr (to_datetime64)
    {
        DateTime64 res = 0;
        parseDateTime64BestEffort(res, vec_to.getScale(), read_buffer, *local_time_zone, *utc_time_zone);
        parseDateTime64BestEffort(res, col_to->getScale(), read_buffer, *local_time_zone, *utc_time_zone);
        vec_to[i] = res;
    }
    else
@ -1184,7 +1186,7 @@ struct ConvertThroughParsing
    if constexpr (to_datetime64)
    {
        DateTime64 res = 0;
        parseDateTime64BestEffortUS(res, vec_to.getScale(), read_buffer, *local_time_zone, *utc_time_zone);
        parseDateTime64BestEffortUS(res, col_to->getScale(), read_buffer, *local_time_zone, *utc_time_zone);
        vec_to[i] = res;
    }
    else
@ -1199,12 +1201,12 @@ struct ConvertThroughParsing
    if constexpr (to_datetime64)
    {
        DateTime64 value = 0;
        readDateTime64Text(value, vec_to.getScale(), read_buffer, *local_time_zone);
        readDateTime64Text(value, col_to->getScale(), read_buffer, *local_time_zone);
        vec_to[i] = value;
    }
    else if constexpr (IsDataTypeDecimal<ToDataType>)
        SerializationDecimal<typename ToDataType::FieldType>::readText(
            vec_to[i], read_buffer, ToDataType::maxPrecision(), vec_to.getScale());
            vec_to[i], read_buffer, ToDataType::maxPrecision(), col_to->getScale());
    else
    {
        parseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone);
@ -1223,7 +1225,7 @@ struct ConvertThroughParsing
    if constexpr (to_datetime64)
    {
        DateTime64 res = 0;
        parsed = tryParseDateTime64BestEffort(res, vec_to.getScale(), read_buffer, *local_time_zone, *utc_time_zone);
        parsed = tryParseDateTime64BestEffort(res, col_to->getScale(), read_buffer, *local_time_zone, *utc_time_zone);
        vec_to[i] = res;
    }
    else
@ -1244,12 +1246,12 @@ struct ConvertThroughParsing
    if constexpr (to_datetime64)
    {
        DateTime64 value = 0;
        parsed = tryReadDateTime64Text(value, vec_to.getScale(), read_buffer, *local_time_zone);
        parsed = tryReadDateTime64Text(value, col_to->getScale(), read_buffer, *local_time_zone);
        vec_to[i] = value;
    }
    else if constexpr (IsDataTypeDecimal<ToDataType>)
        parsed = SerializationDecimal<typename ToDataType::FieldType>::tryReadText(
            vec_to[i], read_buffer, ToDataType::maxPrecision(), vec_to.getScale());
            vec_to[i], read_buffer, ToDataType::maxPrecision(), col_to->getScale());
    else
        parsed = tryParseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone);
}
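Every change in this file follows the same pattern: the decimal scale is now fetched from the column object (col_from->getScale(), col_to->getScale()) rather than from the column's underlying data container. A toy model of the resulting shape, with illustrative names only (not ClickHouse's actual classes):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Toy model: the scale lives on the column wrapper, not on its data vector.
    template <typename T>
    struct ColumnDecimalModel
    {
        std::vector<T> data;   // plain container, no scale of its own anymore
        uint32_t scale = 0;    // scale is a property of the column

        uint32_t getScale() const { return scale; }
    };

    int main()
    {
        ColumnDecimalModel<int64_t> col{{12345, 6789}, 2};  // values scaled by 10^2
        // Callers ask the column for the scale, as in col.getScale(),
        // instead of reaching into col.data for it.
        for (int64_t v : col.data)
            std::cout << v / 100 << "." << v % 100 << "\n";  // prints 123.45 and 67.89
        std::cout << "scale=" << col.getScale() << "\n";
    }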
@ -17,6 +17,7 @@
#include <type_traits>
#include <array>
#include <base/bit_cast.h>
#include <base/sort.h>
#include <algorithm>

#ifdef __SSE4_1__
@ -422,9 +423,9 @@ private:
    using Container = typename ColumnDecimal<T>::Container;

public:
    static NO_INLINE void apply(const Container & in, Container & out, Scale scale_arg)
    static NO_INLINE void apply(const Container & in, UInt32 in_scale, Container & out, Scale scale_arg)
    {
        scale_arg = in.getScale() - scale_arg;
        scale_arg = in_scale - scale_arg;
        if (scale_arg > 0)
        {
            size_t scale = intExp10(scale_arg);
@ -498,11 +499,11 @@ public:
    const auto * const col = checkAndGetColumn<ColumnDecimal<T>>(col_general);
    const typename ColumnDecimal<T>::Container & vec_src = col->getData();

    auto col_res = ColumnDecimal<T>::create(vec_src.size(), vec_src.getScale());
    auto col_res = ColumnDecimal<T>::create(vec_src.size(), col->getScale());
    auto & vec_res = col_res->getData();

    if (!vec_res.empty())
        DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::apply(col->getData(), vec_res, scale_arg);
        DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::apply(col->getData(), col->getScale(), vec_res, scale_arg);

    return col_res;
}
@ -738,7 +739,7 @@ private:
    for (size_t i = 0; i < boundaries.size(); ++i)
        boundary_values[i] = boundaries[i].get<ValueType>();

    std::sort(boundary_values.begin(), boundary_values.end());
    ::sort(boundary_values.begin(), boundary_values.end());
    boundary_values.erase(std::unique(boundary_values.begin(), boundary_values.end()), boundary_values.end());

    size_t size = src.size();
@ -1,5 +1,6 @@
#include <algorithm>
#include <vector>
#include <base/sort.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include "arrayScalarProduct.h"
@ -112,7 +113,7 @@ public:
        sorted_labels[i].label = label;
    }

    std::sort(sorted_labels.begin(), sorted_labels.end(), [](const auto & lhs, const auto & rhs) { return lhs.score > rhs.score; });
    ::sort(sorted_labels.begin(), sorted_labels.end(), [](const auto & lhs, const auto & rhs) { return lhs.score > rhs.score; });

    /// We will first calculate non-normalized area.

@ -157,11 +157,11 @@ struct ArrayAggregateImpl
        return false;

    const AggregationType x = column_const->template getValue<Element>(); // NOLINT
    const auto & data = checkAndGetColumn<ColVecType>(&column_const->getDataColumn())->getData();
    const ColVecType * column_typed = checkAndGetColumn<ColVecType>(&column_const->getDataColumn());

    typename ColVecResultType::MutablePtr res_column;
    if constexpr (is_decimal<Element>)
        res_column = ColVecResultType::create(offsets.size(), data.getScale());
        res_column = ColVecResultType::create(offsets.size(), column_typed->getScale());
    else
        res_column = ColVecResultType::create(offsets.size());

@ -185,7 +185,7 @@ struct ArrayAggregateImpl
    {
        if constexpr (is_decimal<Element>)
        {
            res[i] = DecimalUtils::convertTo<ResultType>(x, data.getScale());
            res[i] = DecimalUtils::convertTo<ResultType>(x, column_typed->getScale());
        }
        else
        {
@ -210,11 +210,11 @@ struct ArrayAggregateImpl
            throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "Decimal math overflow");
    }

    auto result_scale = data.getScale() * array_size;
    auto result_scale = column_typed->getScale() * array_size;
    if (unlikely(result_scale > DecimalUtils::max_precision<AggregationType>))
        throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale);

    res[i] = DecimalUtils::convertTo<ResultType>(product, data.getScale() * array_size);
    res[i] = DecimalUtils::convertTo<ResultType>(product, result_scale);
}
else
{
@ -236,7 +236,7 @@ struct ArrayAggregateImpl

    typename ColVecResultType::MutablePtr res_column;
    if constexpr (is_decimal<Element>)
        res_column = ColVecResultType::create(offsets.size(), data.getScale());
        res_column = ColVecResultType::create(offsets.size(), column->getScale());
    else
        res_column = ColVecResultType::create(offsets.size());

@ -309,7 +309,7 @@ struct ArrayAggregateImpl
    if constexpr (is_decimal<Element>)
    {
        aggregate_value = aggregate_value / AggregationType(count);
        res[i] = DecimalUtils::convertTo<ResultType>(aggregate_value, data.getScale());
        res[i] = DecimalUtils::convertTo<ResultType>(aggregate_value, column->getScale());
    }
    else
    {
@ -318,7 +318,7 @@ struct ArrayAggregateImpl
    }
    else if constexpr (aggregate_operation == AggregateOperation::product && is_decimal<Element>)
    {
        auto result_scale = data.getScale() * count;
        auto result_scale = column->getScale() * count;

        if (unlikely(result_scale > DecimalUtils::max_precision<AggregationType>))
            throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale);

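The scale bookkeeping above is the subtle part: a product of array_size decimal values, each of scale s, is a raw integer whose implied scale is s * array_size, so the code multiplies the scale up and bounds-checks it against the type's maximum precision before converting. A tiny standalone illustration, with plain integers standing in for ClickHouse's Decimal types:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Two Decimal values with scale 2: 1.10 and 2.50 stored as scaled integers.
        int64_t a = 110, b = 250;
        int64_t product = a * b;        // 27500, with an implied scale of 2 + 2 = 4
        uint32_t result_scale = 2 * 2;  // the product's scale grows to s * n

        std::printf("raw=%lld scale=%u value=%.4f\n",
                    static_cast<long long>(product), result_scale, product / 10000.0);
        // If result_scale exceeded the type's maximum precision, the code above
        // throws ARGUMENT_OUT_OF_BOUND instead of overflowing silently.
    }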
@ -40,7 +40,7 @@ struct ArrayCompactImpl

    typename ColVecType::MutablePtr res_values_column;
    if constexpr (is_decimal<T>)
        res_values_column = ColVecType::create(src_values.size(), src_values.getScale());
        res_values_column = ColVecType::create(src_values.size(), src_values_column->getScale());
    else
        res_values_column = ColVecType::create(src_values.size());

Some files were not shown because too many files have changed in this diff.