Merge remote-tracking branch 'origin/master' into deprecate-special-zero-datetime

Alexey Milovidov 2020-07-31 23:49:06 +03:00
commit 07f3662f6d
236 changed files with 2434 additions and 1994 deletions

.gitmodules

@@ -76,7 +76,7 @@
url = https://github.com/google/snappy
[submodule "contrib/cppkafka"]
path = contrib/cppkafka
url = https://github.com/ClickHouse-Extras/cppkafka.git
url = https://github.com/mfontanini/cppkafka.git
[submodule "contrib/brotli"]
path = contrib/brotli
url = https://github.com/google/brotli.git

contrib/cppkafka

@@ -1 +1 @@
Subproject commit f555ee36aaa74d17ca0dab3ce472070a610b2966
Subproject commit b06e64ef5bffd636d918a742c689f69130c1dbab

debian/rules

@@ -101,7 +101,7 @@ override_dh_clean:
dh_clean # -X contrib
override_dh_strip:
#https://www.debian.org/doc/debian-policy/ch-source.html#debian-rules-and-deb-build-options
#https://www.debian.org/doc/debian-policy/ch-source.html#debian-rules-and-deb-build-options
ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS)))
dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
endif


@@ -161,7 +161,7 @@ case "$stage" in
# Lost connection to the server. This probably means that the server died
# with abort.
echo "failure" > status.txt
if ! grep -a "Received signal \|Logical error" server.log > description.txt
if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed" server.log > description.txt
then
echo "Lost connection to server. See the logs" > description.txt
fi


@@ -514,7 +514,12 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
;
create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv') as
select left, right, diff, stat_threshold, changed_fail, test, query_index, query_display_name
select
left, right,
left > right
? '- ' || toString(floor(left / right, 3)) || 'x'
: '+ ' || toString(floor(right / left, 3)) || 'x',
diff, stat_threshold, changed_fail, test, query_index, query_display_name
from queries where changed_show order by abs(diff) desc;
create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv') as
@@ -592,9 +597,11 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv') as
-- report for all queries page, only main metric
create table all_tests_report engine File(TSV, 'report/all-queries.tsv') as
select changed_fail, unstable_fail,
left, right, diff,
floor(left > right ? left / right : right / left, 3),
stat_threshold, test, query_index, query_display_name
left, right,
left > right
? '- ' || toString(floor(left / right, 3)) || 'x'
: '+ ' || toString(floor(right / left, 3)) || 'x',
diff, stat_threshold, test, query_index, query_display_name
from queries order by test, query_index;
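-- Worked example of the new ratio column (illustrative numbers, not taken from a real run):
-- for left (old) = 1.000 s and right (new) = 0.370 s the query became faster, so the cell shows
-- '- 2.702x' (floor(1.000 / 0.370, 3) = 2.702); with the two times swapped it shows '+ 2.702x'.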
-- queries for which we will build flamegraphs (see below)


@@ -63,7 +63,48 @@ p.links a {{ padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-
color: inherit;
text-decoration: none;
}}
tr:nth-child(odd) td {{filter: brightness(95%);}}
.all-query-times tr td:nth-child(1),
.all-query-times tr td:nth-child(2),
.all-query-times tr td:nth-child(3),
.all-query-times tr td:nth-child(4),
.all-query-times tr td:nth-child(5),
.all-query-times tr td:nth-child(7),
.changes-in-performance tr td:nth-child(1),
.changes-in-performance tr td:nth-child(2),
.changes-in-performance tr td:nth-child(3),
.changes-in-performance tr td:nth-child(4),
.changes-in-performance tr td:nth-child(5),
.changes-in-performance tr td:nth-child(7),
.unstable-queries tr td:nth-child(1),
.unstable-queries tr td:nth-child(2),
.unstable-queries tr td:nth-child(3),
.unstable-queries tr td:nth-child(4),
.unstable-queries tr td:nth-child(6),
.test-performance-changes tr td:nth-child(2),
.test-performance-changes tr td:nth-child(3),
.test-performance-changes tr td:nth-child(4),
.test-performance-changes tr td:nth-child(5),
.test-performance-changes tr td:nth-child(6),
.test-times tr td:nth-child(2),
.test-times tr td:nth-child(3),
.test-times tr td:nth-child(4),
.test-times tr td:nth-child(5),
.test-times tr td:nth-child(6),
.test-times tr td:nth-child(7),
.test-times tr td:nth-child(8),
.concurrent-benchmarks tr td:nth-child(2),
.concurrent-benchmarks tr td:nth-child(3),
.concurrent-benchmarks tr td:nth-child(4),
.concurrent-benchmarks tr td:nth-child(5),
.metric-changes tr td:nth-child(2),
.metric-changes tr td:nth-child(3),
.metric-changes tr td:nth-child(4),
.metric-changes tr td:nth-child(5)
{{ text-align: right; }}
</style>
<title>Clickhouse performance comparison</title>
</head>
@@ -111,11 +152,14 @@ def tableHeader(r):
return tr(''.join([th(f) for f in r]))
def tableStart(title):
return """
<h2 id="{anchor}"><a class="cancela" href="#{anchor}">{title}</a></h2>
<table>""".format(
anchor = nextTableAnchor(),
title = title)
anchor = nextTableAnchor();
cls = '-'.join(title.lower().split(' ')[:3]);
return f"""
<h2 id="{anchor}">
<a class="cancela" href="#{anchor}">{title}</a>
</h2>
<table class="{cls}">
"""
def tableEnd():
return '</table>'
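# Illustration of the table class produced by tableStart() above (hypothetical titles;
# the first three words of the title are lowercased and hyphenated):
#   '-'.join('Changes in Performance'.lower().split(' ')[:3])  # -> 'changes-in-performance'
#   '-'.join('All Query Times'.lower().split(' ')[:3])         # -> 'all-query-times'
# These class names are what the right-alignment CSS selectors added above target.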
@@ -238,12 +282,13 @@ if args.report == 'main':
columns = [
'Old,&nbsp;s', # 0
'New,&nbsp;s', # 1
'Relative difference (new&nbsp;&minus;&nbsp;old) / old', # 2
'p&nbsp;<&nbsp;0.001 threshold', # 3
# Failed # 4
'Test', # 5
'#', # 6
'Query', # 7
'Times speedup / slowdown', # 2
'Relative difference (new&nbsp;&minus;&nbsp;old) / old', # 3
'p&nbsp;<&nbsp;0.001 threshold', # 4
# Failed # 5
'Test', # 6
'#', # 7
'Query', # 8
]
print(tableHeader(columns))
@@ -251,15 +296,15 @@ if args.report == 'main':
attrs = ['' for c in columns]
attrs[4] = None
for row in rows:
if int(row[4]):
if float(row[2]) < 0.:
if int(row[5]):
if float(row[3]) < 0.:
faster_queries += 1
attrs[2] = f'style="background: {color_good}"'
attrs[2] = attrs[3] = f'style="background: {color_good}"'
else:
slower_queries += 1
attrs[2] = f'style="background: {color_bad}"'
attrs[2] = attrs[3] = f'style="background: {color_bad}"'
else:
attrs[2] = ''
attrs[2] = attrs[3] = ''
print(tableRow(row, attrs))
@@ -281,7 +326,7 @@ if args.report == 'main':
'Old,&nbsp;s', #0
'New,&nbsp;s', #1
'Relative difference (new&nbsp;-&nbsp;old)/old', #2
'p&nbsp;<&nbsp;0.001 threshold', #3
'p&nbsp;&lt;&nbsp;0.001 threshold', #3
# Failed #4
'Test', #5
'#', #6
@@ -498,9 +543,9 @@ elif args.report == 'all-queries':
# Unstable #1
'Old,&nbsp;s', #2
'New,&nbsp;s', #3
'Relative difference (new&nbsp;&minus;&nbsp;old) / old', #4
'Times speedup / slowdown', #5
'p&nbsp;<&nbsp;0.001 threshold', #6
'Times speedup / slowdown', #4
'Relative difference (new&nbsp;&minus;&nbsp;old) / old', #5
'p&nbsp;&lt;&nbsp;0.001 threshold', #6
'Test', #7
'#', #8
'Query', #9
@@ -519,12 +564,12 @@ elif args.report == 'all-queries':
attrs[6] = ''
if int(r[0]):
if float(r[4]) > 0.:
attrs[4] = f'style="background: {color_bad}"'
if float(r[5]) > 0.:
attrs[4] = attrs[5] = f'style="background: {color_bad}"'
else:
attrs[4] = f'style="background: {color_good}"'
attrs[4] = attrs[5] = f'style="background: {color_good}"'
else:
attrs[4] = ''
attrs[4] = attrs[5] = ''
if (float(r[2]) + float(r[3])) / 2 > allowed_single_run_time:
attrs[2] = f'style="background: {color_bad}"'


@@ -16,7 +16,7 @@ if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
fi
# In order to allow packages directory to be anywhere, and to reduce amoun of context sent to the docker daemon,
# In order to allow packages directory to be anywhere, and to reduce amount of context sent to the docker daemon,
# all images are built in multiple stages:
# 1. build base image, install dependencies
# 2. run image with volume mounted, install what needed from those volumes
@@ -26,14 +26,14 @@ fi
# TODO: optionally mount most recent clickhouse-test and queries directory from local machine
if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
docker build \
docker build --network=host \
-f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \
--target clickhouse-test-runner-base \
-t clickhouse-test-runner-base:preinstall \
"${CLICKHOUSE_DOCKER_DIR}/test/stateless"
docker rm -f clickhouse-test-runner-installing-packages || true
docker run \
docker run --network=host \
-v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
--name clickhouse-test-runner-installing-packages \
clickhouse-test-runner-base:preinstall
@@ -50,19 +50,19 @@ if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"
if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
docker build \
docker build --network=host \
-f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
--target clickhouse-server-base \
-t clickhouse-server-base:preinstall \
"${CLICKHOUSE_DOCKER_DIR}/server"
docker rm -f clickhouse_server_base_installing_server || true
docker run -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
docker run --network=host -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
--name clickhouse_server_base_installing_server \
clickhouse-server-base:preinstall
docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall
docker build \
docker build --network=host \
-f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
--target clickhouse-server \
-t "${CLICKHOUSE_SERVER_IMAGE}" \


@@ -1015,7 +1015,7 @@ The table below shows supported data types and how they match ClickHouse [data t
Unsupported Avro data types: `record` (non-root), `map`
Unsupported Avro logical data types: `uuid`, `time-millis`, `time-micros`, `duration`
Unsupported Avro logical data types: `time-millis`, `time-micros`, `duration`
### Inserting Data {#inserting-data-1}


@@ -76,8 +76,11 @@ ECT 1
```
By default, data is returned in TabSeparated format (for more information, see the “Formats” section).
You use the FORMAT clause of the query to request any other format.
Also, you can use the default_format URL parameter or X-ClickHouse-Format header to specify a default format other than TabSeparated.
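For example, a minimal sketch of setting this header from Python with `requests` (not part of this diff; it assumes a local server listening on port 8123):

``` python
import requests

# Ask the server to return JSONEachRow by default, without a FORMAT clause in the query.
response = requests.post(
    "http://localhost:8123/",
    data="SELECT 1",
    headers={"X-ClickHouse-Format": "JSONEachRow"},
)
print(response.text)  # e.g. {"1":1}
```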
``` bash
$ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @-
┏━━━┓
@@ -167,7 +170,7 @@ $ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gz
!!! note "Note"
Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`) and you might get decompressed data even if you use the compression settings correctly.
You can use the database URL parameter to specify the default database.
You can use the database URL parameter or X-ClickHouse-Database header to specify the default database.
``` bash
$ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @-


@@ -0,0 +1,89 @@
# system.stack_trace {#system-tables_stack_trace}
Contains stack traces of all server threads. Allows developers to introspect the server state.
To analyze stack frames, use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md).
Columns:
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread identifier.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query_log](../system-tables/query_log.md) system table.
- `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — A [stack trace](https://en.wikipedia.org/wiki/Stack_trace) which represents a list of physical addresses where the called methods are stored.
**Example**
Enabling introspection functions:
``` sql
SET allow_introspection_functions = 1;
```
Getting symbols from ClickHouse object files:
``` sql
WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_id, query_id, arrayStringConcat(all, '\n') AS res FROM system.stack_trace LIMIT 1 \G
```
``` text
Row 1:
──────
thread_id: 686
query_id: 1a11f70b-626d-47c1-b948-f9c7b206395d
res: sigqueue
DB::StorageSystemStackTrace::fillData(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::Context const&, DB::SelectQueryInfo const&) const
DB::IStorageSystemOneBlock<DB::StorageSystemStackTrace>::read(std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&, DB::SelectQueryInfo const&, DB::Context const&, DB::QueryProcessingStage::Enum, unsigned long, unsigned int)
DB::InterpreterSelectQuery::executeFetchColumns(DB::QueryProcessingStage::Enum, DB::QueryPipeline&, std::__1::shared_ptr<DB::PrewhereInfo> const&, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&)
DB::InterpreterSelectQuery::executeImpl(DB::QueryPipeline&, std::__1::shared_ptr<DB::IBlockInputStream> const&, std::__1::optional<DB::Pipe>)
DB::InterpreterSelectQuery::execute()
DB::InterpreterSelectWithUnionQuery::execute()
DB::executeQueryImpl(char const*, char const*, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool, DB::ReadBuffer*)
DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool)
DB::TCPHandler::runImpl()
DB::TCPHandler::run()
Poco::Net::TCPServerConnection::start()
Poco::Net::TCPServerDispatcher::run()
Poco::PooledThread::run()
Poco::ThreadImpl::runnableEntry(void*)
start_thread
__clone
```
Getting filenames and line numbers in ClickHouse source code:
``` sql
WITH arrayMap(x -> addressToLine(x), trace) AS all, arrayFilter(x -> x LIKE '%/dbms/%', all) AS dbms SELECT thread_id, query_id, arrayStringConcat(notEmpty(dbms) ? dbms : all, '\n') AS res FROM system.stack_trace LIMIT 1 \G
```
``` text
Row 1:
──────
thread_id: 686
query_id: cad353e7-1c29-4b2e-949f-93e597ab7a54
res: /lib/x86_64-linux-gnu/libc-2.27.so
/build/obj-x86_64-linux-gnu/../src/Storages/System/StorageSystemStackTrace.cpp:182
/build/obj-x86_64-linux-gnu/../contrib/libcxx/include/vector:656
/build/obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:1338
/build/obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:751
/build/obj-x86_64-linux-gnu/../contrib/libcxx/include/optional:224
/build/obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectWithUnionQuery.cpp:192
/build/obj-x86_64-linux-gnu/../src/Interpreters/executeQuery.cpp:384
/build/obj-x86_64-linux-gnu/../src/Interpreters/executeQuery.cpp:643
/build/obj-x86_64-linux-gnu/../src/Server/TCPHandler.cpp:251
/build/obj-x86_64-linux-gnu/../src/Server/TCPHandler.cpp:1197
/build/obj-x86_64-linux-gnu/../contrib/poco/Net/src/TCPServerConnection.cpp:57
/build/obj-x86_64-linux-gnu/../contrib/libcxx/include/atomic:856
/build/obj-x86_64-linux-gnu/../contrib/poco/Foundation/include/Poco/Mutex_POSIX.h:59
/build/obj-x86_64-linux-gnu/../contrib/poco/Foundation/include/Poco/AutoPtr.h:223
/lib/x86_64-linux-gnu/libpthread-2.27.so
/lib/x86_64-linux-gnu/libc-2.27.so
```
**See Also**
- [Introspection Functions](../../sql-reference/functions/introspection.md) — Which introspection functions are available and how to use them.
- [system.trace_log](../system-tables/trace_log.md) — Contains stack traces collected by the sampling query profiler.
- [arrayMap](../../sql-reference/functions/higher-order-functions.md#higher_order_functions-array-map) — Description and usage example of the `arrayMap` function.
- [arrayFilter](../../sql-reference/functions/higher-order-functions.md#higher_order_functions-array-filter) — Description and usage example of the `arrayFilter` function.
[Original article](https://clickhouse.tech/docs/en/operations/system-tables/stack_trace) <!--hide-->


@@ -983,7 +983,8 @@ ProfileEvents.Values: [1,97,81,5,81]
- [system.query_log](#system_tables-query_log) — описание системной таблицы `query_log`, которая содержит общую информацию о выполненных запросах.
## system.trace\_log {#system_tables-trace_log}
## system.trace_log {#system_tables-trace_log}
Contains stack traces collected by the sampling query profiler.
@@ -1029,6 +1030,93 @@ thread_number: 48
query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915
trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
```
## system.stack_trace {#system-tables_stack_trace}
Содержит трассировки стека всех серверных потоков. Позволяет разработчикам анализировать состояние сервера.
Для анализа логов используйте [функции интроспекции](../sql-reference/functions/introspection.md): `addressToLine`, `addressToSymbol` и `demangle`.
Столбцы:
- `thread_id` ([UInt64](../sql-reference/data-types/int-uint.md)) — Идентификатор потока.
- `query_id` ([String](../sql-reference/data-types/string.md)) — Идентификатор запроса. Может быть использован для получения подробной информации о выполненном запросе из системной таблицы [query_log](#system_tables-query_log).
- `trace` ([Array(UInt64)](../sql-reference/data-types/array.md)) — [Трассировка стека](https://en.wikipedia.org/wiki/Stack_trace). Представляет собой список физических адресов, по которым расположены вызываемые методы.
**Пример**
Включение функций интроспекции:
``` sql
SET allow_introspection_functions = 1;
```
Получение символов из объектных файлов ClickHouse:
``` sql
WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_id, query_id, arrayStringConcat(all, '\n') AS res FROM system.stack_trace LIMIT 1 \G
```
``` text
Row 1:
──────
thread_id: 686
query_id: 1a11f70b-626d-47c1-b948-f9c7b206395d
res: sigqueue
DB::StorageSystemStackTrace::fillData(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::Context const&, DB::SelectQueryInfo const&) const
DB::IStorageSystemOneBlock<DB::StorageSystemStackTrace>::read(std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&, DB::SelectQueryInfo const&, DB::Context const&, DB::QueryProcessingStage::Enum, unsigned long, unsigned int)
DB::InterpreterSelectQuery::executeFetchColumns(DB::QueryProcessingStage::Enum, DB::QueryPipeline&, std::__1::shared_ptr<DB::PrewhereInfo> const&, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&)
DB::InterpreterSelectQuery::executeImpl(DB::QueryPipeline&, std::__1::shared_ptr<DB::IBlockInputStream> const&, std::__1::optional<DB::Pipe>)
DB::InterpreterSelectQuery::execute()
DB::InterpreterSelectWithUnionQuery::execute()
DB::executeQueryImpl(char const*, char const*, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool, DB::ReadBuffer*)
DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool)
DB::TCPHandler::runImpl()
DB::TCPHandler::run()
Poco::Net::TCPServerConnection::start()
Poco::Net::TCPServerDispatcher::run()
Poco::PooledThread::run()
Poco::ThreadImpl::runnableEntry(void*)
start_thread
__clone
```
Получение имен файлов и номеров строк в исходном коде ClickHouse:
``` sql
WITH arrayMap(x -> addressToLine(x), trace) AS all, arrayFilter(x -> x LIKE '%/dbms/%', all) AS dbms SELECT thread_id, query_id, arrayStringConcat(notEmpty(dbms) ? dbms : all, '\n') AS res FROM system.stack_trace LIMIT 1 \G
```
``` text
Row 1:
──────
thread_id: 686
query_id: cad353e7-1c29-4b2e-949f-93e597ab7a54
res: /lib/x86_64-linux-gnu/libc-2.27.so
/build/obj-x86_64-linux-gnu/../src/Storages/System/StorageSystemStackTrace.cpp:182
/build/obj-x86_64-linux-gnu/../contrib/libcxx/include/vector:656
/build/obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:1338
/build/obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:751
/build/obj-x86_64-linux-gnu/../contrib/libcxx/include/optional:224
/build/obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectWithUnionQuery.cpp:192
/build/obj-x86_64-linux-gnu/../src/Interpreters/executeQuery.cpp:384
/build/obj-x86_64-linux-gnu/../src/Interpreters/executeQuery.cpp:643
/build/obj-x86_64-linux-gnu/../src/Server/TCPHandler.cpp:251
/build/obj-x86_64-linux-gnu/../src/Server/TCPHandler.cpp:1197
/build/obj-x86_64-linux-gnu/../contrib/poco/Net/src/TCPServerConnection.cpp:57
/build/obj-x86_64-linux-gnu/../contrib/libcxx/include/atomic:856
/build/obj-x86_64-linux-gnu/../contrib/poco/Foundation/include/Poco/Mutex_POSIX.h:59
/build/obj-x86_64-linux-gnu/../contrib/poco/Foundation/include/Poco/AutoPtr.h:223
/lib/x86_64-linux-gnu/libpthread-2.27.so
/lib/x86_64-linux-gnu/libc-2.27.so
```
**См. также**
- [Функции интроспекции](../sql-reference/functions/introspection.md) — Что такое функции интроспекции и как их использовать.
- [system.trace_log](system-tables.md#system_tables-trace_log) — Содержит трассировки стека, собранные профилировщиком выборочных запросов.
- [arrayMap](../sql-reference/functions/higher-order-functions.md#higher_order_functions-array-map) — Описание и пример использования функции `arrayMap`.
- [arrayFilter](../sql-reference/functions/higher-order-functions.md#higher_order_functions-array-filter) — Описание и пример использования функции `arrayFilter`.
## system.replicas {#system_tables-replicas}


@@ -1,7 +1,7 @@
---
toc_priority: 62
toc_folder_title: hidden
toc_title: Функции для работы с географическими координатами
toc_folder_title: Гео-данные
toc_title: hidden
---


@@ -1,71 +1,68 @@
---
toc_priority: 39
toc_title: GRANT
---
# GRANT
# GRANT {#grant}
- Присваивает [привилегии](#grant-privileges) пользователям или ролям ClickHouse.
- Назначает роли пользователям или другим ролям.
- Grants [privileges](#grant-privileges) to ClickHouse user accounts or roles.
- Assigns roles to user accounts or to the other roles.
Отозвать привилегию можно с помощью выражения [REVOKE](revoke.md). Чтобы вывести список присвоенных привилегий, воспользуйтесь выражением [SHOW GRANTS](show.md#show-grants-statement).
To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. Also you can list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants-statement) statement.
## Синтаксис присвоения привилегий {#grant-privigele-syntax}
## Granting Privilege Syntax {#grant-privigele-syntax}
``` sql
```sql
GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION]
```
- `privilege` — Type of privilege.
- `role` — ClickHouse user role.
- `user` — ClickHouse user account.
- `privilege` — Тип привилегии
- `role`Роль пользователя ClickHouse.
- `user`Пользователь ClickHouse.
The `WITH GRANT OPTION` clause grants `user` or `role` with permission to execute the `GRANT` query. Users can grant privileges of the same scope they have and less.
`WITH GRANT OPTION` разрешает пользователю или роли выполнять запрос `GRANT`. Пользователь может выдавать только те привилегии, которые есть у него, той же или меньшей области действий.
## Assigning Role Syntax {#assign-role-syntax}
``` sql
## Синтаксис назначения ролей {#assign-role-syntax}
```sql
GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
```
- `role` — ClickHouse user role.
- `user` — ClickHouse user account.
- `role`Роль пользователя ClickHouse.
- `user`Пользователь ClickHouse.
The `WITH ADMIN OPTION` clause grants [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`.
`WITH ADMIN OPTION` присваивает привилегию [ADMIN OPTION](#admin-option-privilege) пользователю или роли.
## Usage {#grant-usage}
## Использование {#grant-usage}
To use `GRANT`, your account must have the `GRANT OPTION` privilege. You can grant privileges only inside the scope of your account privileges.
Для использования `GRANT` пользователь должен иметь привилегию `GRANT OPTION`. Пользователь может выдавать привилегии только внутри области действий назначенных ему самому привилегий.
For example, administrator has granted privileges to the `john` account by the query:
Например, администратор выдал привилегию пользователю `john`:
``` sql
```sql
GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
```
It means that `john` has the permission to execute:
Это означает, что пользователю `john` разрешено выполнять:
- `SELECT x,y FROM db.table`.
- `SELECT x FROM db.table`.
- `SELECT y FROM db.table`.
`john` can't execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` also is not available. Processing this query, ClickHouse doesn't return any data, even `x` and `y`. The only exception is if a table contains only `x` and `y` columns. In this case ClickHouse returns all the data.
`john` не может выполнить `SELECT z FROM db.table` или `SELECT * FROM db.table`. После обработки данных запросов ClickHouse ничего не вернет — даже `x` или `y`. Единственное исключение — если таблица содержит только столбцы `x` и `y`. В таком случае ClickHouse вернет все данные.
Also `john` has the `GRANT OPTION` privilege, so it can grant other users with privileges of the same or smaller scope.
Также у `john` есть привилегия `GRANT OPTION`. `john` может выдать другим пользователям привилегии той же или меньшей области действий из тех, которые есть у него.
Specifying privileges you can use asterisk (`*`) instead of a table or a database name. For example, the `GRANT SELECT ON db.* TO john` query allows `john` to execute the `SELECT` query over all the tables in `db` database. Also, you can omit database name. In this case privileges are granted for current database. For example, `GRANT SELECT ON * TO john` grants the privilege on all the tables in the current database, `GRANT SELECT ON mytable TO john` grants the privilege on the `mytable` table in the current database.
При присвоении привилегий допускается использовать астериск (`*`) вместо имени таблицы или базы данных. Например, запрос `GRANT SELECT ON db.* TO john` позволит пользователю `john` выполнять `SELECT` над всеми таблицам в базе данных `db`. Также вы можете опускать имя базы данных. В таком случае привилегии позволят совершать операции над текущей базой данных. Например, запрос `GRANT SELECT ON * TO john` выдаст привилегию на выполнение `SELECT` над всеми таблицами в текущей базе данных; `GRANT SELECT ON mytable TO john` — только над таблицей `mytable` в текущей базе данных.
Access to the `system` database is always allowed (since this database is used for processing queries).
Доступ к базе данных `system` разрешен всегда (данная база данных используется при обработке запросов).
You can grant multiple privileges to multiple accounts in one query. The query `GRANT SELECT, INSERT ON *.* TO john, robin` allows accounts `john` and `robin` to execute the `INSERT` and `SELECT` queries over all the tables in all the databases on the server.
Вы можете присвоить несколько привилегий нескольким пользователям в одном запросе. Запрос `GRANT SELECT, INSERT ON *.* TO john, robin` позволит пользователям `john` и `robin` выполнять `INSERT` и `SELECT` над всеми таблицами всех баз данных на сервере.
## Privileges {#grant-privileges}
Privilege is a permission to execute specific kind of queries.
## Привилегии {#grant-privileges}
Privileges have a hierarchical structure. A set of permitted queries depends on the privilege scope.
Привилегия — это разрешение на выполнение определенного типа запросов.
Hierarchy of privileges:
Привилегии имеют иерархическую структуру. Набор разрешенных запросов зависит от области действия привилегии.
Иерархия привилегий:
- [SELECT](#grant-select)
- [INSERT](#grant-insert)
@@ -96,7 +93,7 @@ Hierarchy of privileges:
- `ALTER FETCH PARTITION`
- `ALTER FREEZE PARTITION`
- `ALTER VIEW`
- `ALTER VIEW REFRESH`
- `ALTER VIEW REFRESH `
- `ALTER VIEW MODIFY QUERY`
- [CREATE](#grant-create)
- `CREATE DATABASE`
@@ -171,304 +168,312 @@ Hierarchy of privileges:
- `FILE`
- `URL`
- `REMOTE`
- `YSQL`
- `MYSQL`
- `ODBC`
- `JDBC`
- `HDFS`
- `S3`
- [dictGet](#grant-dictget)
Examples of how this hierarchy is treated:
Примеры того, как трактуется данная иерархия:
- The `ALTER` privilege includes all other `ALTER*` privileges.
- `ALTER CONSTRAINT` includes `ALTER ADD CONSTRAINT` and `ALTER DROP CONSTRAINT` privileges.
- Привилегия `ALTER` включает все остальные `ALTER*` привилегии.
- `ALTER CONSTRAINT` включает `ALTER ADD CONSTRAINT` и `ALTER DROP CONSTRAINT`.
Privileges are applied at different levels. Knowing of a level suggests syntax available for privilege.
Привилегии применяются на разных уровнях. Уровень определяет синтаксис присваивания привилегии.
Levels (from lower to higher):
Уровни (от низшего к высшему):
- `COLUMN` — Privilege can be granted for column, table, database, or globally.
- `TABLE` — Privilege can be granted for table, database, or globally.
- `VIEW` — Privilege can be granted for view, database, or globally.
- `DICTIONARY` — Privilege can be granted for dictionary, database, or globally.
- `DATABASE` — Privilege can be granted for database or globally.
- `GLOBAL` — Privilege can be granted only globally.
- `GROUP` — Groups privileges of different levels. When `GROUP`-level privilege is granted, only that privileges from the group are granted which correspond to the used syntax.
- `COLUMN` — Привилегия присваивается для столбца, таблицы, базы данных или глобально.
- `TABLE` — Привилегия присваивается для таблицы, базы данных или глобально.
- `VIEW` — Привилегия присваивается для представления, базы данных или глобально.
- `DICTIONARY` — Привилегия присваивается для словаря, базы данных или глобально.
- `DATABASE` — Привилегия присваивается для базы данных или глобально.
- `GLOBAL` — Привилегия присваивается только глобально.
- `GROUP` — Группирует привилегии разных уровней. При присвоении привилегии уровня `GROUP` присваиваются только привилегии из группы в соответствии с используемым синтаксисом.
Examples of allowed syntax:
Примеры допустимого синтаксиса:
- `GRANT SELECT(x) ON db.table TO user`
- `GRANT SELECT ON db.* TO user`
Examples of disallowed syntax:
Примеры недопустимого синтаксиса:
- `GRANT CREATE USER(x) ON db.table TO user`
- `GRANT CREATE USER ON db.* TO user`
The special privilege [ALL](#grant-all) grants all the privileges to a user account or a role.
Специальная привилегия [ALL](#grant-all) присваивает все привилегии пользователю или роли.
By default, a user account or a role has no privileges.
По умолчанию пользователь или роль не имеют привилегий.
If a user or a role has no privileges, it is displayed as [NONE](#grant-none) privilege.
Отсутствие привилегий у пользователя или роли отображается как привилегия [NONE](#grant-none).
Выполнение некоторых запросов требует определенного набора привилегий. Например, чтобы выполнить запрос [RENAME](misc.md#misc_operations-rename), нужны следующие привилегии: `SELECT`, `CREATE TABLE`, `INSERT` и `DROP TABLE`.
Some queries by their implementation require a set of privileges. For example, to execute the [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`.
### SELECT {#grant-select}
Allows executing [SELECT](../../sql-reference/statements/select/index.md) queries.
Разрешает выполнять запросы [SELECT](select/index.md).
Privilege level: `COLUMN`.
Уровень: `COLUMN`.
**Description**
**Описание**
User granted with this privilege can execute `SELECT` queries over a specified list of columns in the specified table and database. If user includes other columns then specified a query returns no data.
Пользователь с данной привилегией может выполнять запросы `SELECT` над определенными столбцами из определенной таблицы и базы данных. При включении в запрос других столбцов запрос ничего не вернет.
Consider the following privilege:
Рассмотрим следующую привилегию:
``` sql
```sql
GRANT SELECT(x,y) ON db.table TO john
```
This privilege allows `john` to execute any `SELECT` query that involves data from the `x` and/or `y` columns in `db.table`, for example, `SELECT x FROM db.table`. `john` can't execute `SELECT z FROM db.table`. The `SELECT * FROM db.table` also is not available. Processing this query, ClickHouse doesn't return any data, even `x` and `y`. The only exception is if a table contains only `x` and `y` columns, in this case ClickHouse returns all the data.
Данная привилегия позволяет пользователю `john` выполнять выборку данных из столбцов `x` и/или `y` в `db.table`, например, `SELECT x FROM db.table`. `john` не может выполнить `SELECT z FROM db.table` или `SELECT * FROM db.table`. После обработки данных запросов ClickHouse ничего не вернет — даже `x` или `y`. Единственное исключение — если таблица содержит только столбцы `x` и `y`. В таком случае ClickHouse вернет все данные.
### INSERT {#grant-insert}
Allows executing [INSERT](../../sql-reference/statements/insert-into.md) queries.
Разрешает выполнять запросы [INSERT](insert-into.md).
Privilege level: `COLUMN`.
Уровень: `COLUMN`.
**Description**
**Описание**
User granted with this privilege can execute `INSERT` queries over a specified list of columns in the specified table and database. If user includes other columns then specified a query doesn't insert any data.
Пользователь с данной привилегией может выполнять запросы `INSERT` над определенными столбцами из определенной таблицы и базы данных. При включении в запрос других столбцов запрос не добавит никаких данных.
**Example**
**Пример**
``` sql
```sql
GRANT INSERT(x,y) ON db.table TO john
```
The granted privilege allows `john` to insert data to the `x` and/or `y` columns in `db.table`.
Присвоенная привилегия позволит пользователю `john` вставить данные в столбцы `x` и/или `y` в `db.table`.
### ALTER {#grant-alter}
Allows executing [ALTER](../../sql-reference/statements/alter.md) queries according to the following hierarchy of privileges:
Разрешает выполнять запросы [ALTER](alter.md) в соответствии со следующей иерархией привилегий:
- `ALTER`. Level: `COLUMN`.
- `ALTER TABLE`. Level: `GROUP`
- `ALTER UPDATE`. Level: `COLUMN`. Aliases: `UPDATE`
- `ALTER DELETE`. Level: `COLUMN`. Aliases: `DELETE`
- `ALTER COLUMN`. Level: `GROUP`
- `ALTER ADD COLUMN`. Level: `COLUMN`. Aliases: `ADD COLUMN`
- `ALTER DROP COLUMN`. Level: `COLUMN`. Aliases: `DROP COLUMN`
- `ALTER MODIFY COLUMN`. Level: `COLUMN`. Aliases: `MODIFY COLUMN`
- `ALTER COMMENT COLUMN`. Level: `COLUMN`. Aliases: `COMMENT COLUMN`
- `ALTER CLEAR COLUMN`. Level: `COLUMN`. Aliases: `CLEAR COLUMN`
- `ALTER RENAME COLUMN`. Level: `COLUMN`. Aliases: `RENAME COLUMN`
- `ALTER INDEX`. Level: `GROUP`. Aliases: `INDEX`
- `ALTER ORDER BY`. Level: `TABLE`. Aliases: `ALTER MODIFY ORDER BY`, `MODIFY ORDER BY`
- `ALTER ADD INDEX`. Level: `TABLE`. Aliases: `ADD INDEX`
- `ALTER DROP INDEX`. Level: `TABLE`. Aliases: `DROP INDEX`
- `ALTER MATERIALIZE INDEX`. Level: `TABLE`. Aliases: `MATERIALIZE INDEX`
- `ALTER CLEAR INDEX`. Level: `TABLE`. Aliases: `CLEAR INDEX`
- `ALTER CONSTRAINT`. Level: `GROUP`. Aliases: `CONSTRAINT`
- `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
- `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
- `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
- `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
- `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
- `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
- `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`
- `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION`
- `ALTER VIEW` Level: `GROUP`
- `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
- `ALTER VIEW MODIFY QUERY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY QUERY`
- `ALTER`. Уровень: `COLUMN`.
- `ALTER TABLE`. Уровень: `GROUP`
- `ALTER UPDATE`. Уровень: `COLUMN`. Алиасы: `UPDATE`
- `ALTER DELETE`. Уровень: `COLUMN`. Алиасы: `DELETE`
- `ALTER COLUMN`. Уровень: `GROUP`
- `ALTER ADD COLUMN`. Уровень: `COLUMN`. Алиасы: `ADD COLUMN`
- `ALTER DROP COLUMN`. Уровень: `COLUMN`. Алиасы: `DROP COLUMN`
- `ALTER MODIFY COLUMN`. Уровень: `COLUMN`. Алиасы: `MODIFY COLUMN`
- `ALTER COMMENT COLUMN`. Уровень: `COLUMN`. Алиасы: `COMMENT COLUMN`
- `ALTER CLEAR COLUMN`. Уровень: `COLUMN`. Алиасы: `CLEAR COLUMN`
- `ALTER RENAME COLUMN`. Уровень: `COLUMN`. Алиасы: `RENAME COLUMN`
- `ALTER INDEX`. Уровень: `GROUP`. Алиасы: `INDEX`
- `ALTER ORDER BY`. Уровень: `TABLE`. Алиасы: `ALTER MODIFY ORDER BY`, `MODIFY ORDER BY`
- `ALTER ADD INDEX`. Уровень: `TABLE`. Алиасы: `ADD INDEX`
- `ALTER DROP INDEX`. Уровень: `TABLE`. Алиасы: `DROP INDEX`
- `ALTER MATERIALIZE INDEX`. Уровень: `TABLE`. Алиасы: `MATERIALIZE INDEX`
- `ALTER CLEAR INDEX`. Уровень: `TABLE`. Алиасы: `CLEAR INDEX`
- `ALTER CONSTRAINT`. Уровень: `GROUP`. Алиасы: `CONSTRAINT`
- `ALTER ADD CONSTRAINT`. Уровень: `TABLE`. Алиасы: `ADD CONSTRAINT`
- `ALTER DROP CONSTRAINT`. Уровень: `TABLE`. Алиасы: `DROP CONSTRAINT`
- `ALTER TTL`. Уровень: `TABLE`. Алиасы: `ALTER MODIFY TTL`, `MODIFY TTL`
- `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL`
- `ALTER SETTINGS`. Уровень: `TABLE`. Алиасы: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
- `ALTER MOVE PARTITION`. Уровень: `TABLE`. Алиасы: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
- `ALTER FETCH PARTITION`. Уровень: `TABLE`. Алиасы: `FETCH PARTITION`
- `ALTER FREEZE PARTITION`. Уровень: `TABLE`. Алиасы: `FREEZE PARTITION`
- `ALTER VIEW` Уровень: `GROUP`
- `ALTER VIEW REFRESH `. Уровень: `VIEW`. Алиасы: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
- `ALTER VIEW MODIFY QUERY`. Уровень: `VIEW`. Алиасы: `ALTER TABLE MODIFY QUERY`
Examples of how this hierarchy is treated:
Примеры того, как трактуется данная иерархия:
- The `ALTER` privilege includes all other `ALTER*` privileges.
- `ALTER CONSTRAINT` includes `ALTER ADD CONSTRAINT` and `ALTER DROP CONSTRAINT` privileges.
- Привилегия `ALTER` включает все остальные `ALTER*` привилегии.
- `ALTER CONSTRAINT` включает `ALTER ADD CONSTRAINT` и `ALTER DROP CONSTRAINT`.
**Notes**
**Дополнительно**
- The `MODIFY SETTING` privilege allows modifying table engine settings. It doesn't affect settings or server configuration parameters.
- The `ATTACH` operation needs the [CREATE](#grant-create) privilege.
- The `DETACH` operation needs the [DROP](#grant-drop) privilege.
- To stop mutation by the [KILL MUTATION](../../sql-reference/statements/misc.md#kill-mutation-statement) query, you need to have a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.
- Привилегия `MODIFY SETTING` позволяет изменять настройки движков таблиц. Не влияет на настройки или конфигурационные параметры сервера.
- Операция `ATTACH` требует наличие привилегии [CREATE](#grant-create).
- Операция `DETACH` требует наличие привилегии [DROP](#grant-drop).
- Для остановки мутации с помощью [KILL MUTATION](misc.md#kill-mutation-statement), необходима привилегия на выполнение данной мутации. Например, чтобы остановить запрос `ALTER UPDATE`, необходима одна из привилегий: `ALTER UPDATE`, `ALTER TABLE` или `ALTER`.
### CREATE {#grant-create}
Allows executing [CREATE](../../sql-reference/statements/create.md) and [ATTACH](../../sql-reference/statements/misc.md#attach) DDL-queries according to the following hierarchy of privileges:
Разрешает выполнять DDL-запросы [CREATE](create.md) и [ATTACH](misc.md#attach) в соответствии со следующей иерархией привилегий:
- `CREATE`. Level: `GROUP`
- `CREATE DATABASE`. Level: `DATABASE`
- `CREATE TABLE`. Level: `TABLE`
- `CREATE VIEW`. Level: `VIEW`
- `CREATE DICTIONARY`. Level: `DICTIONARY`
- `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
- `CREATE`. Уровень: `GROUP`
- `CREATE DATABASE`. Уровень: `DATABASE`
- `CREATE TABLE`. Уровень: `TABLE`
- `CREATE VIEW`. Уровень: `VIEW`
- `CREATE DICTIONARY`. Уровень: `DICTIONARY`
- `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL`
**Notes**
**Дополнительно**
- To delete the created table, a user needs [DROP](#grant-drop).
- Для удаления созданной таблицы пользователю необходима привилегия [DROP](#grant-drop).
### DROP {#grant-drop}
Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH](../../sql-reference/statements/misc.md#detach-statement) queries according to the following hierarchy of privileges:
Разрешает выполнять запросы [DROP](misc.md#drop) и [DETACH](misc.md#detach-statement) в соответствии со следующей иерархией привилегий:
- `DROP`. Уровень:
- `DROP DATABASE`. Уровень: `DATABASE`
- `DROP TABLE`. Уровень: `TABLE`
- `DROP VIEW`. Уровень: `VIEW`
- `DROP DICTIONARY`. Уровень: `DICTIONARY`
- `DROP`. Level:
- `DROP DATABASE`. Level: `DATABASE`
- `DROP TABLE`. Level: `TABLE`
- `DROP VIEW`. Level: `VIEW`
- `DROP DICTIONARY`. Level: `DICTIONARY`
### TRUNCATE {#grant-truncate}
Allows executing [TRUNCATE](../../sql-reference/statements/misc.md#truncate-statement) queries.
Разрешает выполнять запросы [TRUNCATE](misc.md#truncate-statement).
Privilege level: `TABLE`.
Уровень: `TABLE`.
### OPTIMIZE {#grant-optimize}
Allows executing [OPTIMIZE TABLE](../../sql-reference/statements/misc.md#misc_operations-optimize) queries.
Разрешает выполнять запросы [OPTIMIZE TABLE](misc.md#misc_operations-optimize).
Privilege level: `TABLE`.
Уровень: `TABLE`.
### SHOW {#grant-show}
Allows executing `SHOW`, `DESCRIBE`, `USE`, and `EXISTS` queries according to the following hierarchy of privileges:
Разрешает выполнять запросы `SHOW`, `DESCRIBE`, `USE` и `EXISTS` в соответствии со следующей иерархией привилегий:
- `SHOW`. Level: `GROUP`
- `SHOW DATABASES`. Level: `DATABASE`. Allows to execute `SHOW DATABASES`, `SHOW CREATE DATABASE`, `USE <database>` queries.
- `SHOW TABLES`. Level: `TABLE`. Allows to execute `SHOW TABLES`, `EXISTS <table>`, `CHECK <table>` queries.
- `SHOW COLUMNS`. Level: `COLUMN`. Allows to execute `SHOW CREATE TABLE`, `DESCRIBE` queries.
- `SHOW DICTIONARIES`. Level: `DICTIONARY`. Allows to execute `SHOW DICTIONARIES`, `SHOW CREATE DICTIONARY`, `EXISTS <dictionary>` queries.
- `SHOW`. Уровень: `GROUP`
- `SHOW DATABASES`. Уровень: `DATABASE`. Разрешает выполнять запросы `SHOW DATABASES`, `SHOW CREATE DATABASE`, `USE <database>`.
- `SHOW TABLES`. Уровень: `TABLE`. Разрешает выполнять запросы `SHOW TABLES`, `EXISTS <table>`, `CHECK <table>`.
- `SHOW COLUMNS`. Уровень: `COLUMN`. Разрешает выполнять запросы `SHOW CREATE TABLE`, `DESCRIBE`.
- `SHOW DICTIONARIES`. Уровень: `DICTIONARY`. Разрешает выполнять запросы `SHOW DICTIONARIES`, `SHOW CREATE DICTIONARY`, `EXISTS <dictionary>`.
**Notes**
**Дополнительно**
У пользователя есть привилегия `SHOW`, если ему присвоена любая другая привилегия по отношению к определенной таблице, словарю или базе данных.
A user has the `SHOW` privilege if it has any other privilege concerning the specified table, dictionary or database.
### KILL QUERY {#grant-kill-query}
Allows executing [KILL](../../sql-reference/statements/misc.md#kill-query-statement) queries according to the following hierarchy of privileges:
Разрешает выполнять запросы [KILL](misc.md#kill-query-statement) в соответствии со следующей иерархией привилегий:
Privilege level: `GLOBAL`.
Уровень: `GLOBAL`.
**Notes**
**Дополнительно**
`KILL QUERY` позволяет пользователю останавливать запросы других пользователей.
`KILL QUERY` privilege allows one user to kill queries of other users.
### ACCESS MANAGEMENT {#grant-access-management}
Allows a user to execute queries that manage users, roles and row policies.
Разрешает пользователю выполнять запросы на управление пользователями, ролями и политиками доступа к строкам.
- `ACCESS MANAGEMENT`. Level: `GROUP`
- `CREATE USER`. Level: `GLOBAL`
- `ALTER USER`. Level: `GLOBAL`
- `DROP USER`. Level: `GLOBAL`
- `CREATE ROLE`. Level: `GLOBAL`
- `ALTER ROLE`. Level: `GLOBAL`
- `DROP ROLE`. Level: `GLOBAL`
- `ROLE ADMIN`. Level: `GLOBAL`
- `CREATE ROW POLICY`. Level: `GLOBAL`. Aliases: `CREATE POLICY`
- `ALTER ROW POLICY`. Level: `GLOBAL`. Aliases: `ALTER POLICY`
- `DROP ROW POLICY`. Level: `GLOBAL`. Aliases: `DROP POLICY`
- `CREATE QUOTA`. Level: `GLOBAL`
- `ALTER QUOTA`. Level: `GLOBAL`
- `DROP QUOTA`. Level: `GLOBAL`
- `CREATE SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `CREATE PROFILE`
- `ALTER SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `ALTER PROFILE`
- `DROP SETTINGS PROFILE`. Level: `GLOBAL`. Aliases: `DROP PROFILE`
- `SHOW ACCESS`. Level: `GROUP`
- `SHOW_USERS`. Level: `GLOBAL`. Aliases: `SHOW CREATE USER`
- `SHOW_ROLES`. Level: `GLOBAL`. Aliases: `SHOW CREATE ROLE`
- `SHOW_ROW_POLICIES`. Level: `GLOBAL`. Aliases: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY`
- `SHOW_QUOTAS`. Level: `GLOBAL`. Aliases: `SHOW CREATE QUOTA`
- `SHOW_SETTINGS_PROFILES`. Level: `GLOBAL`. Aliases: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE`
- `ACCESS MANAGEMENT`. Уровень: `GROUP`
- `CREATE USER`. Уровень: `GLOBAL`
- `ALTER USER`. Уровень: `GLOBAL`
- `DROP USER`. Уровень: `GLOBAL`
- `CREATE ROLE`. Уровень: `GLOBAL`
- `ALTER ROLE`. Уровень: `GLOBAL`
- `DROP ROLE`. Уровень: `GLOBAL`
- `ROLE ADMIN`. Уровень: `GLOBAL`
- `CREATE ROW POLICY`. Уровень: `GLOBAL`. Алиасы: `CREATE POLICY`
- `ALTER ROW POLICY`. Уровень: `GLOBAL`. Алиасы: `ALTER POLICY`
- `DROP ROW POLICY`. Уровень: `GLOBAL`. Алиасы: `DROP POLICY`
- `CREATE QUOTA`. Уровень: `GLOBAL`
- `ALTER QUOTA`. Уровень: `GLOBAL`
- `DROP QUOTA`. Уровень: `GLOBAL`
- `CREATE SETTINGS PROFILE`. Уровень: `GLOBAL`. Алиасы: `CREATE PROFILE`
- `ALTER SETTINGS PROFILE`. Уровень: `GLOBAL`. Алиасы: `ALTER PROFILE`
- `DROP SETTINGS PROFILE`. Уровень: `GLOBAL`. Алиасы: `DROP PROFILE`
- `SHOW ACCESS`. Уровень: `GROUP`
- `SHOW_USERS`. Уровень: `GLOBAL`. Алиасы: `SHOW CREATE USER`
- `SHOW_ROLES`. Уровень: `GLOBAL`. Алиасы: `SHOW CREATE ROLE`
- `SHOW_ROW_POLICIES`. Уровень: `GLOBAL`. Алиасы: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY`
- `SHOW_QUOTAS`. Уровень: `GLOBAL`. Алиасы: `SHOW CREATE QUOTA`
- `SHOW_SETTINGS_PROFILES`. Уровень: `GLOBAL`. Алиасы: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE`
The `ROLE ADMIN` privilege allows a user to assign and revoke any roles including those which are not assigned to the user with the admin option.
Привилегия `ROLE ADMIN` разрешает пользователю назначать и отзывать любые роли, включая те, которые не назначены пользователю с опцией администратора.
### SYSTEM {#grant-system}
Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) queries according to the following hierarchy of privileges.
Разрешает выполнять запросы [SYSTEM](system.md) в соответствии со следующей иерархией привилегий:
- `SYSTEM`. Level: `GROUP`
- `SYSTEM SHUTDOWN`. Level: `GLOBAL`. Aliases: `SYSTEM KILL`, `SHUTDOWN`
- `SYSTEM DROP CACHE`. Aliases: `DROP CACHE`
- `SYSTEM DROP DNS CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP DNS`, `DROP DNS CACHE`, `DROP DNS`
- `SYSTEM DROP MARK CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP MARK`, `DROP MARK CACHE`, `DROP MARKS`
- `SYSTEM DROP UNCOMPRESSED CACHE`. Level: `GLOBAL`. Aliases: `SYSTEM DROP UNCOMPRESSED`, `DROP UNCOMPRESSED CACHE`, `DROP UNCOMPRESSED`
- `SYSTEM RELOAD`. Level: `GROUP`
- `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG`
- `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
- `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: R`ELOAD EMBEDDED DICTIONARIES`
- `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
- `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
- `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
- `SYSTEM MOVES`. Level: `TABLE`. Aliases: `SYSTEM STOP MOVES`, `SYSTEM START MOVES`, `STOP MOVES`, `START MOVES`
- `SYSTEM SENDS`. Level: `GROUP`. Aliases: `SYSTEM STOP SENDS`, `SYSTEM START SENDS`, `STOP SENDS`, `START SENDS`
- `SYSTEM DISTRIBUTED SENDS`. Level: `TABLE`. Aliases: `SYSTEM STOP DISTRIBUTED SENDS`, `SYSTEM START DISTRIBUTED SENDS`, `STOP DISTRIBUTED SENDS`, `START DISTRIBUTED SENDS`
- `SYSTEM REPLICATED SENDS`. Level: `TABLE`. Aliases: `SYSTEM STOP REPLICATED SENDS`, `SYSTEM START REPLICATED SENDS`, `STOP REPLICATED SENDS`, `START REPLICATED SENDS`
- `SYSTEM REPLICATION QUEUES`. Level: `TABLE`. Aliases: `SYSTEM STOP REPLICATION QUEUES`, `SYSTEM START REPLICATION QUEUES`, `STOP REPLICATION QUEUES`, `START REPLICATION QUEUES`
- `SYSTEM SYNC REPLICA`. Level: `TABLE`. Aliases: `SYNC REPLICA`
- `SYSTEM RESTART REPLICA`. Level: `TABLE`. Aliases: `RESTART REPLICA`
- `SYSTEM FLUSH`. Level: `GROUP`
- `SYSTEM FLUSH DISTRIBUTED`. Level: `TABLE`. Aliases: `FLUSH DISTRIBUTED`
- `SYSTEM FLUSH LOGS`. Level: `GLOBAL`. Aliases: `FLUSH LOGS`
- `SYSTEM`. Уровень: `GROUP`
- `SYSTEM SHUTDOWN`. Уровень: `GLOBAL`. Алиасы: `SYSTEM KILL`, `SHUTDOWN`
- `SYSTEM DROP CACHE`. Алиасы: `DROP CACHE`
- `SYSTEM DROP DNS CACHE`. Уровень: `GLOBAL`. Алиасы: `SYSTEM DROP DNS`, `DROP DNS CACHE`, `DROP DNS`
- `SYSTEM DROP MARK CACHE`. Уровень: `GLOBAL`. Алиасы: `SYSTEM DROP MARK`, `DROP MARK CACHE`, `DROP MARKS`
- `SYSTEM DROP UNCOMPRESSED CACHE`. Уровень: `GLOBAL`. Алиасы: `SYSTEM DROP UNCOMPRESSED`, `DROP UNCOMPRESSED CACHE`, `DROP UNCOMPRESSED`
- `SYSTEM RELOAD`. Уровень: `GROUP`
- `SYSTEM RELOAD CONFIG`. Уровень: `GLOBAL`. Алиасы: `RELOAD CONFIG`
- `SYSTEM RELOAD DICTIONARY`. Уровень: `GLOBAL`. Алиасы: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
- `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Уровень: `GLOBAL`. Алиасы: `RELOAD EMBEDDED DICTIONARIES`
- `SYSTEM MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
- `SYSTEM TTL MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
- `SYSTEM FETCHES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
- `SYSTEM MOVES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP MOVES`, `SYSTEM START MOVES`, `STOP MOVES`, `START MOVES`
- `SYSTEM SENDS`. Уровень: `GROUP`. Алиасы: `SYSTEM STOP SENDS`, `SYSTEM START SENDS`, `STOP SENDS`, `START SENDS`
- `SYSTEM DISTRIBUTED SENDS`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP DISTRIBUTED SENDS`, `SYSTEM START DISTRIBUTED SENDS`, `STOP DISTRIBUTED SENDS`, `START DISTRIBUTED SENDS`
- `SYSTEM REPLICATED SENDS`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP REPLICATED SENDS`, `SYSTEM START REPLICATED SENDS`, `STOP REPLICATED SENDS`, `START REPLICATED SENDS`
- `SYSTEM REPLICATION QUEUES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP REPLICATION QUEUES`, `SYSTEM START REPLICATION QUEUES`, `STOP REPLICATION QUEUES`, `START REPLICATION QUEUES`
- `SYSTEM SYNC REPLICA`. Уровень: `TABLE`. Алиасы: `SYNC REPLICA`
- `SYSTEM RESTART REPLICA`. Уровень: `TABLE`. Алиасы: `RESTART REPLICA`
- `SYSTEM FLUSH`. Уровень: `GROUP`
- `SYSTEM FLUSH DISTRIBUTED`. Уровень: `TABLE`. Алиасы: `FLUSH DISTRIBUTED`
- `SYSTEM FLUSH LOGS`. Уровень: `GLOBAL`. Алиасы: `FLUSH LOGS`
Привилегия `SYSTEM RELOAD EMBEDDED DICTIONARIES` имплицитно присваивается привилегией `SYSTEM RELOAD DICTIONARY ON *.*`.
The `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege implicitly granted by the `SYSTEM RELOAD DICTIONARY ON *.*` privilege.
### INTROSPECTION {#grant-introspection}
Allows using [introspection](../../operations/optimizing-performance/sampling-query-profiler.md) functions.
Разрешает использовать функции [интроспекции](../../operations/optimizing-performance/sampling-query-profiler.md).
- `INTROSPECTION`. Уровень: `GROUP`. Алиасы: `INTROSPECTION FUNCTIONS`
- `addressToLine`. Уровень: `GLOBAL`
- `addressToSymbol`. Уровень: `GLOBAL`
- `demangle`. Уровень: `GLOBAL`
- `INTROSPECTION`. Level: `GROUP`. Aliases: `INTROSPECTION FUNCTIONS`
- `addressToLine`. Level: `GLOBAL`
- `addressToSymbol`. Level: `GLOBAL`
- `demangle`. Level: `GLOBAL`
### SOURCES {#grant-sources}
Allows using external data sources. Applies to [table engines](../../engines/table-engines/index.md) and [table functions](../../sql-reference/table-functions/index.md#table-functions).
Разрешает использовать внешние источники данных. Применяется к [движкам таблиц](../../engines/table-engines/index.md) и [табличным функциям](../table-functions/index.md#table-functions).
- `SOURCES`. Level: `GROUP`
- `FILE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`
- `SOURCES`. Уровень: `GROUP`
- `FILE`. Уровень: `GLOBAL`
- `URL`. Уровень: `GLOBAL`
- `REMOTE`. Уровень: `GLOBAL`
- `MYSQL`. Уровень: `GLOBAL`
- `ODBC`. Уровень: `GLOBAL`
- `JDBC`. Уровень: `GLOBAL`
- `HDFS`. Уровень: `GLOBAL`
- `S3`. Уровень: `GLOBAL`
The `SOURCES` privilege enables use of all the sources. Also you can grant a privilege for each source individually. To use sources, you need additional privileges.
Привилегия `SOURCES` разрешает использование всех источников. Также вы можете присвоить привилегию для каждого источника отдельно. Для использования источников необходимы дополнительные привилегии.
Examples:
Примеры:
- To create a table with the [MySQL table engine](../../engines/table-engines/integrations/mysql.md), you need `CREATE TABLE (ON db.table_name)` and `MYSQL` privileges.
- To use the [mysql table function](../../sql-reference/table-functions/mysql.md), you need `CREATE TEMPORARY TABLE` and `MYSQL` privileges.
- Чтобы создать таблицу с [движком MySQL](../../engines/table-engines/integrations/mysql.md), необходимы привилегии `CREATE TABLE (ON db.table_name)` и `MYSQL`.
- Чтобы использовать [табличную функцию mysql](../table-functions/mysql.md), необходимы привилегии `CREATE TEMPORARY TABLE` и `MYSQL`.
### dictGet {#grant-dictget}
- `dictGet`. Aliases: `dictHas`, `dictGetHierarchy`, `dictIsIn`
- `dictGet`. Алиасы: `dictHas`, `dictGetHierarchy`, `dictIsIn`
Allows a user to execute [dictGet](../../sql-reference/functions/ext-dict-functions.md#dictget), [dictHas](../../sql-reference/functions/ext-dict-functions.md#dicthas), [dictGetHierarchy](../../sql-reference/functions/ext-dict-functions.md#dictgethierarchy), [dictIsIn](../../sql-reference/functions/ext-dict-functions.md#dictisin) functions.
Разрешает вызывать функции [dictGet](../functions/ext-dict-functions.md#dictget), [dictHas](../functions/ext-dict-functions.md#dicthas), [dictGetHierarchy](../functions/ext-dict-functions.md#dictgethierarchy), [dictIsIn](../functions/ext-dict-functions.md#dictisin).
Privilege level: `DICTIONARY`.
Уровень: `DICTIONARY`.
**Examples**
**Примеры**
- `GRANT dictGet ON mydb.mydictionary TO john`
- `GRANT dictGet ON mydictionary TO john`
### ALL {#grant-all}
Grants all the privileges on regulated entity to a user account or a role.
Присваивает пользователю или роли все привилегии на объект с регулируемым доступом.
### NONE {#grant-none}
Doesn't grant any privileges.
Не присваивает никаких привилегий.
### ADMIN OPTION {#admin-option-privilege}
The `ADMIN OPTION` privilege allows a user to grant their role to another user.
Привилегия `ADMIN OPTION` разрешает пользователю назначать свои роли другому пользователю.
[Original article](https://clickhouse.tech/docs/en/query_language/grant/) <!--hide-->
[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/grant/) <!--hide-->


@@ -0,0 +1,78 @@
#!/usr/bin/env python3
import subprocess
import requests
import os
import time
FNAME_START = "+++"

CLOUDFLARE_URL = "https://api.cloudflare.com/client/v4/zones/4fc6fb1d46e87851605aa7fa69ca6fe0/purge_cache"

# we have changes in revision and commit sha on all pages,
# so such small changes have to be ignored
MIN_CHANGED_WORDS = 4


def collect_changed_files():
    proc = subprocess.Popen(r"git diff HEAD~1 --word-diff=porcelain | grep -e '^+[^+]\|^\-[^\-]\|^\+\+\+'", stdout=subprocess.PIPE, shell=True)
    changed_files = []
    current_file_name = ""
    changed_words = []
    while True:
        line = proc.stdout.readline().decode("utf-8").strip()
        if not line:
            break
        if FNAME_START in line:
            if changed_words:
                if len(changed_words) > MIN_CHANGED_WORDS:
                    changed_files.append(current_file_name)
                changed_words = []
            current_file_name = line[6:]
        else:
            changed_words.append(line)
    # flush the last file section: there is no trailing "+++" marker after it
    if changed_words and len(changed_words) > MIN_CHANGED_WORDS:
        changed_files.append(current_file_name)
    return changed_files


def filter_and_transform_changed_files(changed_files, base_domain):
    result = []
    for f in changed_files:
        if f.endswith(".html"):
            result.append(base_domain + f.replace("index.html", ""))
    return result


def convert_to_dicts(changed_files, batch_size):
    result = []
    current_batch = {"files": []}
    for f in changed_files:
        if len(current_batch["files"]) >= batch_size:
            result.append(current_batch)
            current_batch = {"files": []}
        current_batch["files"].append(f)
    if current_batch["files"]:
        result.append(current_batch)
    return result


def post_data(prepared_batches, token):
    headers = {"Authorization": "Bearer {}".format(token)}
    for batch in prepared_batches:
        print("Purging cache for", ", ".join(batch["files"]))
        response = requests.post(CLOUDFLARE_URL, json=batch, headers=headers)
        response.raise_for_status()
        time.sleep(3)


if __name__ == "__main__":
    token = os.getenv("CLOUDFLARE_TOKEN")
    if not token:
        raise Exception("Env variable CLOUDFLARE_TOKEN is empty")
    base_domain = os.getenv("BASE_DOMAIN", "https://content.clickhouse.tech/")
    changed_files = collect_changed_files()
    print("Found", len(changed_files), "changed files")
    filtered_files = filter_and_transform_changed_files(changed_files, base_domain)
    print("Files left after filtering:", len(filtered_files))
    prepared_batches = convert_to_dicts(filtered_files, 25)
    post_data(prepared_batches, token)
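As an informal illustration of the batching logic above (the module name and URLs below are assumptions, not part of the original script), `convert_to_dicts` splits the purge URLs into Cloudflare-sized batches of at most 25 files:

```python
# Minimal sketch: exercise convert_to_dicts from the script above,
# assuming it is importable as purge_cache_for_changed_files.
from purge_cache_for_changed_files import convert_to_dicts

# 60 hypothetical page URLs are split into batches of 25, 25 and 10 files,
# matching the batch_size=25 used in the script's __main__ section.
urls = ["https://content.clickhouse.tech/docs/en/page{}/".format(i) for i in range(60)]
batches = convert_to_dicts(urls, 25)
assert [len(b["files"]) for b in batches] == [25, 25, 10]
```

Each resulting dict has the `{"files": [...]}` shape that `post_data` sends to the Cloudflare purge_cache endpoint.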

View File

@ -10,13 +10,6 @@ GIT_PROD_URI="git@github.com:ClickHouse/clickhouse-website-content.git"
EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS:---enable-stable-releases --minify --verbose}"
HISTORY_SIZE="${HISTORY_SIZE:-5}"
if [[ -z "$1" ]]
then
TAG=$(head -c 8 /dev/urandom | xxd -p)
else
TAG="$1"
fi
DOCKER_HASH="$2"
if [[ -z "$1" ]]
then
source "${BASE_DIR}/venv/bin/activate"
@ -44,27 +37,6 @@ then
if [[ ! -z "${CLOUDFLARE_TOKEN}" ]]
then
sleep 1m
git diff --stat="9999,9999" --diff-filter=M HEAD~1 | grep '|' | awk '$1 ~ /\.html$/ { if ($3>6) { url="https://content.clickhouse.tech/"$1; sub(/index.html/, "", url); print "\""url"\""; }}' | split -l 25 /dev/stdin PURGE
for FILENAME in $(ls PURGE*)
do
POST_DATA=$(cat "${FILENAME}" | sed -n -e 'H;${x;s/\n/,/g;s/^,//;p;}' | awk '{print "{\"files\":["$0"]}";}')
sleep 3s
set +x
curl -X POST "https://api.cloudflare.com/client/v4/zones/4fc6fb1d46e87851605aa7fa69ca6fe0/purge_cache" -H "Authorization: Bearer ${CLOUDFLARE_TOKEN}" -H "Content-Type:application/json" --data "${POST_DATA}"
set -x
rm "${FILENAME}"
done
python3 "${BASE_DIR}/purge_cache_for_changed_files.py"
fi
cd "${BUILD_DIR}"
DOCKER_HASH=$(head -c 16 < /dev/urandom | xxd -p)
fi
QLOUD_ENDPOINT="https://platform.yandex-team.ru/api/v1"
QLOUD_PROJECT="clickhouse.clickhouse-website"
if [[ -z "$1" ]]
then
QLOUD_ENV="${QLOUD_PROJECT}.test"
else
QLOUD_ENV="${QLOUD_PROJECT}.prod"
fi
echo ">>> Successfully deployed ${TAG} ${DOCKER_HASH} to ${QLOUD_ENV} <<<"

View File

@ -106,7 +106,7 @@ void ClusterCopierApp::mainImpl()
context->setConfig(loaded_config.configuration);
context->setApplicationType(Context::ApplicationType::LOCAL);
context->setPath(process_path);
context->setPath(process_path + "/");
registerFunctions();
registerAggregateFunctions();

View File

@ -1,5 +1,3 @@
#include <iomanip>
#include <Poco/Net/NetException.h>
#include <Core/Defines.h>
#include <Compression/CompressedReadBuffer.h>

View File

@ -17,7 +17,6 @@
#endif
#include <Common/Exception.h>
#include <IO/WriteHelpers.h>
#include <Poco/String.h>
#include <algorithm>

View File

@ -1,7 +1,6 @@
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/ColumnsCommon.h>
#include <Common/assert_cast.h>
#include <AggregateFunctions/AggregateFunctionState.h>
#include <DataStreams/ColumnGathererStream.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>

View File

@ -2,11 +2,9 @@
#include <Columns/ColumnConst.h>
#include <Columns/ColumnsCommon.h>
#include <Common/PODArray.h>
#include <Common/typeid_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
#include <common/defines.h>
#if defined(MEMORY_SANITIZER)
#include <sanitizer/msan_interface.h>

View File

@ -1,4 +1,3 @@
#include <Core/Defines.h>
#include <Common/Arena.h>
#include <Common/memcmpSmall.h>
#include <Common/assert_cast.h>

View File

@ -11,7 +11,6 @@
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <Columns/ColumnsCommon.h>
#include <DataStreams/ColumnGathererStream.h>

View File

@ -6,7 +6,6 @@
#include <cstring>
#include <algorithm>
#include <sstream>
#include <iostream>
#include <functional>
#include <Poco/DOM/Text.h>
#include <Poco/DOM/Attr.h>

View File

@ -1,6 +1,5 @@
#include "configReadClient.h"
#include <Poco/Util/Application.h>
#include <Poco/Util/LayeredConfiguration.h>
#include <Poco/File.h>
#include "ConfigProcessor.h"

View File

@ -497,6 +497,9 @@ namespace ErrorCodes
extern const int CANNOT_CONNECT_RABBITMQ = 530;
extern const int CANNOT_FSTAT = 531;
extern const int LDAP_ERROR = 532;
extern const int INCONSISTENT_RESERVATIONS = 533;
extern const int NO_RESERVATIONS_PROVIDED = 534;
extern const int UNKNOWN_RAID_TYPE = 535;
extern const int KEEPER_EXCEPTION = 999;
extern const int POCO_EXCEPTION = 1000;

View File

@ -1,9 +1,7 @@
#include <Core/UUID.h>
#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <Common/FieldVisitors.h>

View File

@ -117,6 +117,8 @@
M(SelectedParts, "Number of data parts selected to read from a MergeTree table.") \
M(SelectedRanges, "Number of (non-adjacent) ranges in all data parts selected to read from a MergeTree table.") \
M(SelectedMarks, "Number of marks (index granules) selected to read from a MergeTree table.") \
M(SelectedRows, "Number of rows SELECTed from all tables.") \
M(SelectedBytes, "Number of bytes (uncompressed; for columns as they stored in memory) SELECTed from all tables.") \
\
M(Merge, "Number of launched background merges.") \
M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \

View File

@ -5,7 +5,6 @@
#include <Common/StackTrace.h>
#include <Common/TraceCollector.h>
#include <Common/thread_local_rng.h>
#include <common/StringRef.h>
#include <common/logger_useful.h>
#include <common/phdr_cache.h>
#include <common/errnoToString.h>

View File

@ -1,7 +1,5 @@
#include "StatusFile.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <fcntl.h>
#include <errno.h>

View File

@ -1,5 +1,4 @@
#include <signal.h>
#include <time.h>
#include <sys/time.h>
#if defined(OS_LINUX)
# include <sys/sysinfo.h>
@ -8,9 +7,7 @@
#include <random>
#include <common/defines.h>
#include <common/sleep.h>
#include <common/getThreadId.h>
#include <IO/ReadHelpers.h>

View File

@ -13,9 +13,6 @@
#include <Common/StackTrace.h>
#include <common/logger_useful.h>
#include <unistd.h>
#include <fcntl.h>
namespace DB
{

View File

@ -1,5 +1,3 @@
#include <string.h>
#include <Common/ProfileEvents.h>
#include <Common/ZooKeeper/IKeeper.h>

View File

@ -3,7 +3,6 @@
#include "KeeperException.h"
#include "TestKeeper.h"
#include <random>
#include <functional>
#include <pcg-random/pcg_random.hpp>
@ -11,8 +10,6 @@
#include <common/find_symbols.h>
#include <Common/randomSeed.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/PODArray.h>
#include <Common/thread_local_rng.h>
#include <Common/Exception.h>
#include <Poco/Net/NetException.h>

View File

@ -8,9 +8,6 @@
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Poco/Exception.h>
#include <Poco/Net/NetException.h>
#if !defined(ARCADIA_BUILD)
# include <Common/config.h>
#endif

View File

@ -12,7 +12,9 @@
using namespace DB;
TEST(zkutil, ZookeeperConnected)
template <typename... Args>
auto getZooKeeper(Args &&... args)
{
/// In our CI infrastructure it is typical that ZooKeeper is unavailable for some amount of time.
size_t i;
@ -20,9 +22,10 @@ TEST(zkutil, ZookeeperConnected)
{
try
{
auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181");
auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181", std::forward<Args>(args)...);
zookeeper->exists("/");
zookeeper->createIfNotExists("/clickhouse_test", "Unit tests of ClickHouse");
return zookeeper;
}
catch (...)
{
@ -30,18 +33,16 @@ TEST(zkutil, ZookeeperConnected)
sleep(1);
continue;
}
break;
}
if (i == 100)
{
std::cerr << "No zookeeper after " << i << " tries. skip tests." << std::endl;
exit(0);
}
}
TEST(zkutil, MultiNiceExceptionMsg)
{
auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181");
auto zookeeper = getZooKeeper();
Coordination::Requests ops;
@ -79,13 +80,13 @@ TEST(zkutil, MultiNiceExceptionMsg)
TEST(zkutil, MultiAsync)
{
auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181");
Coordination::Requests ops;
zookeeper->tryRemoveRecursive("/clickhouse_test/zkutil_multi");
getZooKeeper()->tryRemoveRecursive("/clickhouse_test/zkutil_multi");
{
ops.clear();
auto zookeeper = getZooKeeper();
auto fut = zookeeper->asyncMulti(ops);
}
@ -94,6 +95,7 @@ TEST(zkutil, MultiAsync)
ops.emplace_back(zkutil::makeCreateRequest("/clickhouse_test/zkutil_multi", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest("/clickhouse_test/zkutil_multi/a", "", zkutil::CreateMode::Persistent));
auto zookeeper = getZooKeeper();
auto fut = zookeeper->tryAsyncMulti(ops);
ops.clear();
@ -104,6 +106,7 @@ TEST(zkutil, MultiAsync)
EXPECT_ANY_THROW
(
auto zookeeper = getZooKeeper();
std::vector<std::future<Coordination::MultiResponse>> futures;
for (size_t i = 0; i < 10000; ++i)
@ -131,6 +134,7 @@ TEST(zkutil, MultiAsync)
ops.emplace_back(zkutil::makeCreateRequest("/clickhouse_test/zkutil_multi", "_", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest("/clickhouse_test/zkutil_multi/a", "_", zkutil::CreateMode::Persistent));
auto zookeeper = getZooKeeper();
auto fut = zookeeper->tryAsyncMulti(ops);
ops.clear();
@ -155,13 +159,12 @@ TEST(zkutil, WatchGetChildrenWithChroot)
{
try
{
const String zk_server = "localhost:2181";
const String prefix = "/clickhouse_test/zkutil/watch_get_children_with_chroot";
/// Create chroot node firstly
auto zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server);
auto zookeeper = getZooKeeper();
zookeeper->createAncestors(prefix + "/");
zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server, "",
zookeeper = getZooKeeper("",
zkutil::DEFAULT_SESSION_TIMEOUT,
zkutil::DEFAULT_OPERATION_TIMEOUT,
prefix);
@ -173,7 +176,7 @@ TEST(zkutil, WatchGetChildrenWithChroot)
zkutil::EventPtr event = std::make_shared<Poco::Event>();
zookeeper->getChildren(queue_path, nullptr, event);
{
auto zookeeper2 = std::make_unique<zkutil::ZooKeeper>(zk_server, "",
auto zookeeper2 = getZooKeeper("",
zkutil::DEFAULT_SESSION_TIMEOUT,
zkutil::DEFAULT_OPERATION_TIMEOUT,
prefix);
@ -192,13 +195,12 @@ TEST(zkutil, MultiCreateSequential)
{
try
{
const String zk_server = "localhost:2181";
const String prefix = "/clickhouse_test/zkutil";
/// Create chroot node firstly
auto zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server);
auto zookeeper = getZooKeeper();
zookeeper->createAncestors(prefix + "/");
zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server, "",
zookeeper = getZooKeeper("",
zkutil::DEFAULT_SESSION_TIMEOUT,
zkutil::DEFAULT_OPERATION_TIMEOUT,
"/clickhouse_test");

View File

@ -5,7 +5,6 @@
#include <syscall.h>
#include <unistd.h>
#include <linux/capability.h>
#include <linux/netlink.h>
#include <Common/Exception.h>

View File

@ -1,5 +1,4 @@
#include "CompressedReadBuffer.h"
#include <Compression/CompressionInfo.h>
#include <Compression/LZ4_decompress_faster.h>

View File

@ -1,14 +1,11 @@
#include "CompressedReadBufferBase.h"
#include <vector>
#include <cstring>
#include <cassert>
#include <city.h>
#include <Common/PODArray.h>
#include <Common/ProfileEvents.h>
#include <Common/Exception.h>
#include <Common/hex.h>
#include <common/unaligned.h>
#include <Compression/ICompressionCodec.h>
#include <Compression/CompressionFactory.h>
#include <IO/ReadBuffer.h>

View File

@ -2,7 +2,6 @@
#include "CompressedReadBufferFromFile.h"
#include <Compression/CompressionInfo.h>
#include <Compression/LZ4_decompress_faster.h>
#include <IO/WriteHelpers.h>
#include <IO/createReadBufferFromFileBase.h>

View File

@ -1,4 +1,3 @@
#include <memory>
#include <city.h>
#include <string.h>

View File

@ -5,7 +5,6 @@
#include <Parsers/IAST.h>
#include <Parsers/ASTLiteral.h>
#include <IO/WriteHelpers.h>
#include <cstdlib>
namespace DB

View File

@ -9,7 +9,6 @@
#include <string.h>
#include <algorithm>
#include <cstdlib>
#include <type_traits>
#include <bitset>

View File

@ -3,12 +3,10 @@
#include <Common/PODArray.h>
#include <common/unaligned.h>
#include <Compression/CompressionFactory.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <Common/hex.h>
#include <sstream>
namespace DB

View File

@ -1,9 +1,7 @@
#include <Compression/CompressionCodecZSTD.h>
#include <Compression/CompressionInfo.h>
#include <IO/ReadHelpers.h>
#include <Compression/CompressionFactory.h>
#include <zstd.h>
#include <Core/Field.h>
#include <Parsers/IAST.h>
#include <Parsers/ASTLiteral.h>
#include <Common/typeid_cast.h>

View File

@ -1,10 +1,7 @@
#include <Compression/CompressionFactory.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
#include <Common/typeid_cast.h>
#include <Poco/String.h>
#include <IO/ReadBuffer.h>
#include <Parsers/queryToString.h>

View File

@ -2,8 +2,6 @@
#include <string.h>
#include <iostream>
#include <random>
#include <algorithm>
#include <Core/Defines.h>
#include <Common/Stopwatch.h>
#include <common/types.h>

View File

@ -1,11 +1,9 @@
#include <string>
#include <iostream>
#include <sstream>
#include <fstream>
#include <iomanip>
#include <Core/Types.h>
#include <Common/Stopwatch.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadBufferFromFile.h>

View File

@ -11,8 +11,6 @@
#include <Parsers/IParser.h>
#include <Parsers/TokenIterator.h>
#include <fmt/format.h>
#include <random>
#include <bitset>
#include <cmath>

View File

@ -6,13 +6,11 @@
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Columns/ColumnConst.h>
#include <iterator>
#include <memory>
namespace DB

View File

@ -1,6 +1,5 @@
#include <Core/ColumnsWithTypeAndName.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

View File

@ -7,7 +7,6 @@
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeEnum.h>
#include <Common/typeid_cast.h>
#include <ext/range.h>
namespace DB

View File

@ -1,12 +1,10 @@
#include <iostream>
#include <iomanip>
#include <sstream>
#include <Core/Field.h>
#include <Common/FieldVisitors.h>
#include <Common/Stopwatch.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h>
#include <DataTypes/DataTypeString.h>

View File

@ -9,6 +9,8 @@
namespace ProfileEvents
{
extern const Event ThrottlerSleepMicroseconds;
extern const Event SelectedRows;
extern const Event SelectedBytes;
}
@ -263,6 +265,9 @@ void IBlockInputStream::progressImpl(const Progress & value)
if (quota && limits.mode == LIMITS_TOTAL)
quota->used({Quota::READ_ROWS, value.read_rows}, {Quota::READ_BYTES, value.read_bytes});
}
ProfileEvents::increment(ProfileEvents::SelectedRows, value.read_rows);
ProfileEvents::increment(ProfileEvents::SelectedBytes, value.read_bytes);
}

View File

@ -1,4 +1,3 @@
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnTuple.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeCustom.h>

View File

@ -4,7 +4,6 @@
#include <DataTypes/DataTypeCustomSimpleTextSerialization.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeCustom.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionsCoding.h>
namespace DB

View File

@ -1,14 +1,9 @@
#include <Common/FieldVisitors.h>
#include <Common/typeid_cast.h>
#include <IO/ReadHelpers.h>
#include <Columns/ColumnAggregateFunction.h>
#include <DataTypes/DataTypeCustomSimpleAggregateFunction.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeFactory.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>

View File

@ -1,10 +1,7 @@
#include <DataTypes/DataTypeDateTime.h>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnsNumber.h>
#include <Common/assert_cast.h>
#include <Common/typeid_cast.h>
#include <common/DateLUT.h>
#include <DataTypes/DataTypeFactory.h>
#include <Formats/FormatSettings.h>

View File

@ -1,8 +1,6 @@
#include <DataTypes/DataTypeDateTime64.h>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnsNumber.h>
#include <Common/assert_cast.h>
#include <Common/typeid_cast.h>
#include <common/DateLUT.h>

View File

@ -1,5 +1,4 @@
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnConst.h>
#include <Formats/FormatSettings.h>

View File

@ -1,7 +1,6 @@
#include <Core/Defines.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnConst.h>
#include <Common/typeid_cast.h>

View File

@ -3,7 +3,6 @@
#include <Parsers/IAST.h>
#include <Parsers/ASTLiteral.h>
namespace DB

View File

@ -4,8 +4,6 @@
#include <Common/Exception.h>
#include <Common/escapeForFileName.h>
#include <Core/Defines.h>
#include <IO/WriteHelpers.h>
#include <DataTypes/IDataType.h>

View File

@ -1,6 +1,5 @@
#include "CacheDictionary.h"
#include <functional>
#include <memory>
#include <Columns/ColumnString.h>
#include <Common/BitHelpers.h>

View File

@ -1,16 +1,8 @@
#include <iostream>
#include <sstream>
#include <Core/Types.h>
#include <Poco/Util/XMLConfiguration.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/DumpASTNode.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/ParserDictionary.h>
#include <Parsers/ParserDropQuery.h>
#include <Parsers/ParserTablePropertiesQuery.h>
#include <Parsers/TablePropertiesQueriesASTs.h>
#include <Parsers/formatAST.h>
#include <Parsers/parseQuery.h>
#include <Dictionaries/getDictionaryConfigurationFromAST.h>

View File

@ -25,6 +25,7 @@ using DiskDirectoryIteratorPtr = std::unique_ptr<IDiskDirectoryIterator>;
class IReservation;
using ReservationPtr = std::unique_ptr<IReservation>;
using Reservations = std::vector<ReservationPtr>;
class ReadBufferFromFileBase;
class WriteBufferFromFileBase;

View File

@ -10,10 +10,32 @@ namespace DB
namespace ErrorCodes
{
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int INCONSISTENT_RESERVATIONS;
extern const int NO_RESERVATIONS_PROVIDED;
extern const int UNKNOWN_VOLUME_TYPE;
}
String volumeTypeToString(VolumeType type)
{
switch (type)
{
case VolumeType::JBOD:
return "JBOD";
case VolumeType::RAID1:
return "RAID1";
case VolumeType::SINGLE_DISK:
return "SINGLE_DISK";
case VolumeType::UNKNOWN:
return "UNKNOWN";
}
throw Exception("Unknown volume type, please add it to DB::volumeTypeToString", ErrorCodes::UNKNOWN_VOLUME_TYPE);
}
IVolume::IVolume(
String name_, const Poco::Util::AbstractConfiguration & config, const String & config_prefix, DiskSelectorPtr disk_selector)
String name_,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector)
: name(std::move(name_))
{
Poco::Util::AbstractConfiguration::Keys keys;
@ -40,4 +62,43 @@ UInt64 IVolume::getMaxUnreservedFreeSpace() const
return res;
}
MultiDiskReservation::MultiDiskReservation(Reservations & reservations_, UInt64 size_)
: reservations(std::move(reservations_))
, size(size_)
{
if (reservations.empty())
{
throw Exception("At least one reservation must be provided to MultiDiskReservation", ErrorCodes::NO_RESERVATIONS_PROVIDED);
}
for (auto & reservation : reservations)
{
if (reservation->getSize() != size_)
{
throw Exception("Reservations must have same size", ErrorCodes::INCONSISTENT_RESERVATIONS);
}
}
}
Disks MultiDiskReservation::getDisks() const
{
Disks res;
res.reserve(reservations.size());
for (const auto & reservation : reservations)
{
res.push_back(reservation->getDisk());
}
return res;
}
void MultiDiskReservation::update(UInt64 new_size)
{
for (auto & reservation : reservations)
{
reservation->update(new_size);
}
size = new_size;
}
}

View File

@ -11,10 +11,13 @@ namespace DB
enum class VolumeType
{
JBOD,
RAID1,
SINGLE_DISK,
UNKNOWN
};
String volumeTypeToString(VolumeType t);
class IVolume;
using VolumePtr = std::shared_ptr<IVolume>;
using Volumes = std::vector<VolumePtr>;
@ -33,7 +36,10 @@ using Volumes = std::vector<VolumePtr>;
class IVolume : public Space
{
public:
IVolume(String name_, Disks disks_): disks(std::move(disks_)), name(name_)
IVolume(String name_, Disks disks_, size_t max_data_part_size_ = 0)
: disks(std::move(disks_))
, name(name_)
, max_data_part_size(max_data_part_size_)
{
}
@ -53,12 +59,35 @@ public:
/// Return biggest unreserved space across all disks
UInt64 getMaxUnreservedFreeSpace() const;
DiskPtr getDisk(size_t i = 0) const { return disks[i]; }
DiskPtr getDisk() const { return getDisk(0); }
virtual DiskPtr getDisk(size_t i) const { return disks[i]; }
const Disks & getDisks() const { return disks; }
protected:
Disks disks;
const String name;
public:
/// Max size of reservation, zero means unlimited size
UInt64 max_data_part_size = 0;
};
/// Reservation for multiple disks at once. Can be used in RAID1 implementation.
class MultiDiskReservation : public IReservation
{
public:
MultiDiskReservation(Reservations & reservations, UInt64 size);
UInt64 getSize() const override { return size; }
DiskPtr getDisk(size_t i) const override { return reservations[i]->getDisk(); }
Disks getDisks() const override;
void update(UInt64 new_size) override;
private:
Reservations reservations;
UInt64 size;
};
}

View File

@ -22,5 +22,6 @@ public:
};
using VolumeSingleDiskPtr = std::shared_ptr<SingleDiskVolume>;
using VolumesSingleDiskPtr = std::vector<VolumeSingleDiskPtr>;
}

View File

@ -71,7 +71,7 @@ StoragePolicy::StoragePolicy(
}
StoragePolicy::StoragePolicy(String name_, VolumesJBOD volumes_, double move_factor_)
StoragePolicy::StoragePolicy(String name_, Volumes volumes_, double move_factor_)
: volumes(std::move(volumes_)), name(std::move(name_)), move_factor(move_factor_)
{
if (volumes.empty())
@ -204,7 +204,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
for (const auto & volume : getVolumes())
{
if (new_volume_names.count(volume->getName()) == 0)
throw Exception("New storage policy shall contain volumes of old one", ErrorCodes::LOGICAL_ERROR);
throw Exception("New storage policy shall contain volumes of old one", ErrorCodes::BAD_ARGUMENTS);
std::unordered_set<String> new_disk_names;
for (const auto & disk : new_storage_policy->getVolumeByName(volume->getName())->getDisks())
@ -212,7 +212,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
for (const auto & disk : volume->getDisks())
if (new_disk_names.count(disk->getName()) == 0)
throw Exception("New storage policy shall contain disks of old one", ErrorCodes::LOGICAL_ERROR);
throw Exception("New storage policy shall contain disks of old one", ErrorCodes::BAD_ARGUMENTS);
}
}
@ -257,7 +257,7 @@ StoragePolicySelector::StoragePolicySelector(
{
auto default_volume = std::make_shared<VolumeJBOD>(default_volume_name, std::vector<DiskPtr>{disks->get(default_disk_name)}, 0);
auto default_policy = std::make_shared<StoragePolicy>(default_storage_policy_name, VolumesJBOD{default_volume}, 0.0);
auto default_policy = std::make_shared<StoragePolicy>(default_storage_policy_name, Volumes{default_volume}, 0.0);
policies.emplace(default_storage_policy_name, default_policy);
}
}

View File

@ -4,6 +4,7 @@
#include <Disks/IDisk.h>
#include <Disks/IVolume.h>
#include <Disks/VolumeJBOD.h>
#include <Disks/VolumeRAID1.h>
#include <Disks/SingleDiskVolume.h>
#include <IO/WriteHelpers.h>
#include <Common/CurrentMetrics.h>
@ -33,7 +34,7 @@ class StoragePolicy
public:
StoragePolicy(String name_, const Poco::Util::AbstractConfiguration & config, const String & config_prefix, DiskSelectorPtr disks);
StoragePolicy(String name_, VolumesJBOD volumes_, double move_factor_);
StoragePolicy(String name_, Volumes volumes_, double move_factor_);
bool isDefaultPolicy() const;
@ -65,16 +66,16 @@ public:
/// Do not use this function when it is possible to predict size.
ReservationPtr makeEmptyReservationOnLargestDisk() const;
const VolumesJBOD & getVolumes() const { return volumes; }
const Volumes & getVolumes() const { return volumes; }
/// Returns number [0., 1.] -- fraction of free space on disk
/// which should be kept with help of background moves
double getMoveFactor() const { return move_factor; }
/// Get volume by index from storage_policy
VolumeJBODPtr getVolume(size_t i) const { return (i < volumes_names.size() ? volumes[i] : VolumeJBODPtr()); }
VolumePtr getVolume(size_t i) const { return (i < volumes_names.size() ? volumes[i] : VolumePtr()); }
VolumeJBODPtr getVolumeByName(const String & volume_name) const
VolumePtr getVolumeByName(const String & volume_name) const
{
auto it = volumes_names.find(volume_name);
if (it == volumes_names.end())
@ -86,7 +87,7 @@ public:
void checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const;
private:
VolumesJBOD volumes;
Volumes volumes;
const String name;
std::map<String, size_t> volumes_names;

View File

@ -17,8 +17,8 @@ VolumeJBOD::VolumeJBOD(
String name_,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector
) : IVolume(name_, config, config_prefix, disk_selector)
DiskSelectorPtr disk_selector)
: IVolume(name_, config, config_prefix, disk_selector)
{
Poco::Logger * logger = &Poco::Logger::get("StorageConfiguration");
@ -55,7 +55,7 @@ VolumeJBOD::VolumeJBOD(
LOG_WARNING(logger, "Volume {} max_data_part_size is too low ({} < {})", backQuote(name), ReadableSize(max_data_part_size), ReadableSize(MIN_PART_SIZE));
}
DiskPtr VolumeJBOD::getNextDisk()
DiskPtr VolumeJBOD::getDisk(size_t /* index */) const
{
size_t start_from = last_used.fetch_add(1u, std::memory_order_relaxed);
size_t index = start_from % disks.size();
@ -64,7 +64,8 @@ DiskPtr VolumeJBOD::getNextDisk()
ReservationPtr VolumeJBOD::reserve(UInt64 bytes)
{
/// This volume can not store files which size greater than max_data_part_size
/// This volume cannot store data whose size is greater than `max_data_part_size`,
/// to ensure that parts larger than that go to another volume.
if (max_data_part_size != 0 && bytes > max_data_part_size)
return {};

View File

@ -14,7 +14,7 @@ class VolumeJBOD : public IVolume
{
public:
VolumeJBOD(String name_, Disks disks_, UInt64 max_data_part_size_)
: IVolume(name_, disks_), max_data_part_size(max_data_part_size_)
: IVolume(name_, disks_, max_data_part_size_)
{
}
@ -27,19 +27,17 @@ public:
VolumeType getType() const override { return VolumeType::JBOD; }
/// Next disk (round-robin)
/// Always returns next disk (round-robin), ignores argument.
///
/// - Used with policy for temporary data
/// - Ignores all limitations
/// - Shares last access with reserve()
DiskPtr getNextDisk();
DiskPtr getDisk(size_t index) const override;
/// Uses Round-robin to choose disk for reservation.
/// Returns valid reservation or nullptr if there is no space left on any disk.
ReservationPtr reserve(UInt64 bytes) override;
/// Max size of reservation
UInt64 max_data_part_size = 0;
private:
mutable std::atomic<size_t> last_used = 0;
};

29
src/Disks/VolumeRAID1.cpp Normal file
View File

@ -0,0 +1,29 @@
#include "VolumeRAID1.h"
#include <Common/StringUtils/StringUtils.h>
#include <Common/quoteString.h>
namespace DB
{
ReservationPtr VolumeRAID1::reserve(UInt64 bytes)
{
/// This volume cannot store data whose size is greater than `max_data_part_size`,
/// to ensure that parts larger than that go to another volume.
if (max_data_part_size != 0 && bytes > max_data_part_size)
return {};
Reservations res(disks.size());
for (size_t i = 0; i < disks.size(); ++i)
{
res[i] = disks[i]->reserve(bytes);
if (!res[i])
return {};
}
return std::make_unique<MultiDiskReservation>(res, bytes);
}
}

37
src/Disks/VolumeRAID1.h Normal file
View File

@ -0,0 +1,37 @@
#pragma once
#include <Disks/createVolume.h>
#include <Disks/VolumeJBOD.h>
namespace DB
{
/// Volume which reserves space on each underlying disk.
///
/// NOTE: Just an interface implementation; it is not used in the codebase
/// and is not available to users.
class VolumeRAID1 : public VolumeJBOD
{
public:
VolumeRAID1(String name_, Disks disks_, UInt64 max_data_part_size_)
: VolumeJBOD(name_, disks_, max_data_part_size_)
{
}
VolumeRAID1(
String name_,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector)
: VolumeJBOD(name_, config, config_prefix, disk_selector)
{
}
VolumeType getType() const override { return VolumeType::RAID1; }
ReservationPtr reserve(UInt64 bytes) override;
};
using VolumeRAID1Ptr = std::shared_ptr<VolumeRAID1>;
}

View File

@ -1,17 +1,53 @@
#include "createVolume.h"
#include <Disks/SingleDiskVolume.h>
#include <Disks/VolumeJBOD.h>
#include <Disks/VolumeRAID1.h>
#include <boost/algorithm/string.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int UNKNOWN_RAID_TYPE;
}
VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, VolumePtr other_volume)
{
if (other_volume->getType() == VolumeType::JBOD || other_volume->getType() == VolumeType::SINGLE_DISK)
{
/// Since reservation on JBOD choices one of disks and makes reservation there, volume
/// Since reservation on JBOD chooses one of disks and makes reservation there, volume
/// for such type of reservation will be with one disk.
return std::make_shared<SingleDiskVolume>(other_volume->getName(), reservation->getDisk());
}
if (other_volume->getType() == VolumeType::RAID1)
{
auto volume = std::dynamic_pointer_cast<VolumeRAID1>(other_volume);
return std::make_shared<VolumeRAID1>(volume->getName(), reservation->getDisks(), volume->max_data_part_size);
}
return nullptr;
}
VolumePtr createVolumeFromConfig(
String name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector
)
{
auto has_raid_type = config.has(config_prefix + ".raid_type");
if (!has_raid_type)
{
return std::make_shared<VolumeJBOD>(name, config, config_prefix, disk_selector);
}
String raid_type = config.getString(config_prefix + ".raid_type");
if (raid_type == "JBOD")
{
return std::make_shared<VolumeJBOD>(name, config, config_prefix, disk_selector);
}
throw Exception("Unknown raid type '" + raid_type + "'", ErrorCodes::UNKNOWN_RAID_TYPE);
}
}

View File

@ -1,12 +1,16 @@
#pragma once
#include <Disks/IVolume.h>
#include <Disks/VolumeJBOD.h>
#include <Disks/SingleDiskVolume.h>
namespace DB
{
VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, VolumePtr other_volume);
VolumePtr createVolumeFromConfig(
String name_,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector
);
}

View File

@ -16,6 +16,7 @@ SRCS(
SingleDiskVolume.cpp
StoragePolicy.cpp
VolumeJBOD.cpp
VolumeRAID1.cpp
)
END()

View File

@ -1,7 +1,6 @@
#include "ProtobufColumnMatcher.h"
#if USE_PROTOBUF
#include <Common/Exception.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/descriptor.pb.h>
#include <Poco/String.h>

View File

@ -1,7 +1,6 @@
#include <string>
#include <iostream>
#include <fstream>
#include <IO/ReadBufferFromFile.h>
#include <IO/WriteBufferFromFile.h>

View File

@ -8,7 +8,6 @@
#include <Common/assert_cast.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <IO/WriteHelpers.h>
namespace DB

View File

@ -4,8 +4,6 @@
#include <Interpreters/Context.h>
#include <Interpreters/ExternalModelsLoader.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnString.h>
#include <ext/range.h>
#include <string>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,4 +1,3 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionDateOrDateTimeAddInterval.h>

View File

@ -1,6 +1,5 @@
#if defined(__ELF__) && !defined(__FreeBSD__)
#include <Common/Elf.h>
#include <Common/Dwarf.h>
#include <Common/SymbolIndex.h>
#include <Common/HashTable/HashMap.h>
@ -9,7 +8,6 @@
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteHelpers.h>

View File

@ -5,7 +5,6 @@
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <Access/AccessFlags.h>
#include <Interpreters/Context.h>

View File

@ -1,5 +1,4 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/DataTypeNullable.h>
#include <Core/ColumnNumbers.h>

View File

@ -1,5 +1,4 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/DataTypeString.h>
#include <Columns/ColumnString.h>

View File

@ -4,7 +4,6 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/GatherUtils/Algorithms.h>
#include <Functions/GatherUtils/GatherUtils.h>
#include <Functions/GatherUtils/Sinks.h>
#include <Functions/GatherUtils/Slices.h>
#include <Functions/GatherUtils/Sources.h>

View File

@ -1,9 +1,7 @@
#include <common/demangle.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <IO/WriteHelpers.h>
#include <Access/AccessFlags.h>

View File

@ -1,6 +1,5 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionsComparison.h>
#include <Functions/FunctionsLogical.h>
namespace DB

View File

@ -2,16 +2,12 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnAggregateFunction.h>
#include <Common/typeid_cast.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnsNumber.h>
#include <iostream>
#include <Common/PODArray.h>
#include <Columns/ColumnArray.h>
namespace DB
{

View File

@ -9,7 +9,6 @@
#include <memory>
#include <string>
#include <vector>
namespace DB

View File

@ -5,12 +5,9 @@
#include <Functions/FunctionHelpers.h>
#include <Functions/IFunctionImpl.h>
#include <pcg_random.hpp>
#include <Common/UTF8Helpers.h>
#include <Common/randomSeed.h>
#include <common/arithmeticOverflow.h>
#include <common/defines.h>
#include <memory>
namespace DB

View File

@ -1,6 +1,5 @@
#include <array>
#include <math.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>

Some files were not shown because too many files have changed in this diff.