Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit a420b1989a: Merge remote-tracking branch 'origin/master' into HEAD
@@ -421,3 +421,5 @@ add_subdirectory (tests)
add_subdirectory (utils)

include (cmake/print_include_directories.cmake)
+
+include (cmake/sanitize_target_link_libraries.cmake)
@@ -16,4 +16,5 @@ ClickHouse is an open-source column-oriented database management system that all
## Upcoming Events

* [ClickHouse at Yandex Cloud Webinar (in Russian)](https://cloud.yandex.ru/events/144) on July 7, 2020.
* [ClickHouse for genetic data (in Russian)](https://cloud.yandex.ru/events/152) on July 14, 2020.
* [ClickHouse virtual office hours](https://www.eventbrite.com/e/clickhouse-july-virtual-meetup-tickets-111199787558) on July 15, 2020.
cmake/sanitize_target_link_libraries.cmake (new file, 56 lines)
@@ -0,0 +1,56 @@
# If you try to link a target against a directory (that exists), cmake will
# skip it without an error; only the following warning will be reported:
#
#     target_link_libraries(main /tmp)
#
#     WARNING: Target "main" requests linking to directory "/tmp". Targets may link only to libraries. CMake is dropping the item.
#
# And there is no cmake policy that controls this.
# (I guess the reason it is allowed is the FRAMEWORK handling on OSX.)
#
# So, to avoid error-prone cmake rules, this can be sanitized.
# There are the following ways:
# - overwrite target_link_libraries()/link_libraries() and check *before*
#   calling the real macro, but this requires duplicating all supported syntax
#   -- too complex
# - overwrite target_link_libraries() and check the LINK_LIBRARIES property; this
#   works great
#   -- but cannot be used with link_libraries()
# - use the BUILDSYSTEM_TARGETS property to get the list of all targets and sanitize
#   -- this will work.

# https://stackoverflow.com/a/62311397/328260
function (get_all_targets var)
    set (targets)
    get_all_targets_recursive (targets ${CMAKE_CURRENT_SOURCE_DIR})
    set (${var} ${targets} PARENT_SCOPE)
endfunction()
macro (get_all_targets_recursive targets dir)
    get_property (subdirectories DIRECTORY ${dir} PROPERTY SUBDIRECTORIES)
    foreach (subdir ${subdirectories})
        get_all_targets_recursive (${targets} ${subdir})
    endforeach ()
    get_property (current_targets DIRECTORY ${dir} PROPERTY BUILDSYSTEM_TARGETS)
    list (APPEND ${targets} ${current_targets})
endmacro ()

macro (sanitize_link_libraries target)
    get_target_property(target_type ${target} TYPE)
    if (${target_type} STREQUAL "INTERFACE_LIBRARY")
        get_property(linked_libraries TARGET ${target} PROPERTY INTERFACE_LINK_LIBRARIES)
    else()
        get_property(linked_libraries TARGET ${target} PROPERTY LINK_LIBRARIES)
    endif()
    foreach (linked_library ${linked_libraries})
        if (TARGET ${linked_library})
            # just in case, skip if TARGET
        elseif (IS_DIRECTORY ${linked_library})
            message(FATAL_ERROR "${target} requested to link with directory: ${linked_library}")
        endif()
    endforeach()
endmacro()

get_all_targets (all_targets)
foreach (target ${all_targets})
    sanitize_link_libraries(${target})
endforeach()
@@ -79,5 +79,9 @@
    "docker/test/integration/runner": {
        "name": "yandex/clickhouse-integration-tests-runner",
        "dependent": []
-   }
+   },
+   "docker/test/testflows/runner": {
+       "name": "yandex/clickhouse-testflows-runner",
+       "dependent": []
+   }
}
@@ -18,7 +18,7 @@ ccache --zero-stats ||:
ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so ||:
rm -f CMakeCache.txt
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS ..
-ninja clickhouse-bundle
+ninja $NINJA_FLAGS clickhouse-bundle
mv ./programs/clickhouse* /output
mv ./src/unit_tests_dbms /output
find . -name '*.so' -print -exec mv '{}' /output \;
@@ -149,6 +149,8 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ

    if clang_tidy:
        cmake_flags.append('-DENABLE_CLANG_TIDY=1')
+       # Don't stop on the first error, to find more clang-tidy errors in one run.
+       result.append('NINJA_FLAGS=-k0')

    if with_coverage:
        cmake_flags.append('-DWITH_COVERAGE=1')
@@ -1,6 +1,6 @@
## Docker containers for integration tests
- `base` container with required packages
- `runner` container that runs integration tests in docker
-- `compose` contains docker_compose YAML files that are used in tests
+- `runner/compose` contains docker_compose YAML files that are used in tests

How to run integration tests is described in tests/integration/README.md
@@ -63,6 +63,7 @@ RUN set -eux; \

COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
+COPY compose/ /compose/

RUN set -x \
    && addgroup --system dockremap \
@@ -6,8 +6,6 @@
        <allow_introspection_functions>1</allow_introspection_functions>
        <log_queries>1</log_queries>
        <metrics_perf_events_enabled>1</metrics_perf_events_enabled>
        <memory_profiler_sample_probability>1</memory_profiler_sample_probability>
        <max_untracked_memory>1048576</max_untracked_memory> <!-- 1MB -->
    </default>
</profiles>
</yandex>
docker/test/testflows/runner/Dockerfile (new file, 76 lines)
@@ -0,0 +1,76 @@
# docker build -t yandex/clickhouse-testflows-runner .
FROM ubuntu:20.04

RUN apt-get update \
    && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
        ca-certificates \
        bash \
        btrfs-progs \
        e2fsprogs \
        iptables \
        xfsprogs \
        tar \
        pigz \
        wget \
        git \
        iproute2 \
        cgroupfs-mount \
        python3-pip \
        tzdata \
        libreadline-dev \
        libicu-dev \
        bsdutils \
        curl \
        liblua5.1-dev \
        luajit \
        libssl-dev \
        libcurl4-openssl-dev \
        gdb \
    && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
        /tmp/* \
    && apt-get clean

ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN pip3 install urllib3 testflows==1.6.24 docker-compose docker dicttoxml kazoo tzlocal

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce

RUN set -eux; \
    \
    # this "case" statement is generated via "update.sh"
    \
    if ! wget -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/x86_64/docker-${DOCKER_VERSION}.tgz"; then \
        echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${x86_64}'"; \
        exit 1; \
    fi; \
    \
    tar --extract \
        --file docker.tgz \
        --strip-components 1 \
        --directory /usr/local/bin/ \
    ; \
    rm docker.tgz; \
    \
    dockerd --version; \
    docker --version

COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/

RUN set -x \
    && addgroup --system dockremap \
    && adduser --system dockremap \
    && adduser dockremap dockremap \
    && echo 'dockremap:165536:65536' >> /etc/subuid \
    && echo 'dockremap:165536:65536' >> /etc/subgid

VOLUME /var/lib/docker
EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "python3 regression.py --no-color --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS} && cat test.log | tfs report results --format json > results.json"]
docker/test/testflows/runner/dockerd-entrypoint.sh (new executable file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash
set -e

dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 &>/var/log/somefile &

set +e
retries=0
while true; do
    docker info &>/dev/null && break
    retries=$((retries + 1))
    if [[ $retries -ge 100 ]]; then # 10 sec max
        echo "Can't start docker daemon, timeout exceeded." >&2
        exit 1;
    fi
    sleep 0.1
done
set -e

echo "Start tests"
export CLICKHOUSE_TESTS_SERVER_BIN_PATH=/clickhouse
export CLICKHOUSE_TESTS_CLIENT_BIN_PATH=/clickhouse
export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/clickhouse-config
export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge

cd /ClickHouse/tests/testflows
exec "$@"
docker/test/testflows/runner/modprobe.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/sh
set -eu

# "modprobe" without modprobe
# https://twitter.com/lucabruno/status/902934379835662336

# this isn't 100% fool-proof, but it'll have a much higher success rate than simply using the "real" modprobe

# Docker often uses "modprobe -va foo bar baz"
# so we ignore modules that start with "-"
for module; do
    if [ "${module#-}" = "$module" ]; then
        ip link show "$module" || true
        lsmod | grep "$module" || true
    fi
done

# remove /usr/local/... from PATH so we can exec the real modprobe as a last resort
export PATH='/usr/sbin:/usr/bin:/sbin:/bin'
exec modprobe "$@"
@@ -1,7 +1,18 @@
---
toc_folder_title: Commercial
toc_priority: 70
-toc_title: Commercial
+toc_title: Introduction
---

# ClickHouse Commercial Services

This section is a directory of commercial service providers specializing in ClickHouse. They are independent companies not necessarily affiliated with Yandex.

Service categories:

- [Cloud](cloud.md)
- [Support](support.md)

!!! note "For service providers"
    If you happen to represent one of them, feel free to open a pull request adding your company to the respective section (or even adding a new section if the service doesn't fit into existing categories). The easiest way to open a pull request for a documentation page is by using the “pencil” edit button in the top-right corner. If your service is available in some local market, make sure to mention it in a localized documentation page as well (or at least point it out in a pull request description).
@@ -1,6 +1,9 @@
---
toc_folder_title: Optimizing Performance
toc_priority: 52
toc_hidden: true
---

# Optimizing Performance

- [Sampling query profiler](sampling-query-profiler.md)
@@ -1,6 +1,31 @@
---
toc_folder_title: Domains
toc_priority: 56
toc_title: Overview
---

# Domains {#domains}

Domains are special-purpose types that add some extra features atop an existing base type, while leaving the on-wire and on-disk format of the underlying data type intact. At the moment, ClickHouse does not support user-defined domains.

You can use domains anywhere the corresponding base type can be used, for example:

- Create a column of a domain type
- Read/write values from/to a domain column
- Use it as an index if the base type can be used as an index
- Call functions with values of a domain column

### Extra Features of Domains {#extra-features-of-domains}

- Explicit column type name in `SHOW CREATE TABLE` or `DESCRIBE TABLE`
- Input from human-friendly format with `INSERT INTO domain_table(domain_column) VALUES(...)`
- Output to human-friendly format for `SELECT domain_column FROM domain_table`
- Loading data from an external source in the human-friendly format: `INSERT INTO domain_table FORMAT CSV ...`

### Limitations {#limitations}

- Can’t convert an index column of the base type to the domain type via `ALTER TABLE`.
- Can’t implicitly convert string values into domain values when inserting data from another column or table.
- Domain adds no constraints on stored values.

[Original article](https://clickhouse.tech/docs/en/data_types/domains/) <!--hide-->
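The built-in `IPv4` domain (a domain over `UInt32`) makes the feature list above concrete. The sketch below is illustrative only and is not part of this commit; the table and column names are invented:

``` sql
-- IPv4 stores values as UInt32 on disk but accepts and prints dotted-quad strings.
CREATE TABLE hits (url String, visitor_ip IPv4) ENGINE = MergeTree() ORDER BY url;

-- Human-friendly input: the string literal is parsed into the underlying UInt32.
INSERT INTO hits (url, visitor_ip) VALUES ('https://clickhouse.tech', '116.106.34.242');

-- DESCRIBE reports the explicit domain name (IPv4) rather than UInt32,
-- and SELECT renders the stored number back in dotted-quad form.
DESCRIBE TABLE hits;
SELECT visitor_ip FROM hits;
```

Note that inserting the same string from another `String` column would fail: per the limitations above, string-to-domain conversion is never implicit.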
@@ -1,30 +0,0 @@
[deleted file: the old English Domains overview page; its body duplicated the new index page above and ended with the link https://clickhouse.tech/docs/en/data_types/domains/overview]
@@ -149,6 +149,63 @@ Rounds down a date with time to the start of the hour.

Rounds down a date with time to the start of the minute.

## toStartOfSecond {#tostartofsecond}

Truncates sub-seconds.

**Syntax**

``` sql
toStartOfSecond(value[, timezone])
```

**Parameters**

- `value` — Date and time. [DateTime64](../data-types/datetime64.md).
- `timezone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).

**Returned value**

- Input value without sub-seconds.

Type: [DateTime64](../data-types/datetime64.md).

**Examples**

Query without timezone:

``` sql
WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64
SELECT toStartOfSecond(dt64);
```

Result:

``` text
┌───toStartOfSecond(dt64)─┐
│ 2020-01-01 10:20:30.000 │
└─────────────────────────┘
```

Query with timezone:

``` sql
WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64
SELECT toStartOfSecond(dt64, 'Europe/Moscow');
```

Result:

``` text
┌─toStartOfSecond(dt64, 'Europe/Moscow')─┐
│ 2020-01-01 13:20:30.000 │
└────────────────────────────────────────┘
```

**See also**

- [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) server configuration parameter.

## toStartOfFiveMinute {#tostartoffiveminute}

Rounds down a date with time to the start of the five-minute interval.
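`toStartOfFiveMinute` has no worked example in this section, unlike `toStartOfSecond` above; a minimal illustrative query (the input value is arbitrary) would behave as follows:

``` sql
-- 10:23:45 falls into the [10:20:00, 10:25:00) interval, so it rounds down to 10:20:00.
WITH toDateTime('2020-01-01 10:23:45') AS dt
SELECT toStartOfFiveMinute(dt);
```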
@@ -1,6 +1,19 @@
---
toc_folder_title: Statements
toc_priority: 31
toc_hidden: true
---

# ClickHouse SQL Statements

Statements represent various kinds of actions you can perform using SQL queries. Each kind of statement has its own syntax and usage details that are described separately:

- [SELECT](select/index.md)
- [INSERT INTO](insert-into.md)
- [CREATE](create.md)
- [ALTER](alter.md)
- [SYSTEM](system.md)
- [SHOW](show.md)
- [GRANT](grant.md)
- [REVOKE](revoke.md)
- [Other](misc.md)
@@ -1,8 +1,33 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: Dominio
toc_priority: 56
toc_title: "Descripci\xF3n"
---

# Dominio {#domains}

Los dominios son tipos de propósito especial que agregan algunas características adicionales encima del tipo base existente, pero dejando intacto el formato en cable y en disco del tipo de datos subyacente. Por el momento, ClickHouse no admite dominios definidos por el usuario.

Puede usar dominios en cualquier lugar que se pueda usar el tipo base correspondiente, por ejemplo:

- Crear una columna de un tipo de dominio
- Leer/escribir valores desde/a la columna de dominio
- Úselo como un índice si un tipo base se puede usar como un índice
- Funciones de llamada con valores de la columna de dominio

### Características adicionales de los dominios {#extra-features-of-domains}

- Nombre de tipo de columna explícito en `SHOW CREATE TABLE` o `DESCRIBE TABLE`
- Entrada del formato humano-amistoso con `INSERT INTO domain_table(domain_column) VALUES(...)`
- Salida al formato humano-amistoso para `SELECT domain_column FROM domain_table`
- Carga de datos desde una fuente externa en el formato de uso humano: `INSERT INTO domain_table FORMAT CSV ...`

### Limitacion {#limitations}

- No se puede convertir la columna de índice del tipo base al tipo de dominio a través de `ALTER TABLE`.
- No se pueden convertir implícitamente valores de cadena en valores de dominio al insertar datos de otra columna o tabla.
- Domain no agrega restricciones en los valores almacenados.

[Artículo Original](https://clickhouse.tech/docs/en/data_types/domains/) <!--hide-->
@@ -1,32 +0,0 @@
[deleted file: the old Spanish Domains overview page; its body duplicated the new Spanish index page above and ended with the link https://clickhouse.tech/docs/en/data_types/domains/overview]
@@ -1,8 +1,33 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: "\u062F\u0627\u0645\u0646\u0647"
toc_priority: 56
toc_title: "\u0628\u0631\u0631\u0633\u06CC \u0627\u062C\u0645\u0627\u0644\u06CC"
---

# دامنه {#domains}

دامنه انواع خاصی است که اضافه کردن برخی از ویژگی های اضافی در بالای نوع پایه موجود, اما ترک بر روی سیم و بر روی دیسک فرمت از نوع داده اساسی دست نخورده. درحال حاضر, تاتر می کند دامنه تعریف شده توسط کاربر را پشتیبانی نمی کند.

شما می توانید دامنه در هر نقطه نوع پایه مربوطه استفاده می شود, مثلا:

- ایجاد یک ستون از یک نوع دامنه
- خواندن / نوشتن مقادیر از / به ستون دامنه
- اگر یک نوع پایه می تواند به عنوان یک شاخص استفاده می شود به عنوان شاخص استفاده می شود
- توابع تماس با مقادیر ستون دامنه

### ویژگی های اضافی از دامنه {#extra-features-of-domains}

- صریح نام نوع ستون در `SHOW CREATE TABLE` یا `DESCRIBE TABLE`
- ورودی از فرمت انسان دوستانه با `INSERT INTO domain_table(domain_column) VALUES(...)`
- خروجی به فرمت انسان دوستانه برای `SELECT domain_column FROM domain_table`
- بارگیری داده ها از یک منبع خارجی در قالب انسان دوستانه: `INSERT INTO domain_table FORMAT CSV ...`

### محدودیت ها {#limitations}

- می توانید ستون شاخص از نوع پایه به نوع دامنه از طریق تبدیل کنید `ALTER TABLE`.
- نمی تواند به طور ضمنی تبدیل مقادیر رشته به ارزش دامنه در هنگام قرار دادن داده ها از ستون یا جدول دیگر.
- دامنه می افزاید: هیچ محدودیتی در مقادیر ذخیره شده.

[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/domains/overview) <!--hide-->
@@ -1,32 +0,0 @@
[deleted file: the old Farsi Domains overview page; its body duplicated the new Farsi index page above and ended with the link https://clickhouse.tech/docs/en/data_types/domains/overview]
@@ -3,6 +3,31 @@ machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: Domaine
toc_priority: 56
toc_title: "Aper\xE7u"
---

# Domaine {#domains}

Les domaines sont des types spéciaux qui ajoutent des fonctionnalités supplémentaires au sommet du type de base existant, mais en laissant le format on-wire et on-disc du type de données sous-jacent intact. À l'heure actuelle, ClickHouse ne prend pas en charge les domaines définis par l'utilisateur.

Vous pouvez utiliser des domaines partout type de base correspondant peut être utilisé, par exemple:

- Créer une colonne d'un type de domaine
- Valeurs de lecture / écriture depuis / vers la colonne de domaine
- L'utiliser comme un indice si un type de base peut être utilisée comme un indice
- Fonctions d'appel avec des valeurs de colonne de domaine

### Fonctionnalités supplémentaires des domaines {#extra-features-of-domains}

- Nom de type de colonne explicite dans `SHOW CREATE TABLE` ou `DESCRIBE TABLE`
- Entrée du format convivial avec `INSERT INTO domain_table(domain_column) VALUES(...)`
- Sortie au format convivial pour `SELECT domain_column FROM domain_table`
- Chargement de données à partir d'une source externe dans un format convivial: `INSERT INTO domain_table FORMAT CSV ...`

### Limitation {#limitations}

- Impossible de convertir la colonne d'index du type de base en type de domaine via `ALTER TABLE`.
- Impossible de convertir implicitement des valeurs de chaîne en valeurs de domaine lors de l'insertion de données d'une autre colonne ou table.
- Le domaine n'ajoute aucune contrainte sur les valeurs stockées.

[Article Original](https://clickhouse.tech/docs/en/data_types/domains/overview) <!--hide-->
@@ -1,32 +0,0 @@
[deleted file: the old French Domains overview page; its body duplicated the new French index page above and ended with the link https://clickhouse.tech/docs/en/data_types/domains/overview]
@@ -3,6 +3,31 @@ machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: "\u30C9\u30E1\u30A4\u30F3"
toc_priority: 56
toc_title: "\u6982\u8981"
---

# ドメイン {#domains}

ドメインは、既存の基本型の上にいくつかの余分な機能を追加する特殊な目的の型ですが、基になるデータ型のオンワイヤおよびオンディスク形式は 現時点では、ClickHouseはユーザー定義ドメインをサポートしていません。

たとえば、対応する基本タイプを使用できる任意の場所でドメインを使用できます:

- ドメイン型の列を作成する
- ドメイン列から/への読み取り/書き込み値
- 基本型をインデックスとして使用できる場合は、インデックスとして使用します
- ドメイン列の値を持つ関数の呼び出し

### ドメインの追加機能 {#extra-features-of-domains}

- 明示的な列タイプ名 `SHOW CREATE TABLE` または `DESCRIBE TABLE`
- 人間に優しいフォーマットからの入力 `INSERT INTO domain_table(domain_column) VALUES(...)`
- 人間に優しいフォーマットへの出力 `SELECT domain_column FROM domain_table`
- 人間に優しい形式で外部ソースからデータを読み込む: `INSERT INTO domain_table FORMAT CSV ...`

### 制限 {#limitations}

- 基本型のインデックス列をドメイン型に変換できません `ALTER TABLE`.
- 別の列または表からデータを挿入するときに、文字列値を暗黙的にドメイン値に変換できません。
- ドメインは、格納された値に制約を追加しません。

[元の記事](https://clickhouse.tech/docs/en/data_types/domains/overview) <!--hide-->
@@ -1,32 +0,0 @@
[deleted file: the old Japanese Domains overview page; its body duplicated the new Japanese index page above and ended with the link https://clickhouse.tech/docs/en/data_types/domains/overview]
@@ -290,6 +290,7 @@ query_language/table_functions/remote.md sql-reference/table-functions/remote.md
query_language/table_functions/url.md sql-reference/table-functions/url.md
roadmap.md whats-new/roadmap.md
security_changelog.md whats-new/security-changelog.md
+sql-reference/data-types/domains/overview.md sql-reference/data-types/domains/index.md
sql_reference/aggregate_functions/combinators.md sql-reference/aggregate-functions/combinators.md
sql_reference/aggregate_functions/index.md sql-reference/aggregate-functions/index.md
sql_reference/aggregate_functions/parametric_functions.md sql-reference/aggregate-functions/parametric-functions.md
@@ -1,6 +1,33 @@
---
-toc_folder_title: Domains
+toc_folder_title: Домены
+toc_title: Обзор
toc_priority: 56
---

# Домены {#domeny}

Домены — это типы данных специального назначения, которые добавляют некоторые дополнительные функции поверх существующего базового типа. На данный момент ClickHouse не поддерживает пользовательские домены.

Вы можете использовать домены везде, где можно использовать соответствующий базовый тип:

- Создание столбца с доменным типом данных.
- Чтение/запись значений из/в столбец с доменным типом данных.
- Используйте его как индекс, если базовый тип можно использовать в качестве индекса.
- Вызов функций со значениями столбца, имеющего доменный тип данных.
- и так далее.

### Дополнительные возможности доменов {#dopolnitelnye-vozmozhnosti-domenov}

- Явное название типа данных столбца в запросах `SHOW CREATE TABLE` и `DESCRIBE TABLE`
- Ввод данных в удобном человеку формате `INSERT INTO domain_table(domain_column) VALUES(...)`
- Вывод данных в удобном человеку формате `SELECT domain_column FROM domain_table`
- Загрузка данных из внешнего источника в удобном для человека формате: `INSERT INTO domain_table FORMAT CSV ...`

### Ограничения {#ogranicheniia}

- Невозможно преобразовать базовый тип данных в доменный для индексного столбца с помощью `ALTER TABLE`.
- Невозможно неявно преобразовывать строковые значения в значения с доменным типом данных при вставке данных из другого столбца или таблицы.
- Домен не добавляет ограничения на хранимые значения.

[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/domains/overview) <!--hide-->
@@ -1,26 +0,0 @@
[deleted file: the old Russian Domains overview page; its body duplicated the new Russian index page above and ended with the link https://clickhouse.tech/docs/ru/data_types/domains/overview]
@@ -121,6 +121,62 @@ Result:

Округляет дату-с-временем вниз до начала минуты.

## toStartOfSecond {#tostartofsecond}

Отсекает доли секунды.

**Синтаксис**

``` sql
toStartOfSecond(value[, timezone])
```

**Параметры**

- `value` — Дата и время. [DateTime64](../data-types/datetime64.md).
- `timezone` — [Часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) для возвращаемого значения (необязательно). Если параметр не задан, используется часовой пояс параметра `value`. [String](../data-types/string.md).

**Возвращаемое значение**

- Входное значение с отсеченными долями секунды.

Тип: [DateTime64](../data-types/datetime64.md).

**Примеры**

Пример без часового пояса:

``` sql
WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(dt64);
```

Результат:

``` text
┌───toStartOfSecond(dt64)─┐
│ 2020-01-01 10:20:30.000 │
└─────────────────────────┘
```

Пример с часовым поясом:

``` sql
WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(dt64, 'Europe/Moscow');
```

Результат:

``` text
┌─toStartOfSecond(dt64, 'Europe/Moscow')─┐
│ 2020-01-01 13:20:30.000 │
└────────────────────────────────────────┘
```

**См. также**

- Часовая зона сервера, конфигурационный параметр [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone).

## toStartOfFiveMinute {#tostartoffiveminute}

Округляет дату-с-временем вниз до начала пятиминутного интервала.
@@ -1,8 +1,34 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
-toc_priority: 58
toc_title: "Genel bak\u0131\u015F"
+toc_folder_title: Etkiler
+toc_priority: 56
---

# Etkiler {#domains}

Etki alanları, varolan temel türün üstüne bazı ek özellikler ekleyen, ancak temel veri türünün kablolu ve disk üstü biçimini sağlam bırakan özel amaçlı türlerdir. Şu anda, ClickHouse kullanıcı tanımlı etki alanlarını desteklemiyor.

Örneğin, ilgili taban türünün kullanılabileceği her yerde etki alanlarını kullanabilirsiniz:

- Etki alanı türünde bir sütun oluşturma
- Alan sütunundan/alanına değerleri okuma / yazma
- Bir temel türü bir dizin olarak kullanılabilir, bir dizin olarak kullanın
- Etki alanı sütun değerleri ile çağrı fonksiyonları

### Alanların ekstra özellikleri {#extra-features-of-domains}

- Açık sütun türü adı `SHOW CREATE TABLE` veya `DESCRIBE TABLE`
- İle insan dostu format inputtan giriş `INSERT INTO domain_table(domain_column) VALUES(...)`
- İçin insan dostu forma outputta çıktı `SELECT domain_column FROM domain_table`
- Harici bir kaynaktan insan dostu biçimde veri yükleme: `INSERT INTO domain_table FORMAT CSV ...`

### Sınırlamalar {#limitations}

- Temel türün dizin sütununu etki alanı türüne dönüştürülemiyor `ALTER TABLE`.
- Başka bir sütun veya tablodan veri eklerken dize değerlerini dolaylı olarak etki alanı değerlerine dönüştüremez.
- Etki alanı, depolanan değerler üzerinde hiçbir kısıtlama ekler.

[Orijinal makale](https://clickhouse.tech/docs/en/data_types/domains/overview) <!--hide-->
@@ -1,32 +0,0 @@
[deleted file: the old Turkish Domains overview page; its body duplicated the new Turkish index page above and ended with the link https://clickhouse.tech/docs/en/data_types/domains/overview]
@@ -1,8 +1,34 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: "\u57DF"
toc_title: "域"
toc_priority: 56
---

# 域 {#domains}

Domain类型是特定实现的类型,它总是与某个现存的基础类型保持二进制兼容的同时添加一些额外的特性,以能够在维持磁盘数据不变的情况下使用这些额外的特性。目前ClickHouse暂不支持自定义domain类型。

如果你可以在一个地方使用与Domain类型二进制兼容的基础类型,那么在相同的地方您也可以使用Domain类型,例如:

- 使用Domain类型作为表中列的类型
- 对Domain类型的列进行读/写数据
- 如果与Domain二进制兼容的基础类型可以作为索引,那么Domain类型也可以作为索引
- 将Domain类型作为参数传递给函数使用
- 其他

### Domains的额外特性 {#domainsde-e-wai-te-xing}

- 在执行SHOW CREATE TABLE 或 DESCRIBE TABLE时,其对应的列总是展示为Domain类型的名称
- 在INSERT INTO domain\_table(domain\_column) VALUES(…)中输入数据总是以更人性化的格式进行输入
- 在SELECT domain\_column FROM domain\_table中数据总是以更人性化的格式输出
- 在INSERT INTO domain\_table FORMAT CSV …中,实现外部源数据以更人性化的格式载入

### Domains类型的限制 {#domainslei-xing-de-xian-zhi}

- 无法通过`ALTER TABLE`将基础类型的索引转换为Domain类型的索引。
- 当从其他列或表插入数据时,无法将string类型的值隐式地转换为Domain类型的值。
- 无法对存储为Domain类型的值添加约束。

[来源文章](https://clickhouse.tech/docs/en/data_types/domains/overview) <!--hide-->
@@ -1,26 +0,0 @@
[deleted file: the old Chinese Domains overview page; its body duplicated the new Chinese index page above and ended with the link https://clickhouse.tech/docs/en/data_types/domains/overview]
@@ -169,17 +169,6 @@ ASTPtr extractOrderBy(const ASTPtr & storage_ast)
}

-String createCommaSeparatedStringFrom(const Names & names)
-{
-    std::ostringstream ss;
-    if (!names.empty())
-    {
-        std::copy(names.begin(), std::prev(names.end()), std::ostream_iterator<std::string>(ss, ", "));
-        ss << names.back();
-    }
-    return ss.str();
-}
-
Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
{
    const auto sorting_key_ast = extractOrderBy(storage_ast);
@@ -40,6 +40,7 @@
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeString.h>
+#include <DataTypes/NestedUtils.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserQuery.h>
@@ -197,8 +198,6 @@ ASTPtr extractPrimaryKey(const ASTPtr & storage_ast);

ASTPtr extractOrderBy(const ASTPtr & storage_ast);

-String createCommaSeparatedStringFrom(const Names & names);
-
Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast);

String extractReplicatedTableZookeeperPath(const ASTPtr & storage_ast);
@@ -268,7 +268,7 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf
    ParserStorage parser_storage;
    engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
    engine_push_partition_key_ast = extractPartitionKey(engine_push_ast);
-   primary_key_comma_separated = createCommaSeparatedStringFrom(extractPrimaryKeyColumnNames(engine_push_ast));
+   primary_key_comma_separated = Nested::createCommaSeparatedStringFrom(extractPrimaryKeyColumnNames(engine_push_ast));
    engine_push_zk_path = extractReplicatedTableZookeeperPath(engine_push_ast);
}
@@ -88,35 +88,35 @@ void ColumnAggregateFunction::addArena(ConstArenaPtr arena_)
MutableColumnPtr ColumnAggregateFunction::convertToValues(MutableColumnPtr column)
{
    /** If the aggregate function returns an unfinalized/unfinished state,
      * then you just need to copy pointers to it and also share ownership of the data.
      *
      * Also replace the aggregate function with the nested function.
      * That is, if this column holds the states of the aggregate function `aggState`,
      * then we return the same column, but with the states of the aggregate function `agg`.
      * These are the same states; only the function to which they correspond changes.
      *
      * What follows is quite difficult to understand.
      * Example when this happens:
      *
      * SELECT k, finalizeAggregation(quantileTimingState(0.5)(x)) FROM ... GROUP BY k WITH TOTALS
      *
      * This calculates the aggregate function `quantileTimingState`.
      * Its return type is `AggregateFunction(quantileTiming(0.5), UInt64)`.
      * Due to the presence of WITH TOTALS, during aggregation the states of this aggregate function will be stored
      * in a ColumnAggregateFunction column of type
      * AggregateFunction(quantileTimingState(0.5), UInt64).
      * Then, in `TotalsHavingTransform`, the `convertToValues` method will be called
      * to get the "ready" values.
      * But it just converts a column of type
      * `AggregateFunction(quantileTimingState(0.5), UInt64)`
      * into `AggregateFunction(quantileTiming(0.5), UInt64)`
      * - in the same states.
-     *column_aggregate_func
+     *
      * Then the `finalizeAggregation` function will be calculated, which will call `convertToValues` again on the result.
      * And this converts a column of type
      * AggregateFunction(quantileTiming(0.5), UInt64)
      * into UInt16 - the already finished result of `quantileTiming`.
      */
    auto & column_aggregate_func = assert_cast<ColumnAggregateFunction &>(*column);
    auto & func = column_aggregate_func.func;
    auto & data = column_aggregate_func.data;
src/Core/SortDescription.cpp (new file, 41 lines)
@@ -0,0 +1,41 @@
#include <Core/SortDescription.h>
#include <Core/Block.h>
#include <IO/Operators.h>

namespace DB
{

void dumpSortDescription(const SortDescription & description, const Block & header, WriteBuffer & out)
{
    bool first = true;

    for (const auto & desc : description)
    {
        if (!first)
            out << ", ";
        first = false;

        if (!desc.column_name.empty())
            out << desc.column_name;
        else
        {
            if (desc.column_number < header.columns())
                out << header.getByPosition(desc.column_number).name;
            else
                out << "?";

            out << " (pos " << desc.column_number << ")";
        }

        if (desc.direction > 0)
            out << " ASC";
        else
            out << " DESC";

        if (desc.with_fill)
            out << " WITH FILL";
    }
}

}
@@ -71,4 +71,9 @@ struct SortColumnDescription
/// Description of the sorting rule for several columns.
using SortDescription = std::vector<SortColumnDescription>;

+class Block;
+
+/// Outputs user-readable description into `out`.
+void dumpSortDescription(const SortDescription & description, const Block & header, WriteBuffer & out);
+
}
@@ -20,6 +20,7 @@ SRCS(
    NamesAndTypes.cpp
    Settings.cpp
    SettingsCollection.cpp
+   SortDescription.cpp
)

END()
@@ -70,6 +70,17 @@ std::pair<std::string, std::string> splitName(const std::string & name)
    return {{ begin, first_end }, { second_begin, end }};
}

+std::string createCommaSeparatedStringFrom(const Names & names)
+{
+    std::ostringstream ss;
+    if (!names.empty())
+    {
+        std::copy(names.begin(), std::prev(names.end()), std::ostream_iterator<std::string>(ss, ", "));
+        ss << names.back();
+    }
+    return ss.str();
+}
+

std::string extractTableName(const std::string & nested_name)
{
@@ -13,6 +13,8 @@ namespace Nested

std::pair<std::string, std::string> splitName(const std::string & name);

+std::string createCommaSeparatedStringFrom(const Names & names);
+
/// Returns the prefix of the name to the first '.'. Or the name is unchanged if there is no dot.
std::string extractTableName(const std::string & nested_name);
@@ -42,7 +42,7 @@ using UInt8Container = ColumnUInt8::Container;
using UInt8ColumnPtrs = std::vector<const ColumnUInt8 *>;


-MutableColumnPtr convertFromTernaryData(const UInt8Container & ternary_data, const bool make_nullable)
+MutableColumnPtr buildColumnFromTernaryData(const UInt8Container & ternary_data, const bool make_nullable)
{
    const size_t rows_count = ternary_data.size();

@@ -63,7 +63,7 @@ MutableColumnPtr convertFromTernaryData(const UInt8Container & ternary_data, con
}

template <typename T>
-bool tryConvertColumnToUInt8(const IColumn * column, UInt8Container & res)
+bool tryConvertColumnToBool(const IColumn * column, UInt8Container & res)
{
    const auto col = checkAndGetColumn<ColumnVector<T>>(column);
    if (!col)
@@ -71,22 +71,22 @@ bool tryConvertColumnToUInt8(const IColumn * column, UInt8Container & res)

    std::transform(
        col->getData().cbegin(), col->getData().cend(), res.begin(),
-       [](const auto x) { return x != 0; });
+       [](const auto x) { return !!x; });

    return true;
}

-void convertColumnToUInt8(const IColumn * column, UInt8Container & res)
+void convertAnyColumnToBool(const IColumn * column, UInt8Container & res)
{
-   if (!tryConvertColumnToUInt8<Int8>(column, res) &&
-       !tryConvertColumnToUInt8<Int16>(column, res) &&
-       !tryConvertColumnToUInt8<Int32>(column, res) &&
-       !tryConvertColumnToUInt8<Int64>(column, res) &&
-       !tryConvertColumnToUInt8<UInt16>(column, res) &&
-       !tryConvertColumnToUInt8<UInt32>(column, res) &&
-       !tryConvertColumnToUInt8<UInt64>(column, res) &&
-       !tryConvertColumnToUInt8<Float32>(column, res) &&
-       !tryConvertColumnToUInt8<Float64>(column, res))
+   if (!tryConvertColumnToBool<Int8>(column, res) &&
+       !tryConvertColumnToBool<Int16>(column, res) &&
+       !tryConvertColumnToBool<Int32>(column, res) &&
+       !tryConvertColumnToBool<Int64>(column, res) &&
+       !tryConvertColumnToBool<UInt16>(column, res) &&
+       !tryConvertColumnToBool<UInt32>(column, res) &&
+       !tryConvertColumnToBool<UInt64>(column, res) &&
+       !tryConvertColumnToBool<Float32>(column, res) &&
+       !tryConvertColumnToBool<Float64>(column, res))
        throw Exception("Unexpected type of column: " + column->getName(), ErrorCodes::ILLEGAL_COLUMN);
}

@@ -119,7 +119,7 @@ static bool extractConstColumns(ColumnRawPtrs & in, UInt8 & res, Func && func)
}

template <class Op>
-inline bool extractConstColumns(ColumnRawPtrs & in, UInt8 & res)
+inline bool extractConstColumnsAsBool(ColumnRawPtrs & in, UInt8 & res)
{
    return extractConstColumns<Op>(
        in, res,
@@ -131,7 +131,7 @@ inline bool extractConstColumns(ColumnRawPtrs & in, UInt8 & res)
}

template <class Op>
-inline bool extractConstColumnsTernary(ColumnRawPtrs & in, UInt8 & res_3v)
+inline bool extractConstColumnsAsTernary(ColumnRawPtrs & in, UInt8 & res_3v)
{
    return extractConstColumns<Op>(
        in, res_3v,
@@ -145,6 +145,7 @@ inline bool extractConstColumnsTernary(ColumnRawPtrs & in, UInt8 & res_3v)
}


+/// N.B. This class calculates result only for non-nullable types
template <typename Op, size_t N>
class AssociativeApplierImpl
{
@@ -158,7 +159,7 @@ public:
    /// Returns a combination of values in the i-th row of all columns stored in the constructor.
    inline ResultValueType apply(const size_t i) const
    {
-       const auto & a = vec[i];
+       const auto a = !!vec[i];
        if constexpr (Op::isSaturable())
            return Op::isSaturatedValue(a) ? a : Op::apply(a, next.apply(i));
        else
@@ -179,7 +180,7 @@ public:
    explicit AssociativeApplierImpl(const UInt8ColumnPtrs & in)
        : vec(in[in.size() - 1]->getData()) {}

-   inline ResultValueType apply(const size_t i) const { return vec[i]; }
+   inline ResultValueType apply(const size_t i) const { return !!vec[i]; }

private:
    const UInt8Container & vec;
@@ -188,7 +189,7 @@ private:

/// A helper class used by AssociativeGenericApplierImpl
/// Allows for on-the-fly conversion of any data type into intermediate ternary representation
-using ValueGetter = std::function<Ternary::ResultType (size_t)>;
+using TernaryValueGetter = std::function<Ternary::ResultType (size_t)>;

template <typename ... Types>
struct ValueGetterBuilderImpl;
@@ -196,7 +197,7 @@ struct ValueGetterBuilderImpl;
template <typename Type, typename ...Types>
struct ValueGetterBuilderImpl<Type, Types...>
{
-   static ValueGetter build(const IColumn * x)
+   static TernaryValueGetter build(const IColumn * x)
    {
        if (const auto * nullable_column = typeid_cast<const ColumnNullable *>(x))
        {
@@ -218,7 +219,7 @@ struct ValueGetterBuilderImpl<Type, Types...>
template <>
struct ValueGetterBuilderImpl<>
{
-   static ValueGetter build(const IColumn * x)
+   static TernaryValueGetter build(const IColumn * x)
    {
        throw Exception(
            std::string("Unknown numeric column of type: ") + demangle(typeid(x).name()),
@@ -247,13 +248,13 @@ public:
    {
        const auto a = val_getter(i);
        if constexpr (Op::isSaturable())
-           return Op::isSaturatedValue(a) ? a : Op::apply(a, next.apply(i));
+           return Op::isSaturatedValueTernary(a) ? a : Op::apply(a, next.apply(i));
        else
            return Op::apply(a, next.apply(i));
    }

private:
-   const ValueGetter val_getter;
+   const TernaryValueGetter val_getter;
    const AssociativeGenericApplierImpl<Op, N - 1> next;
};

@@ -271,7 +272,7 @@ public:
    inline ResultValueType apply(const size_t i) const { return val_getter(i); }

private:
-   const ValueGetter val_getter;
+   const TernaryValueGetter val_getter;
};


@@ -332,13 +333,13 @@ static void executeForTernaryLogicImpl(ColumnRawPtrs arguments, ColumnWithTypeAn
{
    /// Combine all constant columns into a single constant value.
    UInt8 const_3v_value = 0;
-   const bool has_consts = extractConstColumnsTernary<Op>(arguments, const_3v_value);
+   const bool has_consts = extractConstColumnsAsTernary<Op>(arguments, const_3v_value);

    /// If the constant value uniquely determines the result, return it.
-   if (has_consts && (arguments.empty() || Op::isSaturatedValue(const_3v_value)))
+   if (has_consts && (arguments.empty() || Op::isSaturatedValueTernary(const_3v_value)))
    {
        result_info.column = ColumnConst::create(
-           convertFromTernaryData(UInt8Container({const_3v_value}), result_info.type->isNullable()),
+           buildColumnFromTernaryData(UInt8Container({const_3v_value}), result_info.type->isNullable()),
            input_rows_count
        );
        return;
@@ -349,7 +350,7 @@ static void executeForTernaryLogicImpl(ColumnRawPtrs arguments, ColumnWithTypeAn

    OperationApplier<Op, AssociativeGenericApplierImpl>::apply(arguments, result_column->getData(), has_consts);

-   result_info.column = convertFromTernaryData(result_column->getData(), result_info.type->isNullable());
+   result_info.column = buildColumnFromTernaryData(result_column->getData(), result_info.type->isNullable());
}


@@ -402,12 +403,13 @@ struct TypedExecutorInvoker<Op>
};


+/// Types of all of the arguments are guaranteed to be non-nullable here
template <class Op>
static void basicExecuteImpl(ColumnRawPtrs arguments, ColumnWithTypeAndName & result_info, size_t input_rows_count)
{
    /// Combine all constant columns into a single constant value.
    UInt8 const_val = 0;
-   bool has_consts = extractConstColumns<Op>(arguments, const_val);
+   bool has_consts = extractConstColumnsAsBool<Op>(arguments, const_val);

    /// If the constant value uniquely determines the result, return it.
    if (has_consts && (arguments.empty() || Op::apply(const_val, 0) == Op::apply(const_val, 1)))
@@ -447,7 +449,7 @@ static void basicExecuteImpl(ColumnRawPtrs arguments, ColumnWithTypeAndName & re
    else
    {
        auto converted_column = ColumnUInt8::create(input_rows_count);
-       convertColumnToUInt8(column, converted_column->getData());
+       convertAnyColumnToBool(column, converted_column->getData());
        uint8_args.push_back(converted_column.get());
        converted_columns_holder.emplace_back(std::move(converted_column));
    }
@@ -496,7 +498,8 @@ DataTypePtr FunctionAnyArityLogical<Impl, Name>::getReturnTypeImpl(const DataTyp
}

template <typename Impl, typename Name>
-void FunctionAnyArityLogical<Impl, Name>::executeImpl(Block & block, const ColumnNumbers & arguments, size_t result_index, size_t input_rows_count)
+void FunctionAnyArityLogical<Impl, Name>::executeImpl(
|
||||
Block & block, const ColumnNumbers & arguments, size_t result_index, size_t input_rows_count)
|
||||
{
|
||||
ColumnRawPtrs args_in;
|
||||
for (const auto arg_index : arguments)
|
||||
|
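A side note on the `!!vec[i]` change above: in two-valued mode the UInt8 column may hold any non-zero byte for "true", and the bitwise `Op::apply` implementations only behave like logical operators on strict 0/1 values. A minimal standalone sketch of why the normalization matters (names here are illustrative, not from the source):

#include <cstdint>
#include <cassert>

int main()
{
    // Nothing guarantees a UInt8 column stores exactly 0 or 1.
    uint8_t raw = 2;

    // Without normalization, bitwise AND gives a wrong logical answer:
    // 2 & 1 == 0, although both operands are "true".
    assert((raw & 1) == 0);

    // '!!' collapses any non-zero value to 1, so bitwise AND/OR
    // then coincide with logical AND/OR.
    uint8_t a = !!raw;
    assert((a & 1) == 1);
}
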
@ -36,9 +36,21 @@ namespace Ternary
|
||||
{
|
||||
using ResultType = UInt8;
|
||||
|
||||
static constexpr UInt8 False = 0;
|
||||
static constexpr UInt8 True = -1;
|
||||
static constexpr UInt8 Null = 1;
|
||||
/** These carefully picked values magically work so bitwise "and", "or" on them
|
||||
* corresponds to the expected results in three-valued logic.
|
||||
*
|
||||
* False and True are represented by all-0 and all-1 bits, so all bitwise operations on them work as expected.
|
||||
* Null is represented as single 1 bit. So, it is something in between False and True.
|
||||
* And "or" works like maximum and "and" works like minimum:
|
||||
* "or" keeps True as is and lifts False with Null to Null.
|
||||
* "and" keeps False as is and downs True with Null to Null.
|
||||
*
|
||||
* This logic does not apply for "not" and "xor" - they work with default implementation for NULLs:
|
||||
* anything with NULL returns NULL, otherwise use conventional two-valued logic.
|
||||
*/
|
||||
static constexpr UInt8 False = 0; /// All zero bits.
|
||||
static constexpr UInt8 True = -1; /// All one bits.
|
||||
static constexpr UInt8 Null = 1; /// Single one bit.
|
||||
|
||||
template <typename T>
|
||||
inline ResultType makeValue(T value)
|
||||
@ -61,8 +73,16 @@ struct AndImpl
|
||||
using ResultType = UInt8;
|
||||
|
||||
static inline constexpr bool isSaturable() { return true; }
|
||||
static inline constexpr bool isSaturatedValue(UInt8 a) { return a == Ternary::False; }
|
||||
|
||||
/// Final value in two-valued logic (no further operations with True, False will change this value)
|
||||
static inline constexpr bool isSaturatedValue(bool a) { return !a; }
|
||||
|
||||
/// Final value in three-valued logic (no further operations with True, False, Null will change this value)
|
||||
static inline constexpr bool isSaturatedValueTernary(UInt8 a) { return a == Ternary::False; }
|
||||
|
||||
static inline constexpr ResultType apply(UInt8 a, UInt8 b) { return a & b; }
|
||||
|
||||
/// Will use three-valued logic for NULLs (see above) or default implementation (any operation with NULL returns NULL).
|
||||
static inline constexpr bool specialImplementationForNulls() { return true; }
|
||||
};
|
||||
|
||||
@ -71,7 +91,8 @@ struct OrImpl
|
||||
using ResultType = UInt8;
|
||||
|
||||
static inline constexpr bool isSaturable() { return true; }
|
||||
static inline constexpr bool isSaturatedValue(UInt8 a) { return a == Ternary::True; }
|
||||
static inline constexpr bool isSaturatedValue(bool a) { return a; }
|
||||
static inline constexpr bool isSaturatedValueTernary(UInt8 a) { return a == Ternary::True; }
|
||||
static inline constexpr ResultType apply(UInt8 a, UInt8 b) { return a | b; }
|
||||
static inline constexpr bool specialImplementationForNulls() { return true; }
|
||||
};
|
||||
@ -82,7 +103,8 @@ struct XorImpl
|
||||
|
||||
static inline constexpr bool isSaturable() { return false; }
|
||||
static inline constexpr bool isSaturatedValue(bool) { return false; }
|
||||
static inline constexpr ResultType apply(UInt8 a, UInt8 b) { return !!a != !!b; }
|
||||
static inline constexpr bool isSaturatedValueTernary(UInt8) { return false; }
|
||||
static inline constexpr ResultType apply(UInt8 a, UInt8 b) { return a != b; }
|
||||
static inline constexpr bool specialImplementationForNulls() { return false; }
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
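The new comment block in the Ternary namespace can be checked directly: with the all-zero / single-bit / all-one encoding, plain bitwise AND and OR reproduce Kleene's three-valued logic. A small self-contained demonstration, with the constants renamed locally:

#include <cstdint>
#include <cassert>

int main()
{
    // Same encoding as Ternary::False, Ternary::Null, Ternary::True above.
    const uint8_t F = 0x00, N = 0x01, T = 0xFF;

    // "and" behaves like minimum in the order False < Null < True:
    assert((T & N) == N);  // True AND Null  -> Null
    assert((F & N) == F);  // False AND Null -> False

    // "or" behaves like maximum:
    assert((F | N) == N);  // False OR Null -> Null
    assert((T | N) == T);  // True OR Null  -> True
}
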
@ -11,6 +11,11 @@ namespace ErrorCodes
    extern const int ILLEGAL_COLUMN;
}


/** Replaces values where the condition is met with the previous value that has the condition not met
  * (or with the first value if the condition was true for all elements before).
  * Looks somewhat similar to arrayFilter, but instead of removing elements, it fills gaps with the value of the previous element.
  */
template <bool reverse>
struct ArrayFillImpl
{
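To make the ArrayFillImpl comment concrete, here is a rough scalar sketch of the forward (`reverse == false`) direction over a plain vector; the real implementation works on columns and additionally handles array offsets, the reverse direction, and nullability:

#include <vector>
#include <cstddef>

// Assumes cond.size() == data.size(). The first element is never replaced.
static void arrayFillForward(std::vector<int> & data, const std::vector<bool> & cond)
{
    if (data.empty())
        return;

    int last = data[0];
    for (size_t i = 1; i < data.size(); ++i)
    {
        if (cond[i])
            data[i] = last;   // gap: reuse the previous kept value
        else
            last = data[i];   // element keeps its own value
    }
}
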
@ -251,7 +251,6 @@ FunctionArrayIntersect::CastArgumentsResult FunctionArrayIntersect::castColumns(
        }
        else
        {

            if (!arg.type->equals(*return_type) && !arg.type->equals(*nullable_return_type))
            {
                /// If result has array type Array(T) still cast Array(Nullable(U)) to Array(Nullable(T))
@ -14,6 +14,11 @@
#include <Poco/Net/HTTPResponse.h>
#include <common/logger_useful.h>

namespace DB::ErrorCodes
{
    extern const int TOO_MANY_REDIRECTS;
}

namespace DB::S3
{
PocoHTTPClient::PocoHTTPClient(const Aws::Client::ClientConfiguration & clientConfiguration)

@ -153,8 +158,10 @@ void PocoHTTPClient::MakeRequestInternal(
        else
            response->GetResponseStream().SetUnderlyingStream(std::make_shared<PocoHTTPResponseStream>(session, response_body_stream));

        break;
        return;
    }
    throw Exception(String("Too many redirects while trying to access ") + request.GetUri().GetURIString(),
        ErrorCodes::TOO_MANY_REDIRECTS);
}
catch (...)
{
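The shape of the control flow after this change, reduced to a hypothetical sketch (constant and function names here are illustrative, not from PocoHTTPClient): each attempt either succeeds and returns, or follows a redirect; falling out of the loop now surfaces as TOO_MANY_REDIRECTS instead of silently breaking out.

#include <stdexcept>
#include <string>

void makeRequestWithRedirects(const std::string & initial_uri)
{
    std::string uri = initial_uri;
    constexpr int max_redirects = 10;  // illustrative bound, not the real setting

    for (int attempt = 0; attempt < max_redirects; ++attempt)
    {
        bool is_redirect = false;  // would be derived from the HTTP status code
        if (!is_redirect)
            return;                // success: 'return' replaces the old 'break'
        // otherwise: uri = Location header, try again
    }

    throw std::runtime_error("Too many redirects while trying to access " + initial_uri);
}
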
102
src/Interpreters/AggregateDescription.cpp
Normal file
@ -0,0 +1,102 @@
#include <Interpreters/AggregateDescription.h>
#include <Common/FieldVisitors.h>
#include <IO/Operators.h>

namespace DB
{

void AggregateDescription::explain(WriteBuffer & out, size_t indent) const
{
    String prefix(indent, ' ');

    out << prefix << column_name << '\n';

    auto dump_params = [&](const Array & arr)
    {
        bool first = true;
        for (const auto & param : arr)
        {
            if (!first)
                out << ", ";

            first = false;

            out << applyVisitor(FieldVisitorToString(), param);
        }
    };

    if (function)
    {
        /// Double whitespace is intentional.
        out << prefix << "  Function: " << function->getName();

        const auto & params = function->getParameters();
        if (!params.empty())
        {
            out << "(";
            dump_params(params);
            out << ")";
        }

        out << "(";

        bool first = true;
        for (const auto & type : function->getArgumentTypes())
        {
            if (!first)
                out << ", ";
            first = false;

            out << type->getName();
        }

        out << ") → " << function->getReturnType()->getName() << "\n";
    }
    else
        out << prefix << "  Function: nullptr\n";

    if (!parameters.empty())
    {
        out << prefix << "  Parameters: ";
        dump_params(parameters);
        out << '\n';
    }

    out << prefix << "  Arguments: ";

    if (argument_names.empty())
        out << "none\n";
    else
    {
        bool first = true;
        for (const auto & arg : argument_names)
        {
            if (!first)
                out << ", ";
            first = false;

            out << arg;
        }
        out << "\n";
    }

    out << prefix << "  Argument positions: ";

    if (arguments.empty())
        out << "none\n";
    else
    {
        bool first = true;
        for (auto arg : arguments)
        {
            if (!first)
                out << ", ";
            first = false;

            out << arg;
        }
        out << '\n';
    }
}

}
@ -15,6 +15,8 @@ struct AggregateDescription
    ColumnNumbers arguments;
    Names argument_names;    /// used if no `arguments` are specified.
    String column_name;      /// What name to use for a column with aggregate function values

    void explain(WriteBuffer & out, size_t indent) const;    /// Get description for EXPLAIN query.
};

using AggregateDescriptions = std::vector<AggregateDescription>;
@ -30,6 +30,7 @@
#include <AggregateFunctions/AggregateFunctionState.h>
#include <AggregateFunctions/AggregateFunctionResample.h>
#include <Disks/StoragePolicy.h>
#include <IO/Operators.h>


namespace ProfileEvents

@ -151,6 +152,42 @@ Block Aggregator::Params::getHeader(
    return materializeBlock(res);
}

void Aggregator::Params::explain(WriteBuffer & out, size_t indent) const
{
    Strings res;
    const auto & header = src_header ? src_header
                                     : intermediate_header;

    String prefix(indent, ' ');

    {
        /// Dump keys.
        out << prefix << "Keys: ";

        bool first = true;
        for (auto key : keys)
        {
            if (!first)
                out << ", ";
            first = false;

            if (key >= header.columns())
                out << "unknown position " << key;
            else
                out << header.getByPosition(key).name;
        }

        out << '\n';
    }

    if (!aggregates.empty())
    {
        out << prefix << "Aggregates:\n";

        for (const auto & aggregate : aggregates)
            aggregate.explain(out, indent + 4);
    }
}

Aggregator::Aggregator(const Params & params_)
    : params(params_),
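Assuming the write calls above, the printed description for a hypothetical `SELECT sum(x) FROM t GROUP BY k` would look roughly like this (exact indentation comes from the `indent` parameters; the double space before `Function:` is intentional, per the comment in AggregateDescription.cpp):

Keys: k
Aggregates:
    sum(x)
      Function: sum(UInt64) → UInt64
      Arguments: x
      Argument positions: 0
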
@ -923,6 +923,9 @@ public:
        {
            return getHeader(src_header, intermediate_header, keys, aggregates, final);
        }

        /// Returns keys and aggregates for EXPLAIN query
        void explain(WriteBuffer & out, size_t indent) const;
    };

    Aggregator(const Params & params_);
@ -10,16 +10,25 @@
#include <Parsers/DumpASTNode.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTExplainQuery.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <IO/WriteBufferFromOStream.h>

#include <Storages/StorageView.h>
#include <sstream>

#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/printPipeline.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int INCORRECT_QUERY;
    extern const int INVALID_SETTING_VALUE;
    extern const int UNKNOWN_SETTING;
    extern const int LOGICAL_ERROR;
}

namespace
{
    struct ExplainAnalyzedSyntaxMatcher

@ -79,10 +88,133 @@ Block InterpreterExplainQuery::getSampleBlock()
    return block;
}

/// Split str by line feed and write as separate row to ColumnString.
static void fillColumn(IColumn & column, const std::string & str)
{
    size_t start = 0;
    size_t end = 0;
    size_t size = str.size();

    while (end < size)
    {
        if (str[end] == '\n')
        {
            column.insertData(str.data() + start, end - start);
            start = end + 1;
        }

        ++end;
    }

    if (start < end)
        column.insertData(str.data() + start, end - start);
}

namespace
{

/// Settings. Different for each explain type.

struct QueryPlanSettings
{
    QueryPlan::ExplainPlanOptions query_plan_options;

    constexpr static char name[] = "PLAN";

    std::unordered_map<std::string, std::reference_wrapper<bool>> boolean_settings =
    {
        {"header", query_plan_options.header},
        {"description", query_plan_options.description},
        {"actions", query_plan_options.actions}
    };
};

struct QueryPipelineSettings
{
    QueryPlan::ExplainPipelineOptions query_pipeline_options;
    bool graph = false;
    bool compact = true;

    constexpr static char name[] = "PIPELINE";

    std::unordered_map<std::string, std::reference_wrapper<bool>> boolean_settings =
    {
        {"header", query_pipeline_options.header},
        {"graph", graph},
        {"compact", compact},
    };
};

template <typename Settings>
struct ExplainSettings : public Settings
{
    using Settings::boolean_settings;

    bool has(const std::string & name_) const
    {
        return boolean_settings.count(name_) > 0;
    }

    void setBooleanSetting(const std::string & name_, bool value)
    {
        auto it = boolean_settings.find(name_);
        if (it == boolean_settings.end())
            throw Exception("Unknown setting for ExplainSettings: " + name_, ErrorCodes::LOGICAL_ERROR);

        it->second.get() = value;
    }

    std::string getSettingsList() const
    {
        std::string res;
        for (const auto & setting : boolean_settings)
        {
            if (!res.empty())
                res += ", ";

            res += setting.first;
        }

        return res;
    }
};

template <typename Settings>
ExplainSettings<Settings> checkAndGetSettings(const ASTPtr & ast_settings)
{
    if (!ast_settings)
        return {};

    ExplainSettings<Settings> settings;
    const auto & set_query = ast_settings->as<ASTSetQuery &>();

    for (const auto & change : set_query.changes)
    {
        if (!settings.has(change.name))
            throw Exception("Unknown setting \"" + change.name + "\" for EXPLAIN " + Settings::name + " query. "
                "Supported settings: " + settings.getSettingsList(), ErrorCodes::UNKNOWN_SETTING);

        if (change.value.getType() != Field::Types::UInt64)
            throw Exception("Invalid type " + std::string(change.value.getTypeName()) + " for setting \"" + change.name +
                "\" only boolean settings are supported", ErrorCodes::INVALID_SETTING_VALUE);

        auto value = change.value.get<UInt64>();
        if (value > 1)
            throw Exception("Invalid value " + std::to_string(value) + " for setting \"" + change.name +
                "\". Only boolean settings are supported", ErrorCodes::INVALID_SETTING_VALUE);

        settings.setBooleanSetting(change.name, value);
    }

    return settings;
}

}

BlockInputStreamPtr InterpreterExplainQuery::executeImpl()
{
    const auto & ast = query->as<ASTExplainQuery &>();

    Block sample_block = getSampleBlock();
    MutableColumns res_columns = sample_block.cloneEmptyColumns();
@ -90,17 +222,63 @@ BlockInputStreamPtr InterpreterExplainQuery::executeImpl()

    if (ast.getKind() == ASTExplainQuery::ParsedAST)
    {
        dumpAST(ast, ss);
        if (ast.getSettings())
            throw Exception("Settings are not supported for EXPLAIN AST query.", ErrorCodes::UNKNOWN_SETTING);

        dumpAST(*ast.getExplainedQuery(), ss);
    }
    else if (ast.getKind() == ASTExplainQuery::AnalyzedSyntax)
    {
        if (ast.getSettings())
            throw Exception("Settings are not supported for EXPLAIN SYNTAX query.", ErrorCodes::UNKNOWN_SETTING);

        ExplainAnalyzedSyntaxVisitor::Data data{.context = context};
        ExplainAnalyzedSyntaxVisitor(data).visit(query);

        ast.children.at(0)->format(IAST::FormatSettings(ss, false));
        ast.getExplainedQuery()->format(IAST::FormatSettings(ss, false));
    }
    else if (ast.getKind() == ASTExplainQuery::QueryPlan)
    {
        if (!dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
            throw Exception("Only SELECT is supported for EXPLAIN query", ErrorCodes::INCORRECT_QUERY);

        auto settings = checkAndGetSettings<QueryPlanSettings>(ast.getSettings());
        QueryPlan plan;

        InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), context, SelectQueryOptions());
        interpreter.buildQueryPlan(plan);

        WriteBufferFromOStream buffer(ss);
        plan.explainPlan(buffer, settings.query_plan_options);
    }
    else if (ast.getKind() == ASTExplainQuery::QueryPipeline)
    {
        if (!dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
            throw Exception("Only SELECT is supported for EXPLAIN query", ErrorCodes::INCORRECT_QUERY);

        auto settings = checkAndGetSettings<QueryPipelineSettings>(ast.getSettings());
        QueryPlan plan;

        InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), context, SelectQueryOptions());
        interpreter.buildQueryPlan(plan);
        auto pipeline = plan.buildQueryPipeline();

        WriteBufferFromOStream buffer(ss);

        if (settings.graph)
        {
            if (settings.compact)
                printPipelineCompact(pipeline->getProcessors(), buffer, settings.query_pipeline_options.header);
            else
                printPipeline(pipeline->getProcessors(), buffer);
        }
        else
        {
            plan.explainPipeline(buffer, settings.query_pipeline_options);
        }
    }

    res_columns[0]->insert(ss.str());
    fillColumn(*res_columns[0], ss.str());

    return std::make_shared<OneBlockInputStream>(sample_block.cloneWithColumns(std::move(res_columns)));
}
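The `boolean_settings` maps above hold `std::reference_wrapper<bool>` entries that point into the options structs, so `setBooleanSetting` writes straight through to the option fields. A stripped-down model of that pattern (not the actual DB types; note the object must not be copied or moved after construction, since the map would still reference the original members):

#include <functional>
#include <stdexcept>
#include <string>
#include <unordered_map>

struct PlanOptions { bool header = false; bool actions = false; };

struct Settings
{
    PlanOptions options;

    // Each entry aliases a bool member of 'options'.
    std::unordered_map<std::string, std::reference_wrapper<bool>> boolean_settings =
    {
        {"header", options.header},
        {"actions", options.actions},
    };

    void set(const std::string & name, bool value)
    {
        auto it = boolean_settings.find(name);
        if (it == boolean_settings.end())
            throw std::invalid_argument("unknown setting: " + name);
        it->second.get() = value;  // writes through to options.*
    }
};

int main()
{
    Settings s;
    s.set("header", true);   // models 'EXPLAIN ... header = 1'
    return s.options.header ? 0 : 1;
}
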
@ -54,7 +54,7 @@
#include <Processors/QueryPlan/CubeStep.h>
#include <Processors/QueryPlan/FillingStep.h>
#include <Processors/QueryPlan/ExtremesStep.h>
#include <Processors/QueryPlan/OffsetsStep.h>
#include <Processors/QueryPlan/OffsetStep.h>
#include <Processors/QueryPlan/FinishSortingStep.h>
#include <Processors/QueryPlan/QueryPlan.h>

@ -962,7 +962,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu
             */

            if (!expressions.first_stage && !expressions.need_aggregate && !(query.group_by_with_totals && !aggregate_final))
                executeMergeSorted(query_plan, "before ORDER BY");
                executeMergeSorted(query_plan, "for ORDER BY");
            else    /// Otherwise, just sort.
                executeOrder(query_plan, query_info.input_order_info);
        }

@ -1589,7 +1589,7 @@ void InterpreterSelectQuery::executeOrder(QueryPlan & query_plan, InputOrderInfo
        limit,
        SizeLimits(settings.max_rows_to_sort, settings.max_bytes_to_sort, settings.sort_overflow_mode));

    partial_sorting->setStepDescription("Sort each block before ORDER BY");
    partial_sorting->setStepDescription("Sort each block for ORDER BY");
    query_plan.addStep(std::move(partial_sorting));

    /// Merge the sorted blocks.

@ -1600,11 +1600,11 @@ void InterpreterSelectQuery::executeOrder(QueryPlan & query_plan, InputOrderInfo
        settings.max_bytes_before_external_sort, context->getTemporaryVolume(),
        settings.min_free_disk_space_for_temporary_data);

    merge_sorting_step->setStepDescription("Merge sorted blocks before ORDER BY");
    merge_sorting_step->setStepDescription("Merge sorted blocks for ORDER BY");
    query_plan.addStep(std::move(merge_sorting_step));

    /// If there are several streams, we merge them into one
    executeMergeSorted(query_plan, output_order_descr, limit, "before ORDER BY");
    executeMergeSorted(query_plan, output_order_descr, limit, "for ORDER BY");
}


@ -1785,7 +1785,7 @@ void InterpreterSelectQuery::executeOffset(QueryPlan & query_plan)
    UInt64 limit_offset;
    std::tie(limit_length, limit_offset) = getLimitLengthAndOffset(query, *context);

    auto offsets_step = std::make_unique<OffsetsStep>(query_plan.getCurrentDataStream(), limit_offset);
    auto offsets_step = std::make_unique<OffsetStep>(query_plan.getCurrentDataStream(), limit_offset);
    query_plan.addStep(std::move(offsets_step));
}
}
@ -257,7 +257,7 @@ struct ColumnAliasesMatcher
            if (!last_table)
            {
                IdentifierSemantic::coverName(node, alias);
                node.setAlias("");
                node.setAlias({});
            }
        }
        else if (node.compound())
@ -76,7 +76,7 @@ static void cleanAliasAndCollectIdentifiers(ASTPtr & predicate, std::vector<ASTI
    }

    if (const auto alias = predicate->tryGetAlias(); !alias.empty())
        predicate->setAlias("");
        predicate->setAlias({});

    if (ASTIdentifier * identifier = predicate->as<ASTIdentifier>())
        identifiers.emplace_back(identifier);
@ -18,6 +18,7 @@ SRCS(
    ActionsVisitor.cpp
    addMissingDefaults.cpp
    addTypeConversionToAST.cpp
    AggregateDescription.cpp
    Aggregator.cpp
    AnyInputOptimize.cpp
    ArithmeticOperationsInAgrFuncOptimize.cpp
@ -1,6 +1,6 @@
#pragma once

#include <Parsers/IAST.h>
#include <Parsers/ASTQueryWithOutput.h>


namespace DB

@ -8,45 +8,78 @@ namespace DB


/// AST, EXPLAIN or other query with meaning of explanation query instead of execution
class ASTExplainQuery : public IAST
class ASTExplainQuery : public ASTQueryWithOutput
{
public:
    enum ExplainKind
    {
        ParsedAST,
        AnalyzedSyntax,
        ParsedAST,      /// 'EXPLAIN AST SELECT ...'
        AnalyzedSyntax, /// 'EXPLAIN SYNTAX SELECT ...'
        QueryPlan,      /// 'EXPLAIN SELECT ...'
        QueryPipeline,  /// 'EXPLAIN PIPELINE ...'
    };

    ASTExplainQuery(ExplainKind kind_)
        : kind(kind_)
    {}
    ASTExplainQuery(ExplainKind kind_, bool old_syntax_)
        : kind(kind_), old_syntax(old_syntax_)
    {
    }

    String getID(char delim) const override { return "Explain" + (delim + toString(kind)); }
    String getID(char delim) const override { return "Explain" + (delim + toString(kind, old_syntax)); }
    ExplainKind getKind() const { return kind; }
    ASTPtr clone() const override
    {
        auto res = std::make_shared<ASTExplainQuery>(*this);
        res->children.clear();
        res->children.push_back(children[0]->clone());
        cloneOutputOptions(*res);
        return res;
    }

protected:
    void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override
    void setExplainedQuery(ASTPtr query_)
    {
        settings.ostr << (settings.hilite ? hilite_keyword : "") << toString(kind) << (settings.hilite ? hilite_none : "") << " ";
        children.at(0)->formatImpl(settings, state, frame);
        children.emplace_back(query_);
        query = std::move(query_);
    }

    void setSettings(ASTPtr settings_)
    {
        children.emplace_back(settings_);
        ast_settings = std::move(settings_);
    }

    const ASTPtr & getExplainedQuery() const { return query; }
    const ASTPtr & getSettings() const { return ast_settings; }

protected:
    void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override
    {
        settings.ostr << (settings.hilite ? hilite_keyword : "") << toString(kind, old_syntax) << (settings.hilite ? hilite_none : "");

        if (ast_settings)
        {
            settings.ostr << ' ';
            ast_settings->formatImpl(settings, state, frame);
        }

        settings.ostr << settings.nl_or_ws;
        query->formatImpl(settings, state, frame);
    }

private:
    ExplainKind kind;
    bool old_syntax;    /// "EXPLAIN AST" -> "AST", "EXPLAIN SYNTAX" -> "ANALYZE"

    static String toString(ExplainKind kind)
    ASTPtr query;
    ASTPtr ast_settings;

    static String toString(ExplainKind kind, bool old_syntax)
    {
        switch (kind)
        {
            case ParsedAST: return "AST";
            case AnalyzedSyntax: return "ANALYZE";
            case ParsedAST: return old_syntax ? "AST" : "EXPLAIN AST";
            case AnalyzedSyntax: return old_syntax ? "ANALYZE" : "EXPLAIN SYNTAX";
            case QueryPlan: return "EXPLAIN";
            case QueryPipeline: return "EXPLAIN PIPELINE";
        }

        __builtin_unreachable();
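For orientation, these kinds correspond to query forms like the following (spellings per the toString mapping above; the bare forms are the legacy ones kept behind old_syntax):

EXPLAIN AST SELECT 1
EXPLAIN SYNTAX SELECT 1
EXPLAIN SELECT 1
EXPLAIN PIPELINE SELECT 1
AST SELECT 1        -- legacy, needs enable_debug_queries
ANALYZE SELECT 1    -- legacy, needs enable_debug_queries
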
72
src/Parsers/ParserExplainQuery.cpp
Normal file
@ -0,0 +1,72 @@
#include <Parsers/ParserExplainQuery.h>
#include <Parsers/ASTExplainQuery.h>
#include <Parsers/CommonParsers.h>
#include <Parsers/ParserSelectWithUnionQuery.h>
#include <Parsers/ParserSetQuery.h>

namespace DB
{

bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
    ASTExplainQuery::ExplainKind kind;
    bool old_syntax = false;

    ParserKeyword s_ast("AST");
    ParserKeyword s_analyze("ANALYZE");
    ParserKeyword s_explain("EXPLAIN");
    ParserKeyword s_syntax("SYNTAX");
    ParserKeyword s_pipeline("PIPELINE");
    ParserKeyword s_plan("PLAN");

    if (enable_debug_queries && s_ast.ignore(pos, expected))
    {
        old_syntax = true;
        kind = ASTExplainQuery::ExplainKind::ParsedAST;
    }
    else if (enable_debug_queries && s_analyze.ignore(pos, expected))
    {
        old_syntax = true;
        kind = ASTExplainQuery::ExplainKind::AnalyzedSyntax;
    }
    else if (s_explain.ignore(pos, expected))
    {
        kind = ASTExplainQuery::QueryPlan;

        if (s_ast.ignore(pos, expected))
            kind = ASTExplainQuery::ExplainKind::ParsedAST;
        else if (s_syntax.ignore(pos, expected))
            kind = ASTExplainQuery::ExplainKind::AnalyzedSyntax;
        else if (s_pipeline.ignore(pos, expected))
            kind = ASTExplainQuery::ExplainKind::QueryPipeline;
        else if (s_plan.ignore(pos, expected))
            kind = ASTExplainQuery::ExplainKind::QueryPlan;
    }
    else
        return false;

    auto explain_query = std::make_shared<ASTExplainQuery>(kind, old_syntax);

    {
        ASTPtr settings;
        ParserSetQuery parser_settings(true);

        auto begin = pos;
        if (parser_settings.parse(pos, settings, expected))
            explain_query->setSettings(std::move(settings));
        else
            pos = begin;
    }

    ParserSelectWithUnionQuery select_p;
    ASTPtr query;
    if (!select_p.parse(pos, query, expected))
        return false;

    explain_query->setExplainedQuery(std::move(query));

    node = std::move(explain_query);
    return true;
}

}
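The optional settings clause above uses a save/try/rewind idiom: remember the position, attempt ParserSetQuery, and restore the position if it does not match. The same idiom in generic form, as a sketch rather than an actual parser-base utility:

// Try an optional clause: on failure the token position is rewound,
// so the caller can continue parsing as if the clause was never there.
template <typename Parser, typename Pos, typename Ast, typename Expected>
bool tryParseOptional(Parser & parser, Pos & pos, Ast & out, Expected & expected)
{
    auto begin = pos;              // checkpoint
    if (parser.parse(pos, out, expected))
        return true;               // matched: keep the advanced position
    pos = begin;                   // rewind: the clause simply was not present
    return false;
}
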
25
src/Parsers/ParserExplainQuery.h
Normal file
@ -0,0 +1,25 @@
#pragma once

#include <Parsers/IParserBase.h>

namespace DB
{


class ParserExplainQuery : public IParserBase
{
public:
    explicit ParserExplainQuery(bool enable_debug_queries_ = false)
        : enable_debug_queries(enable_debug_queries_)
    {
    }

protected:
    const char * getName() const override { return "EXPLAIN"; }
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

private:
    bool enable_debug_queries;
};

}
@ -19,6 +19,7 @@
#include <Parsers/ParserShowCreateAccessEntityQuery.h>
#include <Parsers/ParserShowGrantsQuery.h>
#include <Parsers/ParserShowPrivilegesQuery.h>
#include <Parsers/ParserExplainQuery.h>


namespace DB

@ -44,21 +45,13 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
    ParserShowCreateAccessEntityQuery show_create_access_entity_p;
    ParserShowGrantsQuery show_grants_p;
    ParserShowPrivilegesQuery show_privileges_p;
    ParserExplainQuery explain_p(enable_debug_queries);

    ASTPtr query;

    ParserKeyword s_ast("AST");
    ParserKeyword s_analyze("ANALYZE");
    bool explain_ast = false;
    bool analyze_syntax = false;

    if (enable_explain && s_ast.ignore(pos, expected))
        explain_ast = true;

    if (enable_explain && s_analyze.ignore(pos, expected))
        analyze_syntax = true;

    bool parsed = select_p.parse(pos, query, expected)
    bool parsed =
           explain_p.parse(pos, query, expected)
        || select_p.parse(pos, query, expected)
        || show_create_access_entity_p.parse(pos, query, expected) /// should be before `show_tables_p`
        || show_tables_p.parse(pos, query, expected)
        || table_p.parse(pos, query, expected)

@ -116,19 +109,17 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
        query_with_output.children.push_back(query_with_output.settings_ast);
    }

    if (explain_ast)
    if (auto * ast = query->as<ASTExplainQuery>())
    {
        node = std::make_shared<ASTExplainQuery>(ASTExplainQuery::ParsedAST);
        node->children.push_back(query);
        /// Set default format TSV, because output is a single string column.
        if (!ast->format)
        {
            ast->format = std::make_shared<ASTIdentifier>("TSV");
            ast->children.push_back(ast->format);
        }
    }
    else if (analyze_syntax)
    {
        node = std::make_shared<ASTExplainQuery>(ASTExplainQuery::AnalyzedSyntax);
        node->children.push_back(query);
    }
    else
        node = query;

    node = std::move(query);
    return true;
}
@ -11,8 +11,9 @@ namespace DB
class ParserQueryWithOutput : public IParserBase
{
public:
    ParserQueryWithOutput(bool enable_explain_ = false)
        : enable_explain(enable_explain_)
    /// enable_debug_queries flag enables queries 'AST SELECT' and 'ANALYZE SELECT'
    explicit ParserQueryWithOutput(bool enable_debug_queries_ = false)
        : enable_debug_queries(enable_debug_queries_)
    {}

protected:

@ -21,7 +22,7 @@ protected:
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

private:
    bool enable_explain;
    bool enable_debug_queries;
};

}
@ -84,6 +84,7 @@ SRCS(
    ParserDictionaryAttributeDeclaration.cpp
    ParserDropAccessEntityQuery.cpp
    ParserDropQuery.cpp
    ParserExplainQuery.cpp
    ParserGrantQuery.cpp
    ParserInsertQuery.cpp
    ParserKillQueryQuery.cpp
@ -15,6 +15,8 @@ namespace ErrorCodes
    extern const int NOT_IMPLEMENTED;
}

class IQueryPlanStep;

class IProcessor;
using ProcessorPtr = std::shared_ptr<IProcessor>;
using Processors = std::vector<ProcessorPtr>;

@ -288,6 +290,16 @@ public:
    void enableQuota() { has_quota = true; }
    bool hasQuota() const { return has_quota; }

    /// Step of QueryPlan from which processor was created.
    void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0)
    {
        query_plan_step = step;
        query_plan_step_group = group;
    }

    IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; }
    size_t getQueryPlanStepGroup() const { return query_plan_step_group; }

protected:
    virtual void onCancel() {}

@ -299,6 +311,9 @@ private:
    size_t stream_number = NO_STREAM;

    bool has_quota = false;

    IQueryPlanStep * query_plan_step = nullptr;
    size_t query_plan_step_group = 0;
};

@ -94,6 +94,12 @@ static bool isInPrimaryKey(const SortDescription & description, const std::strin
    return false;
}

static bool isInPartitionKey(const std::string & column_name, const Names & partition_key_columns)
{
    auto is_in_partition_key = std::find(partition_key_columns.begin(), partition_key_columns.end(), column_name);
    return is_in_partition_key != partition_key_columns.end();
}

/// Returns true if merge result is not empty
static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc,
                     Row & row, const ColumnRawPtrs & raw_columns, size_t row_number)

@ -181,7 +187,8 @@ static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc,
static SummingSortedAlgorithm::ColumnsDefinition defineColumns(
    const Block & header,
    const SortDescription & description,
    const Names & column_names_to_sum)
    const Names & column_names_to_sum,
    const Names & partition_key_columns)
{
    size_t num_columns = header.columns();
    SummingSortedAlgorithm::ColumnsDefinition def;

@ -223,8 +230,8 @@ static SummingSortedAlgorithm::ColumnsDefinition defineColumns(
            continue;
        }

        /// Are they inside the PK?
        if (isInPrimaryKey(description, column.name, i))
        /// Are they inside the primary key or partition key?
        if (isInPrimaryKey(description, column.name, i) || isInPartitionKey(column.name, partition_key_columns))
        {
            def.column_numbers_not_to_aggregate.push_back(i);
            continue;

@ -617,9 +624,10 @@ SummingSortedAlgorithm::SummingSortedAlgorithm(
    const Block & header, size_t num_inputs,
    SortDescription description_,
    const Names & column_names_to_sum,
    const Names & partition_key_columns,
    size_t max_block_size)
    : IMergingAlgorithmWithDelayedChunk(num_inputs, std::move(description_))
    , columns_definition(defineColumns(header, description, column_names_to_sum))
    , columns_definition(defineColumns(header, description, column_names_to_sum, partition_key_columns))
    , merged_data(getMergedDataColumns(header, columns_definition), max_block_size, columns_definition)
{
}
@ -20,6 +20,8 @@ public:
        SortDescription description_,
        /// List of columns to be summed. If empty, all numeric columns that are not in the description are taken.
        const Names & column_names_to_sum,
        /// List of partition key columns. They have to be excluded.
        const Names & partition_key_columns,
        size_t max_block_size);

    void initialize(Inputs inputs) override;
@ -16,6 +16,7 @@ public:
        SortDescription description_,
        /// List of columns to be summed. If empty, all numeric columns that are not in the description are taken.
        const Names & column_names_to_sum,
        const Names & partition_key_columns,
        size_t max_block_size)
        : IMergingTransform(
            num_inputs, header, header, true,

@ -23,6 +24,7 @@ public:
            num_inputs,
            std::move(description_),
            column_names_to_sum,
            partition_key_columns,
            max_block_size)
    {
    }
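Why partition key columns have to be excluded, as a hypothetical worked example: merges happen only within one data part, and every row of a part carries the same partition key values, so "summing" such a column can only corrupt it. For a SummingMergeTree partitioned by a numeric column m (with m not in the sort key), merging the rows (m=7, id=1, value=10) and (m=7, id=1, value=5) must produce (m=7, id=1, value=15); before this change, m could itself be picked as a column to sum, yielding m=14. Passing the partition key columns into defineColumns routes them into column_numbers_not_to_aggregate instead.
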
@ -69,7 +69,8 @@ void QueryPipeline::init(Pipe pipe)
    init(std::move(pipes));
}

static OutputPort * uniteExtremes(const std::vector<OutputPort *> & ports, const Block & header, Processors & processors)
static OutputPort * uniteExtremes(const std::vector<OutputPort *> & ports, const Block & header,
                                  QueryPipeline::ProcessorsContainer & processors)
{
    /// Here we calculate extremes for extremes in case we unite several pipelines.
    /// Example: select number from numbers(2) union all select number from numbers(3)

@ -90,14 +91,15 @@ static OutputPort * uniteExtremes(const std::vector<OutputPort *> & ports, const
    connect(resize->getOutputs().front(), extremes->getInputPort());
    connect(extremes->getOutputPort(), sink->getPort());

    processors.emplace_back(std::move(resize));
    processors.emplace_back(std::move(extremes));
    processors.emplace_back(std::move(sink));
    processors.emplace(std::move(resize));
    processors.emplace(std::move(extremes));
    processors.emplace(std::move(sink));

    return extremes_port;
}

static OutputPort * uniteTotals(const std::vector<OutputPort *> & ports, const Block & header, Processors & processors)
static OutputPort * uniteTotals(const std::vector<OutputPort *> & ports, const Block & header,
                                QueryPipeline::ProcessorsContainer & processors)
{
    /// Calculate totals from several streams.
    /// Take totals from first sources which has any, skip others.

@ -115,8 +117,8 @@ static OutputPort * uniteTotals(const std::vector<OutputPort *> & ports, const B

    connect(concat->getOutputs().front(), limit->getInputPort());

    processors.emplace_back(std::move(concat));
    processors.emplace_back(std::move(limit));
    processors.emplace(std::move(concat));
    processors.emplace(std::move(limit));

    return totals_port;
}

@ -167,8 +169,7 @@ void QueryPipeline::init(Pipes pipes)
        }

        streams.addStream(&pipe.getPort(), pipe.maxParallelStreams());
        auto cur_processors = std::move(pipe).detachProcessors();
        processors.insert(processors.end(), cur_processors.begin(), cur_processors.end());
        processors.emplace(std::move(pipe).detachProcessors());
    }

    if (!totals.empty())

@ -242,7 +243,7 @@ void QueryPipeline::addSimpleTransformImpl(const TProcessorGetter & getter)
        {
            connect(*stream, transform->getInputs().front());
            stream = &transform->getOutputs().front();
            processors.emplace_back(std::move(transform));
            processors.emplace(std::move(transform));
        }
    };

@ -293,7 +294,7 @@ void QueryPipeline::setSinks(const ProcessorGetterWithStreamKind & getter)
            transform = std::make_shared<NullSink>(stream->getHeader());

        connect(*stream, transform->getInputs().front());
        processors.emplace_back(std::move(transform));
        processors.emplace(std::move(transform));
    };

    for (auto & stream : streams)

@ -339,7 +340,7 @@ void QueryPipeline::addPipe(Processors pipe)
        header = output.getHeader();
    }

    processors.insert(processors.end(), pipe.begin(), pipe.end());
    processors.emplace(pipe);
    current_header = std::move(header);
}

@ -352,7 +353,7 @@ void QueryPipeline::addDelayedStream(ProcessorPtr source)

    IProcessor::PortNumbers delayed_streams = { streams.size() };
    streams.addStream(&source->getOutputs().front(), 0);
    processors.emplace_back(std::move(source));
    processors.emplace(std::move(source));

    auto processor = std::make_shared<DelayedPortsProcessor>(current_header, streams.size(), delayed_streams);
    addPipe({ std::move(processor) });

@ -383,7 +384,7 @@ void QueryPipeline::resize(size_t num_streams, bool force, bool strict)
    for (auto & output : resize->getOutputs())
        streams.addStream(&output, 0);

    processors.emplace_back(std::move(resize));
    processors.emplace(std::move(resize));
}

void QueryPipeline::enableQuotaForCurrentStreams()

@ -412,7 +413,7 @@ void QueryPipeline::addTotalsHavingTransform(ProcessorPtr transform)
    streams.assign({ &outputs.front() });
    totals_having_port = &outputs.back();
    current_header = outputs.front().getHeader();
    processors.emplace_back(std::move(transform));
    processors.emplace(std::move(transform));
}

void QueryPipeline::addDefaultTotals()

@ -434,7 +435,7 @@ void QueryPipeline::addDefaultTotals()

    auto source = std::make_shared<SourceFromSingleChunk>(current_header, Chunk(std::move(columns), 1));
    totals_having_port = &source->getPort();
    processors.emplace_back(source);
    processors.emplace(std::move(source));
}

void QueryPipeline::addTotals(ProcessorPtr source)

@ -448,7 +449,7 @@ void QueryPipeline::addTotals(ProcessorPtr source)
    assertBlocksHaveEqualStructure(current_header, source->getOutputs().front().getHeader(), "QueryPipeline");

    totals_having_port = &source->getOutputs().front();
    processors.emplace_back(std::move(source));
    processors.emplace(std::move(source));
}

void QueryPipeline::dropTotalsAndExtremes()

@ -457,7 +458,7 @@ void QueryPipeline::dropTotalsAndExtremes()
    {
        auto null_sink = std::make_shared<NullSink>(port->getHeader());
        connect(*port, null_sink->getPort());
        processors.emplace_back(std::move(null_sink));
        processors.emplace(std::move(null_sink));
        port = nullptr;
    };

@ -486,7 +487,7 @@ void QueryPipeline::addExtremesTransform()
        stream = &transform->getOutputPort();
        extremes.push_back(&transform->getExtremesPort());

        processors.emplace_back(std::move(transform));
        processors.emplace(std::move(transform));
    }

    if (extremes.size() == 1)

@ -510,8 +511,8 @@ void QueryPipeline::addCreatingSetsTransform(ProcessorPtr transform)
    connect(*streams.back(), concat->getInputs().back());

    streams.assign({ &concat->getOutputs().front() });
    processors.emplace_back(std::move(transform));
    processors.emplace_back(std::move(concat));
    processors.emplace(std::move(transform));
    processors.emplace(std::move(concat));
}

void QueryPipeline::setOutputFormat(ProcessorPtr output)

@ -538,17 +539,17 @@ void QueryPipeline::setOutputFormat(ProcessorPtr output)
    {
        auto null_source = std::make_shared<NullSource>(totals.getHeader());
        totals_having_port = &null_source->getPort();
        processors.emplace_back(std::move(null_source));
        processors.emplace(std::move(null_source));
    }

    if (!extremes_port)
    {
        auto null_source = std::make_shared<NullSource>(extremes.getHeader());
        extremes_port = &null_source->getPort();
        processors.emplace_back(std::move(null_source));
        processors.emplace(std::move(null_source));
    }

    processors.emplace_back(std::move(output));
    processors.emplace(std::move(output));

    connect(*streams.front(), main);
    connect(*totals_having_port, totals);
@ -587,6 +588,7 @@ void QueryPipeline::unitePipelines(
    {
        auto & pipeline = *pipeline_ptr;
        pipeline.checkInitialized();
        pipeline.processors.setCollectedProcessors(processors.getCollectedProcessors());

        if (!pipeline.isCompleted())
        {

@ -604,7 +606,7 @@ void QueryPipeline::unitePipelines(

            connect(*pipeline.extremes_port, converting->getInputPort());
            extremes.push_back(&converting->getOutputPort());
            processors.push_back(std::move(converting));
            processors.emplace(std::move(converting));
        }

        /// Take totals only from first port.

@ -615,10 +617,13 @@ void QueryPipeline::unitePipelines(

            connect(*pipeline.totals_having_port, converting->getInputPort());
            totals.push_back(&converting->getOutputPort());
            processors.push_back(std::move(converting));
            processors.emplace(std::move(converting));
        }

        processors.insert(processors.end(), pipeline.processors.begin(), pipeline.processors.end());
        auto * collector = processors.setCollectedProcessors(nullptr);
        processors.emplace(pipeline.processors.detach());
        processors.setCollectedProcessors(collector);

        streams.addStreams(pipeline.streams);

        table_locks.insert(table_locks.end(), std::make_move_iterator(pipeline.table_locks.begin()), std::make_move_iterator(pipeline.table_locks.end()));

@ -649,7 +654,7 @@ void QueryPipeline::unitePipelines(

void QueryPipeline::setProgressCallback(const ProgressCallback & callback)
{
    for (auto & processor : processors)
    for (auto & processor : processors.get())
    {
        if (auto * source = dynamic_cast<ISourceWithProgress *>(processor.get()))
            source->setProgressCallback(callback);

@ -663,7 +668,7 @@ void QueryPipeline::setProcessListElement(QueryStatus * elem)
{
    process_list_element = elem;

    for (auto & processor : processors)
    for (auto & processor : processors.get())
    {
        if (auto * source = dynamic_cast<ISourceWithProgress *>(processor.get()))
            source->setProcessListElement(elem);

@ -775,7 +780,7 @@ Pipe QueryPipeline::getPipe() &&

Pipes QueryPipeline::getPipes() &&
{
    Pipe pipe(std::move(processors), streams.at(0), totals_having_port, extremes_port);
    Pipe pipe(processors.detach(), streams.at(0), totals_having_port, extremes_port);
    pipe.max_parallel_streams = streams.maxParallelStreams();

    for (auto & lock : table_locks)

@ -807,7 +812,7 @@ PipelineExecutorPtr QueryPipeline::execute()
    if (!isCompleted())
        throw Exception("Cannot execute pipeline because it is not completed.", ErrorCodes::LOGICAL_ERROR);

    return std::make_shared<PipelineExecutor>(processors, process_list_element);
    return std::make_shared<PipelineExecutor>(processors.get(), process_list_element);
}

QueryPipeline & QueryPipeline::operator= (QueryPipeline && rhs)
@ -837,4 +842,49 @@ QueryPipeline & QueryPipeline::operator= (QueryPipeline && rhs)
    return *this;
}

void QueryPipeline::ProcessorsContainer::emplace(ProcessorPtr processor)
{
    if (collected_processors)
        collected_processors->emplace_back(processor);

    processors.emplace_back(std::move(processor));
}

void QueryPipeline::ProcessorsContainer::emplace(Processors processors_)
{
    for (auto & processor : processors_)
        emplace(std::move(processor));
}

Processors * QueryPipeline::ProcessorsContainer::setCollectedProcessors(Processors * collected_processors_)
{
    if (collected_processors && collected_processors_)
        throw Exception("Cannot set collected processors to QueryPipeline because "
            "another one object was already created for current pipeline.", ErrorCodes::LOGICAL_ERROR);

    std::swap(collected_processors, collected_processors_);
    return collected_processors_;
}

QueryPipelineProcessorsCollector::QueryPipelineProcessorsCollector(QueryPipeline & pipeline_, IQueryPlanStep * step_)
    : pipeline(pipeline_), step(step_)
{
    pipeline.processors.setCollectedProcessors(&processors);
}

QueryPipelineProcessorsCollector::~QueryPipelineProcessorsCollector()
{
    pipeline.processors.setCollectedProcessors(nullptr);
}

Processors QueryPipelineProcessorsCollector::detachProcessors(size_t group)
{
    for (auto & processor : processors)
        processor->setQueryPlanStep(step, group);

    Processors res;
    res.swap(processors);
    return res;
}

}
@ -18,6 +18,8 @@ class Context;

class IOutputFormat;

class QueryPipelineProcessorsCollector;

class QueryPipeline
{
private:

@ -69,6 +71,27 @@ private:
    };

public:

    class ProcessorsContainer
    {
    public:
        bool empty() const { return processors.empty(); }
        void emplace(ProcessorPtr processor);
        void emplace(Processors processors_);
        Processors * getCollectedProcessors() const { return collected_processors; }
        Processors * setCollectedProcessors(Processors * collected_processors);
        Processors & get() { return processors; }
        const Processors & get() const { return processors; }
        Processors detach() { return std::move(processors); }
    private:
        /// All added processors.
        Processors processors;

        /// If set, all newly created processors will be added to this too.
        /// It is needed for debug. See QueryPipelineProcessorsCollector below.
        Processors * collected_processors = nullptr;
    };

    QueryPipeline() = default;
    QueryPipeline(QueryPipeline &&) = default;
    ~QueryPipeline() = default;

@ -136,6 +159,8 @@ public:

    void enableQuotaForCurrentStreams();

    /// Unite several pipelines together. Result pipeline would have common_header structure.
    /// If collector is used, it will collect only newly-added processors, but not processors from pipelines.
    void unitePipelines(std::vector<std::unique_ptr<QueryPipeline>> pipelines, const Block & common_header);

    PipelineExecutorPtr execute();

@ -180,6 +205,9 @@ public:
    Pipe getPipe() &&;
    Pipes getPipes() &&;

    /// Get internal processors.
    const Processors & getProcessors() const { return processors.get(); }

private:
    /// Destruction order: processors, header, locks, temporary storages, local contexts

@ -193,8 +221,7 @@ private:
    /// Common header for each stream.
    Block current_header;

    /// All added processors.
    Processors processors;
    ProcessorsContainer processors;

    /// Port for each independent "stream".
    Streams streams;

@ -222,6 +249,24 @@ private:
    void addSimpleTransformImpl(const TProcessorGetter & getter);

    void initRowsBeforeLimit();

    friend class QueryPipelineProcessorsCollector;
};

/// This is a small class which collects newly added processors to QueryPipeline.
/// Pipeline must live longer than this class.
class QueryPipelineProcessorsCollector
{
public:
    explicit QueryPipelineProcessorsCollector(QueryPipeline & pipeline_, IQueryPlanStep * step_ = nullptr);
    ~QueryPipelineProcessorsCollector();

    Processors detachProcessors(size_t group = 0);

private:
    QueryPipeline & pipeline;
    IQueryPlanStep * step;
    Processors processors;
};

}
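A reduced model of the ProcessorsContainer / QueryPipelineProcessorsCollector handshake defined above, with ints standing in for processors (a sketch under that simplification, not the real classes):

#include <vector>

struct Container
{
    std::vector<int> items;
    std::vector<int> * collected = nullptr;

    // Mirrors every emplaced element into the optional side list.
    void emplace(int x)
    {
        if (collected)
            collected->push_back(x);
        items.push_back(x);
    }
};

struct Collector
{
    Container & container;
    std::vector<int> collected;

    // RAII: attach the side list for the collector's lifetime.
    explicit Collector(Container & container_) : container(container_)
    {
        container.collected = &collected;
    }
    ~Collector() { container.collected = nullptr; }

    std::vector<int> detach() { return std::move(collected); }
};

int main()
{
    Container pipeline;
    pipeline.emplace(1);             // not collected: no collector attached yet

    Collector collector(pipeline);
    pipeline.emplace(2);             // goes into both lists
    auto group = collector.detach(); // group == {2}

    return group.size() == 1 ? 0 : 1;
}
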
@ -24,6 +24,7 @@ AddingDelayedSourceStep::AddingDelayedSourceStep(

void AddingDelayedSourceStep::transformPipeline(QueryPipeline & pipeline)
{
    source->setQueryPlanStep(this);
    pipeline.addDelayedStream(source);
}

@ -27,7 +27,7 @@ AggregatingStep::AggregatingStep(
    bool storage_has_evenly_distributed_read_,
    InputOrderInfoPtr group_by_info_,
    SortDescription group_by_sort_description_)
    : ITransformingStep(input_stream_, params_.getHeader(final_), getTraits())
    : ITransformingStep(input_stream_, params_.getHeader(final_), getTraits(), false)
    , params(std::move(params_))
    , final(std::move(final_))
    , max_block_size(max_block_size_)
@ -41,6 +41,8 @@ AggregatingStep::AggregatingStep(

void AggregatingStep::transformPipeline(QueryPipeline & pipeline)
{
    QueryPipelineProcessorsCollector collector(pipeline, this);

    /// Forget about current totals and extremes. They will be calculated again after aggregation if needed.
    pipeline.dropTotalsAndExtremes();

@ -76,6 +78,8 @@ void AggregatingStep::transformPipeline(QueryPipeline & pipeline)
                return std::make_shared<AggregatingInOrderTransform>(header, transform_params, group_by_sort_description, max_block_size, many_data, counter++);
            });

            aggregating_in_order = collector.detachProcessors(0);

            for (auto & column_description : group_by_sort_description)
            {
                if (!column_description.column_name.empty())
@ -92,6 +96,7 @@ void AggregatingStep::transformPipeline(QueryPipeline & pipeline)
                max_block_size);

            pipeline.addPipe({ std::move(transform) });
            aggregating_sorted = collector.detachProcessors(1);
        }
        else
        {
@ -99,6 +104,8 @@ void AggregatingStep::transformPipeline(QueryPipeline & pipeline)
            {
                return std::make_shared<AggregatingInOrderTransform>(header, transform_params, group_by_sort_description, max_block_size);
            });

            aggregating_in_order = collector.detachProcessors(0);
        }

        pipeline.addSimpleTransform([&](const Block & header)
@ -106,6 +113,8 @@ void AggregatingStep::transformPipeline(QueryPipeline & pipeline)
            return std::make_shared<FinalizingSimpleTransform>(header, transform_params);
        });

        finalizing = collector.detachProcessors(2);

        pipeline.enableQuotaForCurrentStreams();
        return;
    }
@ -127,6 +136,8 @@ void AggregatingStep::transformPipeline(QueryPipeline & pipeline)
        });

        pipeline.resize(1);

        aggregating = collector.detachProcessors(0);
    }
    else
    {
@ -136,9 +147,29 @@ void AggregatingStep::transformPipeline(QueryPipeline & pipeline)
        {
            return std::make_shared<AggregatingTransform>(header, transform_params);
        });

        aggregating = collector.detachProcessors(0);
    }

    pipeline.enableQuotaForCurrentStreams();
}

void AggregatingStep::describeActions(FormatSettings & settings) const
{
    params.explain(settings.out, settings.offset);
}

void AggregatingStep::describePipeline(FormatSettings & settings) const
{
    if (!aggregating.empty())
        IQueryPlanStep::describePipeline(aggregating, settings);
    else
    {
        /// Processors are printed in reverse order.
        IQueryPlanStep::describePipeline(finalizing, settings);
        IQueryPlanStep::describePipeline(aggregating_sorted, settings);
        IQueryPlanStep::describePipeline(aggregating_in_order, settings);
    }
}

}
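To make the grouping above concrete: the in-order branch detaches three collector groups, and describePipeline() emits them finalizing-first because processors are printed in reverse order. Read top-to-bottom in execution order, a hypothetical view of that branch is:

    AggregatingInOrderTransform × N     (group 0, aggregating_in_order)
    AggregatingSortedTransform N → 1    (group 1, aggregating_sorted; only when several streams are merged)
    FinalizingSimpleTransform           (group 2, finalizing)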
@ -29,6 +29,9 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings &) const override;
    void describePipeline(FormatSettings & settings) const override;

private:
    Aggregator::Params params;
    bool final;
@ -40,6 +43,13 @@ private:

    InputOrderInfoPtr group_by_info;
    SortDescription group_by_sort_description;

    Processors aggregating_in_order;
    Processors aggregating_sorted;
    Processors finalizing;

    Processors aggregating;
};

}
@ -1,6 +1,7 @@
#include <Processors/QueryPlan/ConvertingStep.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Transforms/ConvertingTransform.h>
#include <IO/Operators.h>

namespace DB
{
@ -30,4 +31,40 @@ void ConvertingStep::transformPipeline(QueryPipeline & pipeline)
    });
}

void ConvertingStep::describeActions(FormatSettings & settings) const
{
    const auto & header = input_streams[0].header;
    auto conversion = ConvertingTransform(header, result_header, ConvertingTransform::MatchColumnsMode::Name)
        .getConversion();

    auto dump_description = [&](const ColumnWithTypeAndName & elem, bool is_const)
    {
        settings.out << elem.name << ' ' << elem.type->getName() << (is_const ? " Const" : "") << '\n';
    };

    String prefix(settings.offset, ' ');

    for (size_t i = 0; i < conversion.size(); ++i)
    {
        const auto & from = header.getByPosition(conversion[i]);
        const auto & to = result_header.getByPosition(i);

        bool from_const = from.column && isColumnConst(*from.column);
        bool to_const = to.column && isColumnConst(*to.column);

        settings.out << prefix;

        if (from.name == to.name && from.type->equals(*to.type) && from_const == to_const)
            dump_description(from, from_const);
        else
        {
            dump_description(to, to_const);
            settings.out << " ← ";
            dump_description(from, from_const);
        }

        settings.out << '\n';
    }
}

}
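Given dump_description above, an unchanged column prints as a single line, while a converted one prints the target description followed by the source on the next line. With hypothetical columns, the output would look roughly like:

    x UInt32
     ← x String

    y UInt8 Const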
@ -14,6 +14,8 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    Block result_header;
};
@ -1,6 +1,7 @@
#include <Processors/QueryPlan/CreatingSetsStep.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Transforms/CreatingSetsTransform.h>
#include <IO/Operators.h>

namespace DB
{
@ -37,4 +38,20 @@ void CreatingSetsStep::transformPipeline(QueryPipeline & pipeline)
    pipeline.addCreatingSetsTransform(std::move(creating_sets));
}

void CreatingSetsStep::describeActions(FormatSettings & settings) const
{
    String prefix(settings.offset, ' ');

    for (const auto & set : subqueries_for_sets)
    {
        settings.out << prefix;
        if (set.second.set)
            settings.out << "Set: ";
        else if (set.second.join)
            settings.out << "Join: ";

        settings.out << set.first << '\n';
    }
}

}
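Hypothetically, for a query that builds one set and one join from subqueries, this would print one line per entry of subqueries_for_sets, keyed by subquery name, shaped like:

    Set: _subquery1
    Join: _subquery2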
@ -20,6 +20,8 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    SubqueriesForSets subqueries_for_sets;
    SizeLimits network_transfer_limits;
@ -1,6 +1,7 @@
#include <Processors/QueryPlan/DistinctStep.h>
#include <Processors/Transforms/DistinctTransform.h>
#include <Processors/QueryPipeline.h>
#include <IO/Operators.h>

namespace DB
{
@ -68,4 +69,27 @@ void DistinctStep::transformPipeline(QueryPipeline & pipeline)
    });
}

void DistinctStep::describeActions(FormatSettings & settings) const
{
    String prefix(settings.offset, ' ');
    settings.out << prefix << "Columns: ";

    if (columns.empty())
        settings.out << "none";
    else
    {
        bool first = true;
        for (const auto & column : columns)
        {
            if (!first)
                settings.out << ", ";
            first = false;

            settings.out << column;
        }
    }

    settings.out << '\n';
}

}
@ -20,6 +20,8 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    SizeLimits set_size_limits;
    UInt64 limit_hint;
@ -3,6 +3,7 @@
#include <Processors/QueryPipeline.h>
#include <Processors/Transforms/InflatingExpressionTransform.h>
#include <Interpreters/ExpressionActions.h>
#include <IO/Operators.h>

namespace DB
{
@ -37,6 +38,25 @@ void ExpressionStep::transformPipeline(QueryPipeline & pipeline)
    });
}

static void doDescribeActions(const ExpressionActionsPtr & expression, IQueryPlanStep::FormatSettings & settings)
{
    String prefix(settings.offset, ' ');
    bool first = true;

    for (const auto & action : expression->getActions())
    {
        settings.out << prefix << (first ? "Actions: "
                                         : "         ");
        first = false;
        settings.out << action.toString() << '\n';
    }
}

void ExpressionStep::describeActions(FormatSettings & settings) const
{
    doDescribeActions(expression, settings);
}

InflatingExpressionStep::InflatingExpressionStep(const DataStream & input_stream_, ExpressionActionsPtr expression_)
    : ITransformingStep(
        input_stream_,
@ -64,4 +84,9 @@ void InflatingExpressionStep::transformPipeline(QueryPipeline & pipeline)
    });
}

void InflatingExpressionStep::describeActions(FormatSettings & settings) const
{
    doDescribeActions(expression, settings);
}

}
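The ternary in doDescribeActions keeps continuation lines aligned under the "Actions: " label, so a multi-action expression prints as one visually grouped block. With two hypothetical actions (the exact wording of each line comes from action.toString()):

    Actions: FUNCTION plus(a, b) -> c
             REMOVE COLUMN a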
@ -21,6 +21,8 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    ExpressionActionsPtr expression;
};
@ -36,6 +38,8 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    ExpressionActionsPtr expression;
};
@ -1,6 +1,7 @@
#include <Processors/QueryPlan/FillingStep.h>
#include <Processors/Transforms/FillingTransform.h>
#include <Processors/QueryPipeline.h>
#include <IO/Operators.h>

namespace DB
{
@ -36,4 +37,11 @@ void FillingStep::transformPipeline(QueryPipeline & pipeline)
    });
}

void FillingStep::describeActions(FormatSettings & settings) const
{
    settings.out << String(settings.offset, ' ');
    dumpSortDescription(sort_description, input_streams.front().header, settings.out);
    settings.out << '\n';
}

}
@ -15,6 +15,8 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    SortDescription sort_description;
};
@ -2,6 +2,7 @@
#include <Processors/Transforms/FilterTransform.h>
#include <Processors/QueryPipeline.h>
#include <Interpreters/ExpressionActions.h>
#include <IO/Operators.h>

namespace DB
{
@ -42,4 +43,19 @@ void FilterStep::transformPipeline(QueryPipeline & pipeline)
    });
}

void FilterStep::describeActions(FormatSettings & settings) const
{
    String prefix(settings.offset, ' ');
    settings.out << prefix << "Filter column: " << filter_column_name << '\n';

    bool first = true;
    for (const auto & action : expression->getActions())
    {
        settings.out << prefix << (first ? "Actions: "
                                         : "         ");
        first = false;
        settings.out << action.toString() << '\n';
    }
}

}
@ -20,6 +20,8 @@ public:
    String getName() const override { return "Filter"; }
    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    ExpressionActionsPtr expression;
    String filter_column_name;
@ -4,6 +4,7 @@
#include <Processors/Merges/MergingSortedTransform.h>
#include <Processors/Transforms/PartialSortingTransform.h>
#include <Processors/Transforms/FinishSortingTransform.h>
#include <IO/Operators.h>

namespace DB
{
@ -68,4 +69,20 @@ void FinishSortingStep::transformPipeline(QueryPipeline & pipeline)
    }
}

void FinishSortingStep::describeActions(FormatSettings & settings) const
{
    String prefix(settings.offset, ' ');

    settings.out << prefix << "Prefix sort description: ";
    dumpSortDescription(prefix_description, input_streams.front().header, settings.out);
    settings.out << '\n';

    settings.out << prefix << "Result sort description: ";
    dumpSortDescription(result_description, input_streams.front().header, settings.out);
    settings.out << '\n';

    if (limit)
        settings.out << prefix << "Limit " << limit << '\n';
}

}
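FinishSortingStep completes a sort over streams that are already ordered by a prefix of the target description, which is why both descriptions are worth printing. For a hypothetical ORDER BY (a, b) over data sorted by (a) with LIMIT 10, the output would be shaped roughly like the following, with the exact column rendering supplied by dumpSortDescription():

    Prefix sort description: a ASC
    Result sort description: a ASC, b ASC
    Limit 10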
@ -20,6 +20,8 @@ public:

    void transformPipeline(QueryPipeline & pipeline) override;

    void describeActions(FormatSettings & settings) const override;

private:
    SortDescription prefix_description;
    SortDescription result_description;
@ -1,4 +1,6 @@
#include <Processors/QueryPlan/IQueryPlanStep.h>
#include <Processors/IProcessor.h>
#include <IO/Operators.h>

namespace DB
{
@ -16,4 +18,94 @@ const DataStream & IQueryPlanStep::getOutputStream() const
    return *output_stream;
}

static void doDescribeHeader(const Block & header, size_t count, IQueryPlanStep::FormatSettings & settings)
{
    String prefix(settings.offset, settings.indent_char);
    prefix += "Header";

    if (count > 1)
        prefix += " × " + std::to_string(count) + " ";

    prefix += ": ";

    settings.out << prefix;

    if (!header)
    {
        settings.out << " empty\n";
        return;
    }

    prefix.assign(prefix.size(), settings.indent_char);
    bool first = true;

    for (const auto & elem : header)
    {
        if (!first)
            settings.out << prefix;

        first = false;
        elem.dumpStructure(settings.out);
        settings.out << '\n';
    }
}

static void doDescribeProcessor(const IProcessor & processor, size_t count, IQueryPlanStep::FormatSettings & settings)
{
    settings.out << String(settings.offset, settings.indent_char) << processor.getName();
    if (count > 1)
        settings.out << " × " << std::to_string(count);

    size_t num_inputs = processor.getInputs().size();
    size_t num_outputs = processor.getOutputs().size();
    if (num_inputs != 1 || num_outputs != 1)
        settings.out << " " << std::to_string(num_inputs) << " → " << std::to_string(num_outputs);

    settings.out << '\n';

    if (settings.write_header)
    {
        const Block * last_header = nullptr;
        size_t num_equal_headers = 0;

        for (const auto & port : processor.getOutputs())
        {
            if (last_header && !blocksHaveEqualStructure(*last_header, port.getHeader()))
            {
                doDescribeHeader(*last_header, num_equal_headers, settings);
                num_equal_headers = 0;
            }

            ++num_equal_headers;
            last_header = &port.getHeader();
        }

        if (last_header)
            doDescribeHeader(*last_header, num_equal_headers, settings);
    }

    settings.offset += settings.indent;
}

void IQueryPlanStep::describePipeline(const Processors & processors, FormatSettings & settings)
{
    const IProcessor * prev = nullptr;
    size_t count = 0;

    for (auto it = processors.rbegin(); it != processors.rend(); ++it)
    {
        if (prev && prev->getName() != (*it)->getName())
        {
            doDescribeProcessor(*prev, count, settings);
            count = 0;
        }

        ++count;
        prev = it->get();
    }

    if (prev)
        doDescribeProcessor(*prev, count, settings);
}

}
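Taken together, the helpers above render the pipeline bottom-up with run-length compression: consecutive processors sharing a name collapse into one line with a × multiplier, any fan-in or fan-out other than 1 → 1 is spelled out, and settings.offset grows by settings.indent per described processor so each layer nests under the previous one. A hypothetical fragment:

    MergingSortedTransform 4 → 1
      PartialSortingTransform × 4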
@ -8,6 +8,10 @@ class QueryPipeline;
using QueryPipelinePtr = std::unique_ptr<QueryPipeline>;
using QueryPipelines = std::vector<QueryPipelinePtr>;

class IProcessor;
using ProcessorPtr = std::shared_ptr<IProcessor>;
using Processors = std::vector<ProcessorPtr>;

/// Description of data stream.
/// A single logical data stream may map to many ports of the pipeline.
class DataStream
@ -57,12 +61,29 @@ public:
    const std::string & getStepDescription() const { return step_description; }
    void setStepDescription(std::string description) { step_description = std::move(description); }

    struct FormatSettings
    {
        WriteBuffer & out;
        size_t offset = 0;
        const size_t indent = 2;
        const char indent_char = ' ';
        const bool write_header = false;
    };

    /// Get a detailed description of the step's actions. This is shown by EXPLAIN queries with the option `actions = 1`.
    virtual void describeActions(FormatSettings & /*settings*/) const {}

    /// Get a description of the processors added by the current step. Should be called after updatePipeline().
    virtual void describePipeline(FormatSettings & /*settings*/) const {}

protected:
    DataStreams input_streams;
    std::optional<DataStream> output_stream;

    /// Text description of what the current step does.
    std::string step_description;

    static void describePipeline(const Processors & processors, FormatSettings & settings);
};

using QueryPlanStepPtr = std::unique_ptr<IQueryPlanStep>;
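As a sketch of this extension point, a step overrides describeActions() and writes indented text through the supplied FormatSettings, following the FilterStep/DistinctStep pattern above (the step and member below are illustrative):

    void MyStep::describeActions(FormatSettings & settings) const
    {
        String prefix(settings.offset, ' ');
        settings.out << prefix << "Limit hint: " << limit_hint << '\n';
    }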
@ -12,8 +12,15 @@ ISourceStep::ISourceStep(DataStream output_stream_)
QueryPipelinePtr ISourceStep::updatePipeline(QueryPipelines)
{
    auto pipeline = std::make_unique<QueryPipeline>();
    QueryPipelineProcessorsCollector collector(*pipeline, this);
    initializePipeline(*pipeline);
    processors = collector.detachProcessors();
    return pipeline;
}

void ISourceStep::describePipeline(FormatSettings & settings) const
{
    IQueryPlanStep::describePipeline(processors, settings);
}

}
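A concrete source step therefore only needs to implement initializePipeline(); the base class collects whatever processors that call creates and exposes them to describePipeline(). An illustrative subclass:

    class MySourceStep : public ISourceStep
    {
    public:
        explicit MySourceStep(DataStream output) : ISourceStep(std::move(output)) {}

        String getName() const override { return "MySource"; }

        void initializePipeline(QueryPipeline & pipeline) override
        {
            /// Add source processors to the pipeline here; everything created
            /// inside this call is captured by the collector in updatePipeline().
        }
    };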